| prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
|---|---|---|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
( | Timestamp('2013-07-01 00:00:00') | pandas.Timestamp |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Convolutional Neural Networks (CNNs) for NLP
# MAGIC
# MAGIC ## In this lesson, you learn:
# MAGIC - How to apply 1D convolutions to classify text sentiment
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup
# COMMAND ----------
# MAGIC %md
# MAGIC ### Prepare Data
# MAGIC
# MAGIC The following cells apply the same text preprocessing steps as the previous notebook, in which we built bidirectional LSTMs. The CNN-for-NLP pipeline differs only in the model-building step.
# COMMAND ----------
### We are reusing the same configurations used in bi-directional LSTMs
vocab_size = 10000
max_length = 400
# COMMAND ----------
from pyspark.sql.functions import col, when
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
text_df = (spark.read.parquet(f"{datasets_dir}/nlp/reviews/reviews_cleaned.parquet")
.select("Text", "Score")
.limit(5000) ### limit to only 5000 rows to reduce training time
.withColumn("sentiment", when(col("Score") > 3, 1).otherwise(0))
)
### Splitting data into train/test
train_df, test_df = text_df.randomSplit([0.8, 0.2])
train_pdf = train_df.toPandas()
X_train = train_pdf["Text"].values
y_train = train_pdf["sentiment"].values
### Tokenization
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(X_train)
### Convert the texts to sequences
X_train_seq = tokenizer.texts_to_sequences(X_train)
X_train_seq_padded = pad_sequences(X_train_seq, maxlen=max_length, padding="post")
### Follow the same process for test_df
test_pdf = test_df.toPandas()
X_test = test_pdf["Text"].values
y_test = test_pdf["sentiment"].values
X_test_seq = tokenizer.texts_to_sequences(X_test)
X_test_seq_padded = pad_sequences(X_test_seq, maxlen=max_length, padding="post")
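# COMMAND ----------
### Illustrative sketch (not part of the original notebook): what the fitted tokenizer and
### padding produce for a toy review. "great tasty coffee" is a made-up example string.
toy_seq = tokenizer.texts_to_sequences(["great tasty coffee"])
print(toy_seq)  # word indices assigned by the fitted tokenizer (words outside the vocabulary are dropped)
print(pad_sequences(toy_seq, maxlen=max_length, padding="post").shape)  # (1, 400): zero-padded to max_length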
# COMMAND ----------
# MAGIC %md
# MAGIC ### Let's build a Convolutional Neural Network (CNN) for sentiment analysis!
# MAGIC
# MAGIC Notice that we keep the hyperparameter values the same as in the previous LSTM notebook, but we also introduce two new hyperparameters unique to CNNs: **`filters`** and **`kernel_size`**. A short illustrative sketch of how they shape the convolution output follows the next cell.
# COMMAND ----------
batch_size = 32
embedding_dim = 300
hidden_dim = 250
epochs = 1
### Only for CNN
filters = 250
kernel_size = 3
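# COMMAND ----------
### Illustrative note (not part of the original notebook): with padding="valid" and stride 1,
### a Conv1D layer slides `filters` feature detectors of width `kernel_size` over the
### sequence, so each example produces an output of shape (max_length - kernel_size + 1, filters).
conv_output_steps = max_length - kernel_size + 1
print(f"Conv1D output shape per example: ({conv_output_steps}, {filters})")  # (398, 250)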
# COMMAND ----------
# MAGIC %md
# MAGIC Now, let's define our architecture. It includes a component we haven't covered yet: **dropout**.
# MAGIC
# MAGIC **Dropout** is a regularization method that reduces overfitting by randomly and temporarily removing nodes during training.
# MAGIC
# MAGIC It works like this: <br>
# MAGIC
# MAGIC * Applies to most types of layers (e.g. fully connected, convolutional, recurrent) and to larger networks
# MAGIC * Temporarily and randomly removes nodes and their connections during each training cycle
# MAGIC
# MAGIC
# MAGIC <img src="https://files.training.databricks.com/images/icon_note_24.png"/> See the original paper here: <a href="http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf" target="_blank">Dropout: A Simple Way to Prevent Neural Networks from Overfitting</a>
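# COMMAND ----------
### Illustrative sketch (not part of the original notebook) of inverted dropout with rate 0.5:
### during training a random subset of activations is zeroed and the survivors are rescaled
### so that the expected activation is unchanged. Keras' Dropout layer does this internally.
import numpy as np
rng = np.random.default_rng(0)
activations = np.ones(8)
keep_mask = rng.random(8) >= 0.5       # keep each unit with probability ~0.5
print(activations * keep_mask / 0.5)   # dropped units become 0, kept units are scaled to 2.0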
# COMMAND ----------
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, GlobalMaxPool1D, Dense, Dropout, Embedding
model = Sequential([
Embedding(vocab_size, embedding_dim, input_length=max_length),
Conv1D(filters, kernel_size, strides=1, padding="valid", activation="relu", input_shape=(max_length, embedding_dim)),
GlobalMaxPool1D(),
Dense(hidden_dim, activation="relu"),
Dropout(0.1),
Dense(1, activation="sigmoid")
])
# COMMAND ----------
# MAGIC %md
# MAGIC What is Global Max Pooling?
# MAGIC
# MAGIC - We set the pool size to be equal to the input size, so the max of the entire input is the global max pooling output value.
# MAGIC - It further reduces the dimensionality.
# MAGIC - <a href="https://www.machinecurve.com/index.php/2020/01/30/what-are-max-pooling-average-pooling-global-max-pooling-and-global-average-pooling/" target="_blank">Click here to read more.</a>
# MAGIC - Or <a href="https://github.com/keras-team/keras/blob/3d176e926f848c5aacd036d6095ab015a2f8cc83/keras/layers/pooling.py#L433" target="_blank">click here to look at the Keras source code</a>
# MAGIC - Example papers that use global max pooling:
# MAGIC - <a href="https://arxiv.org/pdf/1604.00187.pdf" target="_blank">A Deep CNN for Word Spotting in Handwritten Documents, 2017</a>
# MAGIC - <a href="https://hal.inria.fr/hal-01015140/file/Oquab15.pdf" target="_blank">Is object localization for free? 2015</a>
# MAGIC
# MAGIC <img src="https://www.machinecurve.com/wp-content/uploads/2020/01/Global-Max-Pooling-1.png">
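# COMMAND ----------
### Illustrative sketch (not part of the original notebook): GlobalMaxPool1D keeps only the
### largest activation of each filter across the time axis, collapsing
### (batch, steps, filters) into (batch, filters).
import numpy as np
feature_maps = np.random.randn(2, 5, 3)  # (batch=2, steps=5, filters=3)
pooled = feature_maps.max(axis=1)        # same result GlobalMaxPool1D would produce for this array
print(pooled.shape)                      # (2, 3)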
# COMMAND ----------
model.summary()
# COMMAND ----------
from tensorflow.keras.optimizers import Adam
model.compile(optimizer=Adam(learning_rate=0.001), loss="binary_crossentropy", metrics=["accuracy", "AUC"])
# COMMAND ----------
# MAGIC %md
# MAGIC ### Train CNN and log using MLflow
# COMMAND ----------
import mlflow
mlflow.autolog()
with mlflow.start_run() as run:
history = model.fit(X_train_seq_padded,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.1,
verbose=1)
history
# COMMAND ----------
# MAGIC %md
# MAGIC ### Evaluate the model
# COMMAND ----------
test_loss, test_accuracy, test_auc = model.evaluate(X_test_seq_padded, y_test)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Apply distributed inference
# MAGIC
# MAGIC The code below is also the same as in the previous LSTM notebook.
# COMMAND ----------
import pandas as pd
logged_model = f"runs:/{run.info.run_id}/model"
### Load model as a Spark UDF
predict = mlflow.pyfunc.spark_udf(spark, model_uri=logged_model)
df = spark.createDataFrame(pd.concat([ | pd.DataFrame(data=y_test, columns=["label"]) | pandas.DataFrame |
import factal.schema as schema
from arcgis.features import GeoAccessor
from arcgis.gis import GIS
from datetime import datetime, timedelta
import pandas as pd
import requests
import time
class Extractor(object):
def __init__(self, token):
self.token = token
self.urls = self.get_urls()
self.gis = None
@staticmethod
def get_urls():
""" Return Dictionary of Target API Endpoints """
return {
'item': 'https://www.factal.com/api/v2/item',
'topic': 'https://www.factal.com/api/v2/topic'
}
def add(self, lyr, sdf, id_field):
incoming_ids = sdf[id_field].tolist()
existing_ids = [f.attributes[id_field] for f in lyr.query().features]
new_item_ids = list(set(incoming_ids).difference(set(existing_ids)))
add_features = sdf[sdf[id_field].isin(new_item_ids)]
if len(add_features) > 0:
res = lyr.edit_features(adds=add_features.spatial.to_featureset())['addResults']
return len([i for i in res if i['success']])
else:
return 0
def build_incident_hfl(self):
incidents, arcs = self.parse_items(self.fetch_items())
incident_df = self.get_df(incidents)
incident_df.spatial.to_featurelayer(f'Factal_{round(time.time())}', gis=self.gis, tags='Factal')
def connect(self, agol_url, username, password):
self.gis = GIS(agol_url, username, password)
def convert_item_to_df(self, item_data):
""" Return Data Frame from a List of Dictionaries Representing Item/Topic Locations """
df = pd.DataFrame(item_data)
df = df.spatial.from_xy(df, 'longitude', 'latitude')
# Convert Time Series to UTC
for field in schema.item_times:
df[field] = pd.to_datetime(df[field], utc=True)
return df
def convert_topic_to_df(self, topic_data):
""" Return Data Frame from a List of Dictionaries Representing Topics """
df = | pd.DataFrame(topic_data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 5 14:19:54 2018
@author: canf
"""
import pandas as pd
from sklearn import ensemble
from sklearn.model_selection import cross_validate
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB
import gzip
import xgboost as xgb
def loadData(df, scaler=None):
data = pd.DataFrame(index=range(len(df)))
data = df.get(['X','Y'])
DayOfWeeks = df.DayOfWeek.unique()
DayOfWeekMap = {}
i = 0
for day in DayOfWeeks:
DayOfWeekMap[day] = i
i += 1
data = data.join(df['DayOfWeek'].map(DayOfWeekMap))
PdDistricts = df.PdDistrict.unique()
PdDistrictMap = {}
i = 0
for s in PdDistricts:
PdDistrictMap[s] = i
i += 1
data = data.join(df['PdDistrict'].map(PdDistrictMap))
date_time = pd.to_datetime(df.Dates)
year = date_time.dt.year
data['Year'] = year
month = date_time.dt.month
data['Month'] = month
day = date_time.dt.day
data['Day'] = day
hour = date_time.dt.hour
data['hour'] = hour
minute = date_time.dt.minute
time = hour*60+minute
data['Time'] = time
data['StreetCorner'] = df['Address'].str.contains('/').map(int)
data['Block'] = df['Address'].str.contains('Block').map(int)
X = data.values
Y = None
if 'Category' in df.columns:
Y = df.Category.values
return X, Y, scaler
def RFpredict(X,Y,Xhat):
clf = ensemble.RandomForestClassifier()
clf.set_params(min_samples_split=1000)
clf.fit(X,Y)
Yhat = clf.predict_proba(Xhat)
return Yhat,clf
def NBpredict_Gauss(X,Y,Xhat):
clf = GaussianNB()
clf.fit(X,Y)
Yhat = clf.predict_proba(Xhat)
return Yhat,clf
def NBpredict_Bernoulli(X,Y,Xhat):
clf = BernoulliNB()
clf.fit(X,Y)
Yhat = clf.predict_proba(Xhat)
return Yhat,clf
train = | pd.read_csv("./input/train.csv") | pandas.read_csv |
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
# this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
# this is allowed but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using a different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
# test using a different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
# non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
df = DataFrame([[123,'as<PASSWORD>'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index= | date_range('1/1/2000', periods=3) | pandas.date_range |
# -*- coding: utf-8 -*-
import os
import sys
from typing import List, NamedTuple
from datetime import datetime
from google.cloud import aiplatform, storage
from google.cloud.aiplatform import gapic as aip
from kfp.v2 import compiler, dsl
from kfp.v2.dsl import component, pipeline, Input, Output, Model, Metrics, Dataset, HTML
USERNAME = "<lowercase user name>" # @param username
BUCKET_NAME = "gs://<USED BUCKET>" # @param bucket name
REGION = "<REGION>" # @param region
PROJECT_ID = "<GCP PROJECT ID>" # @param project id
PROJECT_NUMBER = "<GCP PROJECT NUMBER>" # @param project number
PIPELINE_NAME = f"diamonds-predictor-serving-pipeline-{USERNAME}"
ARTIFACT_REGISTRY_NAME = "diamonds-predictor-repo"
SUPERWISE_CLIENT_ID = "<YOUR SUPERWISE ACCOUNT CLIENT ID>" # @param client id
SUPERWISE_SECRET = "<YOUR SUPERWISE ACCOUNT SECRET>" # @param secret
SUPERWISE_MODEL_NAME = "Regression - Diamonds Price Predictor"
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
""" Vertex definitions """
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
PIPELINE_ROOT = "{}/{}_pipeline_root/workshop".format(BUCKET_NAME, USERNAME)
# Load the data Component
@component(packages_to_install=["pandas"])
def load_data(dataset: Output[Dataset]):
import pandas as pd
df = pd.read_csv("https://www.openml.org/data/get_csv/21792853/dataset")
df = df[df["price"] < 10000]
print("Load Data: ", df.head())
df.to_csv(dataset.path, index=False)
# Validate the data Component
@component(packages_to_install=["pandas"])
def validate_data(df: Input[Dataset], validated_df: Output[Dataset]):
import pandas as pd
df = pd.read_csv(df.path)
print("Validate_data: ", df.head())
BINARY_FEATURES = []
# List all column names for numeric features
NUMERIC_FEATURES = ["carat", "depth", "table", "x", "y", "z"]
# List all column names for categorical features
CATEGORICAL_FEATURES = ["cut", "color", "clarity"]
# ID column - needed to support predict() over numpy arrays
ID = ["record_id"]
TARGET = "price"
ALL_COLUMNS = ID + BINARY_FEATURES + NUMERIC_FEATURES + CATEGORICAL_FEATURES
# define the column name for the target
df = df.reset_index().rename(columns={"index": "record_id"})
for n in NUMERIC_FEATURES:
df[n] = pd.to_numeric(df[n], errors="coerce")
df = df.fillna(df.mean(numeric_only=True))
def data_selection(df: pd.DataFrame, selected_columns: List[str]):
selected_columns.append(TARGET)
data = df.loc[:, selected_columns]
return data
## Feature selection
df = data_selection(df, ALL_COLUMNS)
return df.to_csv(validated_df.path, index=False)
# Prepare data for training Component
@component(packages_to_install=["scikit-learn==1.0.2", "pandas"])
def prepare_data(
df: Input[Dataset],
X_train: Output[Dataset],
y_train: Output[Dataset],
X_test: Output[Dataset],
y_test: Output[Dataset],
):
import pandas as pd
from sklearn.model_selection import train_test_split
target = "price"
df = pd.read_csv(df.path)
print("Prepare data: ", df.head())
X, y = df.drop(columns=[target]), df[target]
X_train_data, X_test_data, y_train_data, y_test_data = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train_data.to_csv(X_train.path, index=False)
y_train_data.to_csv(y_train.path, index=False)
X_test_data.to_csv(X_test.path, index=False)
y_test_data.to_csv(y_test.path, index=False)
# Train model Component
@component(packages_to_install=["scikit-learn==1.0.2", "pandas", "joblib"])
def train_model(
X_train: Input[Dataset],
y_train: Input[Dataset],
model_artifact: Output[Model],
):
import joblib
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
# List all column names for numeric features
NUMERIC_FEATURES = ["carat", "depth", "table", "x", "y", "z"]
# List all column names for categorical features
CATEGORICAL_FEATURES = ["cut", "color", "clarity"]
# ID column - needed to support predict() over numpy arrays
ID = ["record_id"]
ALL_COLUMNS = ID + NUMERIC_FEATURES + CATEGORICAL_FEATURES
X, y = | pd.read_csv(X_train.path) | pandas.read_csv |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# %%
import os
import warnings
warnings.filterwarnings('ignore')
import time as t
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
from imblearn.over_sampling import SMOTE
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, recall_score, precision_score, classification_report, roc_curve, auc, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# %% [markdown]
# ## Data Preprocessing
# 1. Data Loading
# 2. Data Cleaning
# 3. X y split
# 4. Data Scaling
# %%
def data_load():  # check for the availability of the dataset and change cwd if not found
df = pd.read_csv("../input/breast-cancer-prediction/data.csv")
return df
def data_clean(df):
return df
def X_y_split(df):
X = df.drop(['diagnosis'], axis=1)
y = df['diagnosis']
return X, y
def data_split_scale(X, y, sampling):
#Splitting dataset into Train and Test Set
X_tr, X_test, y_tr, y_test = train_test_split(X, y, test_size=0.3)
#Feature Scaling using Standardization
ss = StandardScaler()
X_tr = ss.fit_transform(X_tr)
X_test = ss.fit_transform(X_test)
print(
"'For 'Sampling strategies', I have 3 options. \n \t'1' stands for 'Upsampling'\n \t'2' stands for 'downsampling'. \n \t'3' stands for 'SMOTE''"
)
samp_sel = int(input("Now enter your selection for sampling strategy: \t"))
samp = [sampling.upsample, sampling.downsample, sampling.smote]
temp = samp[samp_sel - 1]
X_train, y_train = temp(X_train=pd.DataFrame(X_tr),
y_train=pd.DataFrame(y_tr))
return pd.DataFrame(X_train), | pd.DataFrame(X_test) | pandas.DataFrame |
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz= | timezones.tz_standardize(dtype.tz) | pandas._libs.tslibs.timezones.tz_standardize |
import os
import unittest
import numpy as np
import pandas as pd
from cgnal.core.data.model.ml import (
LazyDataset,
IterGenerator,
MultiFeatureSample,
Sample,
PandasDataset,
PandasTimeIndexedDataset,
CachedDataset,
features_and_labels_to_dataset,
)
from typing import Iterator, Generator
from cgnal.core.tests.core import TestCase, logTest
from tests import TMP_FOLDER
samples = [
Sample(features=[100, 101], label=1),
Sample(features=[102, 103], label=2),
Sample(features=[104, 105], label=3),
Sample(features=[106, 107], label=4),
Sample(features=[108, 109], label=5),
Sample(features=[110, 111], label=6),
Sample(features=[112, 113], label=7),
Sample(features=[114, 115], label=8),
Sample(features=[116, 117], label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
lazyDat = LazyDataset(IterGenerator(samples_gen))
class features_and_labels_to_datasetTests(TestCase):
def test_features_and_labels_to_dataset(self):
dataset = features_and_labels_to_dataset(
pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
pd.Series([0, 0, 0, 1], name="Label"),
)
dataset_no_labels = features_and_labels_to_dataset(
pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
None,
)
self.assertTrue(isinstance(dataset_no_labels, CachedDataset))
self.assertTrue(isinstance(dataset, CachedDataset))
self.assertTrue(
(
dataset.getFeaturesAs("pandas")
== pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
)
)
.all()
.all()
)
self.assertTrue(
(
dataset.getLabelsAs("pandas")
== pd.DataFrame(pd.Series([0, 0, 0, 1], name="Label"))
)
.all()
.all()
)
class LazyDatasetTests(TestCase):
@logTest
def test_withLookback_MultiFeatureSample(self):
samples = [
MultiFeatureSample(
features=[np.array([100.0, 101.0]), np.array([np.NaN])], label=1.0
),
MultiFeatureSample(
features=[np.array([102.0, 103.0]), np.array([1.0])], label=2.0
),
MultiFeatureSample(
features=[np.array([104.0, 105.0]), np.array([2.0])], label=3.0
),
MultiFeatureSample(
features=[np.array([106.0, 107.0]), np.array([3.0])], label=4.0
),
MultiFeatureSample(
features=[np.array([108.0, 109.0]), np.array([4.0])], label=5.0
),
MultiFeatureSample(
features=[np.array([110.0, 111.0]), np.array([5.0])], label=6.0
),
MultiFeatureSample(
features=[np.array([112.0, 113.0]), np.array([6.0])], label=7.0
),
MultiFeatureSample(
features=[np.array([114.0, 115.0]), np.array([7.0])], label=8.0
),
MultiFeatureSample(
features=[np.array([116.0, 117.0]), np.array([8.0])], label=9.0
),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[102.0, 103.0], [104.0, 105.0], [106.0, 107.0]],
[[104.0, 105.0], [106.0, 107.0], [108.0, 109.0]],
[[106.0, 107.0], [108.0, 109.0], [110.0, 111.0]],
[[108.0, 109.0], [110.0, 111.0], [112.0, 113.0]],
]
)
y1 = np.array(
[
[[1.0], [2.0], [3.0]],
[[2.0], [3.0], [4.0]],
[[3.0], [4.0], [5.0]],
[[4.0], [5.0], [6.0]],
]
)
lab1 = np.array([4.0, 5.0, 6.0, 7.0])
X2 = np.array(
[
[[110.0, 111.0], [112.0, 113.0], [114.0, 115.0]],
[[112.0, 113.0], [114.0, 115.0], [116.0, 117.0]],
]
)
y2 = np.array([[[5.0], [6.0], [7.0]], [[6.0], [7.0], [8.0]]])
lab2 = np.array([8.0, 9.0])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
temp1X = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 0])))
temp1y = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 1])))
tmp1lab = batch1.getLabelsAs("array")
res = [
np.array_equal(temp1X, X1),
np.array_equal(temp1y, y1),
np.array_equal(tmp1lab, lab1),
]
tmp2 = batch2.getFeaturesAs("array")
temp2X = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 0])))
temp2y = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 1])))
tmp2lab = batch2.getLabelsAs("array")
res = res + [
np.array_equal(temp2X, X2),
np.array_equal(temp2y, y2),
np.array_equal(tmp2lab, lab2),
]
self.assertTrue(all(res))
@logTest
def test_withLookback_ArrayFeatureSample(self):
samples = [
Sample(features=np.array([100, 101]), label=1),
Sample(features=np.array([102, 103]), label=2),
Sample(features=np.array([104, 105]), label=3),
Sample(features=np.array([106, 107]), label=4),
Sample(features=np.array([108, 109]), label=5),
Sample(features=np.array([110, 111]), label=6),
Sample(features=np.array([112, 113]), label=7),
Sample(features=np.array([114, 115]), label=8),
Sample(features=np.array([116, 117]), label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[100, 101], [102, 103], [104, 105]],
[[102, 103], [104, 105], [106, 107]],
[[104, 105], [106, 107], [108, 109]],
[[106, 107], [108, 109], [110, 111]],
]
)
lab1 = np.array([3, 4, 5, 6])
X2 = np.array(
[
[[108, 109], [110, 111], [112, 113]],
[[110, 111], [112, 113], [114, 115]],
[[112, 113], [114, 115], [116, 117]],
]
)
lab2 = np.array([7, 8, 9])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
tmp1lab = batch1.getLabelsAs("array")
res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)]
tmp2 = batch2.getFeaturesAs("array")
tmp2lab = batch2.getLabelsAs("array")
res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)]
self.assertTrue(all(res))
@logTest
def test_withLookback_ListFeatureSample(self):
samples = [
Sample(features=[100, 101], label=1),
Sample(features=[102, 103], label=2),
Sample(features=[104, 105], label=3),
Sample(features=[106, 107], label=4),
Sample(features=[108, 109], label=5),
Sample(features=[110, 111], label=6),
Sample(features=[112, 113], label=7),
Sample(features=[114, 115], label=8),
Sample(features=[116, 117], label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[100, 101], [102, 103], [104, 105]],
[[102, 103], [104, 105], [106, 107]],
[[104, 105], [106, 107], [108, 109]],
[[106, 107], [108, 109], [110, 111]],
]
)
lab1 = np.array([3, 4, 5, 6])
X2 = np.array(
[
[[108, 109], [110, 111], [112, 113]],
[[110, 111], [112, 113], [114, 115]],
[[112, 113], [114, 115], [116, 117]],
]
)
lab2 = np.array([7, 8, 9])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
tmp1lab = batch1.getLabelsAs("array")
res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)]
tmp2 = batch2.getFeaturesAs("array")
tmp2lab = batch2.getLabelsAs("array")
res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)]
self.assertTrue(all(res))
@logTest
def test_features_labels(self):
self.assertTrue(isinstance(lazyDat.features(), Generator))
self.assertTrue(isinstance(lazyDat.labels(), Generator))
self.assertTrue(isinstance(lazyDat.getFeaturesAs(), Generator))
self.assertTrue(isinstance(lazyDat.getLabelsAs(), Generator))
self.assertEqual(next(lazyDat.getFeaturesAs()), samples[0].features)
self.assertEqual(next(lazyDat.getLabelsAs()), samples[0].label)
self.assertEqual(next(lazyDat.features()), samples[0].features)
self.assertEqual(next(lazyDat.labels()), samples[0].label)
class CachedDatasetTests(TestCase):
@logTest
def test_to_df(self):
self.assertTrue(isinstance(CachedDataset(lazyDat).to_df(), pd.DataFrame))
self.assertTrue(
(
CachedDataset(lazyDat).to_df()["features"][0].values
== [100, 102, 104, 106, 108, 110, 112, 114, 116]
).all()
)
self.assertTrue(
(
CachedDataset(lazyDat).to_df()["labels"][0].values
== [1, 2, 3, 4, 5, 6, 7, 8, 9]
).all()
)
@logTest
def test_asPandasDataset(self):
self.assertTrue(
isinstance(CachedDataset(lazyDat).asPandasDataset, PandasDataset)
)
self.assertTrue(
(
CachedDataset(lazyDat).asPandasDataset.features[0].values
== [100, 102, 104, 106, 108, 110, 112, 114, 116]
).all()
)
self.assertTrue(
(
CachedDataset(lazyDat).asPandasDataset.labels[0].values
== [1, 2, 3, 4, 5, 6, 7, 8, 9]
).all()
)
class PandasDatasetTests(TestCase):
dataset: PandasDataset = PandasDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=pd.Series([0, 0, 0, 1], name="Label"),
)
dataset_no_label: PandasDataset = PandasDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
)
)
@logTest
def test_check_none(self):
self.assertEqual(self.dataset._check_none(None), None)
self.assertEqual(self.dataset._check_none("test"), "test")
@logTest
def test__len__(self):
self.assertEqual(self.dataset.__len__(), 4)
@logTest
def test_items(self):
self.assertTrue(isinstance(self.dataset.items, Iterator))
self.assertEqual(next(self.dataset.items).features, {"feat1": 1.0, "feat2": 1})
self.assertEqual(next(self.dataset.items).label["Label"], 0)
self.assertEqual(
next(self.dataset_no_label.items).features, {"feat1": 1.0, "feat2": 1}
)
self.assertEqual(next(self.dataset_no_label.items).label, None)
@logTest
def test_dropna_none_labels(self):
res = pd.concat(
[pd.Series([1, 2, 3], name="feat1"), pd.Series([1, 3, 4], name="feat2")],
axis=1,
)
self.assertTrue(
(
self.dataset.dropna(subset=["feat1"]).features.reset_index(drop=True)
== res
)
.all()
.all()
)
self.assertTrue(
(
self.dataset.dropna(feat__subset=["feat1"]).features.reset_index(
drop=True
)
== res
)
.all()
.all()
)
self.assertTrue(
(
self.dataset.dropna(labs__subset=["Label"]).features.reset_index(
drop=True
)
== res
)
.all()
.all()
)
@logTest
def test_cached(self):
self.assertTrue(self.dataset.cached)
@logTest
def test_features_labels(self):
self.assertEqual(
self.dataset.features,
pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
)
self.assertTrue((self.dataset.labels["Label"] == pd.Series([0, 0, 0, 1])).all())
@logTest
def test_index(self):
self.assertTrue((self.dataset.index == range(4)).all())
@logTest
def test_createObject(self):
self.assertTrue(
isinstance(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
),
PandasDataset,
)
)
self.assertEqual(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
).features,
self.dataset_no_label.features,
)
self.assertEqual(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
).labels,
self.dataset_no_label.labels,
)
@logTest
def test_take(self):
self.assertTrue(isinstance(self.dataset.takeAsPandas(1), PandasDataset))
self.assertEqual(
self.dataset.takeAsPandas(1).features.feat2, pd.Series([1], name="feat2")
)
self.assertEqual(
self.dataset.takeAsPandas(1).labels["Label"], pd.Series([0], name="Label")
)
@logTest
def test_loc(self):
self.assertEqual(self.dataset.loc(2).features[2]["feat1"], 2)
self.assertEqual(self.dataset.loc(2).features[2]["feat2"], 3)
self.assertEqual(self.dataset.loc(2).labels[2]["Label"], 0)
self.assertTrue(self.dataset_no_label.loc(2).labels is None)
@logTest
def test_from_sequence(self):
features_1 = pd.DataFrame(
{"feat1": [1, 2, 3, 4], "feat2": [100, 200, 300, 400]}, index=[1, 2, 3, 4]
)
features_2 = pd.DataFrame(
{"feat1": [9, 11, 13, 14], "feat2": [90, 110, 130, 140]},
index=[10, 11, 12, 13],
)
features_3 = pd.DataFrame(
{"feat1": [90, 10, 10, 1400], "feat2": [0.9, 0.11, 0.13, 0.14]},
index=[15, 16, 17, 18],
)
labels_1 = pd.DataFrame({"target": [1, 0, 1, 1]}, index=[1, 2, 3, 4])
labels_2 = pd.DataFrame({"target": [1, 1, 1, 0]}, index=[10, 11, 12, 13])
labels_3 = pd.DataFrame({"target": [0, 1, 1, 0]}, index=[15, 16, 17, 18])
dataset_1 = PandasDataset(features_1, labels_1)
dataset_2 = PandasDataset(features_2, labels_2)
dataset_3 = PandasDataset(features_3, labels_3)
dataset_merged = PandasDataset.from_sequence([dataset_1, dataset_2, dataset_3])
self.assertEqual(
pd.concat([features_1, features_2, features_3]), dataset_merged.features
)
self.assertEqual(
pd.concat([labels_1, labels_2, labels_3]), dataset_merged.labels
)
@logTest
def test_serialization(self):
filename = os.path.join(TMP_FOLDER, "my_dataset.p")
self.dataset.write(filename)
newDataset: PandasDataset = PandasDataset.load(filename)
self.assertTrue(isinstance(newDataset, PandasDataset))
self.assertTrue(
(self.dataset.features.fillna("NaN") == newDataset.features.fillna("NaN"))
.all()
.all()
)
@logTest
def test_creation_from_samples(self):
samples = [
Sample(features=[100, 101], label=1, name=1),
Sample(features=[102, 103], label=2, name=2),
Sample(features=[104, 105], label=1, name=3),
Sample(features=[106, 107], label=2, name=4),
Sample(features=[108, 109], label=2, name=5),
Sample(features=[110, 111], label=2, name=6),
Sample(features=[112, 113], label=1, name=7),
Sample(features=[114, 115], label=2, name=8),
Sample(features=[116, 117], label=2, name=9),
]
lazyDataset = CachedDataset(samples).filter(lambda x: x.label <= 5)
assert isinstance(lazyDataset, LazyDataset)
for format in ["pandas", "array", "dict"]:
features1 = lazyDataset.getFeaturesAs(format)
labels1 = lazyDataset.getLabelsAs(format)
cached: CachedDataset = lazyDataset.asCached
features2 = cached.getFeaturesAs(format)
labels2 = cached.getLabelsAs(format)
self.assertEqual(features1, features2)
self.assertEqual(labels1, labels2)
pandasDataset = cached.asPandasDataset
features3 = pandasDataset.getFeaturesAs(format)
labels3 = pandasDataset.getLabelsAs(format)
self.assertEqual(features1, features3)
self.assertEqual(labels1, labels3)
@logTest
def test_union(self):
union = self.dataset.union(
PandasDataset(
features=pd.concat(
[
pd.Series([np.nan, 5, 6, 7], name="feat1"),
pd.Series([7, 8, 9, 10], name="feat2"),
],
axis=1,
),
labels= | pd.Series([0, 0, 0, 1], name="Label") | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test functions for tools.tools
"""
import warnings
from six.moves import range
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal,
assert_string_equal)
import pandas as pd
import pandas.util.testing as tm
import pytest
from sm2.datasets import longley
from sm2.tools import tools
class TestAddConstant(object):
def test_add_constant_list(self):
x = list(range(1, 5))
x = tools.add_constant(x)
y = np.asarray([[1, 1, 1, 1], [1, 2, 3, 4.]]).T
assert_equal(x, y)
def test_add_constant_1darray(self):
x = np.arange(1, 5)
x = tools.add_constant(x)
y = np.asarray([[1, 1, 1, 1], [1, 2, 3, 4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1darray(self):
x = np.ones(5)
x = tools.add_constant(x, has_constant='skip')
assert_equal(x, np.ones((5, 1)))
with pytest.raises(ValueError):
tools.add_constant(x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.ones((5, 2)))
def test_add_constant_has_constant2darray(self):
x = np.asarray([[1, 1, 1, 1], [1, 2, 3, 4.]]).T
y = tools.add_constant(x, has_constant='skip')
assert_equal(x, y)
with pytest.raises(ValueError):
tools.add_constant(x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.column_stack((np.ones(4), x)))
def test_add_constant_recarray(self):
dt = np.dtype([('', int), ('', '<S4'),
('', np.float32), ('', np.float64)])
x = np.array([(1, 'abcd', 1.0, 2.0),
(7, 'abcd', 2.0, 4.0),
(21, 'abcd', 2.0, 8.0)], dt)
x = x.view(np.recarray)
y = tools.add_constant(x)
assert_equal(y['const'], np.array([1.0, 1.0, 1.0]))
for f in x.dtype.fields:
assert y[f].dtype == x[f].dtype
def test_add_constant_series(self):
s = pd.Series([1.0, 2.0, 3.0])
output = tools.add_constant(s)
expected = pd.Series([1.0, 1.0, 1.0], name='const')
tm.assert_series_equal(expected, output['const'])
def test_add_constant_dataframe(self):
df = pd.DataFrame([[1.0, 'a', 4], [2.0, 'bc', 9], [3.0, 'def', 16]])
output = tools.add_constant(df)
expected = pd.Series([1.0, 1.0, 1.0], name='const')
tm.assert_series_equal(expected, output['const'])
dfc = df.copy()
dfc.insert(0, 'const', np.ones(3))
tm.assert_frame_equal(dfc, output)
def test_add_constant_zeros_array(self):
a = np.zeros(100)
output = tools.add_constant(a)
assert_equal(output[:, 0], np.ones(100))
def test_add_constant_zeros_series(self):
s = pd.Series([0.0, 0.0, 0.0])
output = tools.add_constant(s)
expected = pd.Series([1.0, 1.0, 1.0], name='const')
| tm.assert_series_equal(expected, output['const']) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 13:24:18 2020
@author: earne
"""
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipperplots import (
get_any_idi,
get_side_idi,
get_content_idi,
get_chronogram_vals,
preproc_averaging
)
def format_avg_output(output, averaging):
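"""Relabel the index of an averaged output DataFrame according to the averaging mode:
'datetime' keeps dates, 'time' converts to hours since the first timestamp, and
'elapsed' casts the index to integer hours."""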
if averaging == 'datetime':
output.index.name = 'Date'
elif averaging == 'time':
first = output.index[0]
output.index = [i - first for i in output.index]
output.index = (output.index.total_seconds()/3600).astype(int)
output.index.name = 'Hours Since {}:00'.format(str(first.hour))
elif averaging == 'elapsed':
output.index = output.index.astype(int)
output.index.name = 'Elapsed Hours'
return output
def drinkcount_cumulative(sipper, show_left=True, show_right=True,
show_content=[], **kwargs):
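"""Build a DataFrame of cumulative drink counts for the left and/or right tubes
and any requested contents, optionally restricted by a 'date_filter' in kwargs."""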
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
l = pd.DataFrame({'LeftCount' : df['LeftCount']}, index=df.index)
output = output.join(l, how='outer')
if show_right:
r = pd.DataFrame({'RightCount' : df['RightCount']}, index=df.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
temp = pd.DataFrame({c +'Count' : count}, index=count.index)
output = output.join(temp, how='outer')
return output
def drinkcount_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], **kwargs):
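"""Build a DataFrame of drink counts per binsize interval (from the diff of the
cumulative counts) for the left/right tubes and any requested contents."""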
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
binned = df['LeftCount'].diff().resample(binsize, base=base).sum()
l = pd.DataFrame({'LeftCount' : binned}, index=binned.index)
output = output.join(l, how='outer')
if show_right:
binned = df['RightCount'].diff().resample(binsize, base=base).sum()
r = pd.DataFrame({'RightCount' : binned}, index=binned.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
temp = pd.DataFrame({c+'Count' : binned}, index=binned.index)
output = output.join(temp, how='outer')
return output
def drinkduration_cumulative(sipper, show_left=True, show_right=True,
show_content=[], **kwargs):
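"""Build a DataFrame of cumulative drink durations for the left and/or right tubes
and any requested contents, optionally restricted by a 'date_filter' in kwargs."""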
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
l = pd.DataFrame({'LeftDuration' : df['LeftDuration']}, index=df.index)
output = output.join(l, how='outer')
if show_right:
r = pd.DataFrame({'RightDuration' : df['RightDuration']}, index=df.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
temp = pd.DataFrame({c+'Duration' : count}, index=count.index)
output = output.join(temp, how='outer')
return output
def drinkduration_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], **kwargs):
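"""Build a DataFrame of drink durations per binsize interval for the left/right
tubes and any requested contents."""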
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
binned = df['LeftDuration'].diff().resample(binsize, base=base).sum()
l = pd.DataFrame({'LeftDuration' : binned}, index=binned.index)
output = output.join(l, how='outer')
if show_right:
binned = df['RightDuration'].diff().resample(binsize, base=base).sum()
r = pd.DataFrame({'RightDuration' : binned}, index=binned.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
temp = pd.DataFrame({c+'Duration' : binned}, index=binned.index)
output = output.join(temp, how='outer')
return output
def interdrink_intervals(sippers, kde=True, logx=True,
combine=False, **kwargs):
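"""Compute interdrink-interval distributions for the given sippers, pooled into
one curve when combine=True or one curve per sipper otherwise."""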
if combine:
output = idi_onecurve(sippers, kde, logx, **kwargs)
else:
output = idi_multicurve(sippers, kde, logx, **kwargs)
return output
def idi_onecurve(sippers, kde, logx, **kwargs):
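"""Pool interdrink intervals from all sippers into a single histogram (with
optional KDE); returns the bar and KDE DataFrames."""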
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
combined = []
for sipper in sippers:
fig = plt.figure()
plt.clf()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
kde_df = kde_df.reindex(x)
kde_df['Values'] = y
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
bar_df = bar_df.reindex(bar_x)
bar_df['Values'] = bar_h
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
plt.close()
return bar_df, kde_df
def idi_multicurve(sippers, kde, logx, **kwargs):
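"""Compute a separate interdrink-interval histogram (with optional KDE) for each
sipper, one column per file; returns the bar and KDE DataFrames."""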
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for sipper in sippers:
fig = plt.figure()
plt.clf()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
plot = sns.distplot(y, bins=bins, norm_hist=False, kde=kde)
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({sipper.filename : bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({sipper.filename : y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def interdrink_intervals_byside(sippers, kde=True, logx=True, **kwargs):
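"""Pool interdrink intervals across sippers separately for the Left and Right
sides; returns bar and KDE DataFrames with one column per side."""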
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for side in ['Left', 'Right']:
combined = []
fig = plt.figure()
plt.clf()
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_side_idi(sipper, side)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({side:y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({side:bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def interdrink_intervals_bycontent(sippers, idi_content, kde=True, logx=True,
**kwargs):
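"""Pool interdrink intervals across sippers separately for each content in
idi_content, building histogram (and optional KDE) columns per content."""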
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for c in idi_content:
combined = []
fig = plt.figure()
plt.clf()
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_content_idi(sipper, c, df=df)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({c:y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = | pd.DataFrame({c:bar_h}, index=bar_x) | pandas.DataFrame |
import pandas as pd
import gensim
import csv
import random
def kw_bigram_score(concept, segment):
"""
Rank the segment using the key word search algorithm
:param segment (list): a list of the tokens in the segment
:param concept (str): the concept
:return: a numeric score of the number of occurences of the concept word normalized by document length
"""
score = 0
prev = None
for word in segment:
if prev is not None:
if prev.lower() == concept[0].lower() and word.lower() == concept[1].lower():
score += 1
prev = word
return score/len(segment)
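# Example with hypothetical tokens: kw_bigram_score(("machine", "learning"),
# ["machine", "learning", "is", "fun"]) finds one bigram match across four
# tokens and returns 0.25.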
def load_segments(path):
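"""Read segments from a CSV file in which each row starts with a segment id
followed by its tokens; returns a DataFrame of token lists indexed by segment id."""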
data = {'segment': []}
index = []
with open(path) as csv_file:
r = csv.reader(csv_file)
for row in r:
data['segment'].append(row[1:])
index.append(row[0])
return pd.DataFrame(data=data, index=index)
def rank_using_kw(segments, concept):
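"""Score every non-definition segment against the concept bigram and return a
DataFrame of ranks (highest score first), indexed by segment id and concept."""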
distances = []
for indx, row in segments.iterrows():
if indx.startswith('def'):
continue
kw_score = kw_bigram_score(concept, row['segment'])
distances.append({'id': indx, 'kw_score': kw_score})
srt_distances = sorted(distances, key=lambda x: x['kw_score'], reverse=True)
print(srt_distances[:10])
rankings = [{'id': seg['id'], 'rank': i} for i, seg in enumerate(srt_distances)]
print(rankings[:10])
data = {'rank':[]}
index = []
for ranking in rankings:
data['rank'].append(ranking['rank'])
index.append(f'{ranking["id"]}-{concept[0].capitalize()} {concept[1].capitalize()}')
return | pd.DataFrame(data=data, index=index) | pandas.DataFrame |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
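# Based on the cases below, _match_floating_tenors is expected to align the
# overnight-index leg's designated maturity with the LIBOR leg's tenor, and to
# leave swaps whose legs share the same benchmark untouched.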
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# This case checks that the SOFR maturity is matched to the LIBOR leg and that the legs are flipped to find the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal( | pd.Series([5, 1, 2], index=idx, name='impliedVolatility') | pandas.Series |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
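# Casting an all-null column to an integer dtype should keep the nulls
# rather than coercing them to a fill value; the assert below checks this.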
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
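# Bits set in 0b00101101 mark rows 0, 2, 3 and 5 as valid, so rows 1 and 4
# of the masked column are null (hence null_count == 2 below).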
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
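# Note: as_matrix()/as_gpu_matrix() are legacy conversion APIs; later cudf
# releases expose the same conversions as to_numpy()/to_cupy().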
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
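# partition_by_hash (tested below) splits a DataFrame into nparts frames by
# hashing the key columns, conceptually hash(keys) % nparts, so rows with
# equal keys always land in the same partition; the set-intersection check
# below verifies that no key appears in more than one partition.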
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the key columns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# pandas' concat introduces NaN for columns missing from one frame, which
# upcasts numerical columns to float; cast the cudf result's numerical
# columns to float64 (nulls become NaN) so the two frames compare equal
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# Compare as numpy arrays: pyarrow's to_pandas() returns numpy-backed data
# whose dtype lines up with cudf's to_array() output better than a direct
# pyarrow comparison would
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
import yaml
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
# from ._03_Modeling import Boba_Modeling as m
class Boba_Sys_Diagnostics():
def __init__(self):
pass
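# NOTE: the methods below read attributes such as self.position_group and
# self.year that are not set in __init__; they are assumed to be supplied by
# the wider Boba pipeline (e.g. a class that mixes this one in).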
def run_sys_scoring(self, model, target,prod):
if prod == True:
pass
elif (self.position_group == 'hitters' and target in ['BABIP','BB%','K%']):
pass
elif (self.position_group == 'SP' and target in ['OBP','SLG','ShO_per_GS','CG_per_GS']):
pass
elif (self.position_group == 'RP' and target in ['OBP','SLG','HLD_per_G']):
pass
else:
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
path = 'data/scoring/evaluation_'+self.position_group+'_'+str(self.year-1)+'.csv'
if os.path.exists(path):
print('Existing evaluation file found; loading it')
evaluation_df = | pd.read_csv(path,index_col=0) | pandas.read_csv |
from ipywidgets import Button, Text, VBox, HBox, Layout, Dropdown, Checkbox, \
DatePicker, Select, SelectMultiple, Tab, BoundedFloatText, Label, Output, interactive
from IPython.display import display
import traitlets
import itertools
import pandas as pd
import copy
from tkinter import Tk, filedialog
from datetime import datetime, date
import xlsxwriter
import data_inputs, scenario_handling, observed_handling
#-------------------------------------------------------------------------------------------------#
#-----------------------------------Tab 1 - Observed flow widgets---------------------------------#
#-------------------------------------------------------------------------------------------------#
# Input tab widgets--------------------------------------------------------------------------------
# Label widgets:
defaults_label = {'style': {'description_width':'initial'}}
gauge_inputs_title = Label(value="Configure inputs for observed EWRs", **defaults_label)
date_selection = Label(value="1. Select date range of interest", **defaults_label)
cs_selection = Label(value="2. Select the catchment (1) and sites (1+)", **defaults_label)
allowance_header_o = Label(value="3. Enter an allowance", **defaults_label)
run_header_o = Label(value="4. Enter output file name and run", **defaults_label)
# Date selection widgets:
start_date = DatePicker(description='Start date:', value = date(2014,1,1), disabled=False)
end_date = DatePicker(description='End date', value = date(2020,1,1), disabled=False)
# Catchment and site selection widgets:
defaults_sites = {'layout': Layout(width="100%"), 'style': {'description_width':'initial'}}
catchments_gauges = data_inputs.catchments_gauges_dict()
def change_catchment(*args):
'''Updates the sites displayed based on the user selection of the catchment'''
catchments_gauges = data_inputs.catchments_gauges_dict()
sites.options=list(catchments_gauges[catchment.value].values())
catchment = Select(options=list(catchments_gauges.keys()),
description='Catchment: (select one)',
rows=len(catchments_gauges.keys()), **defaults_sites)
sites = SelectMultiple(options=catchments_gauges[catchment.value].values(),
description='Sites: (shift to select multiple)',
rows=len(catchments_gauges[catchment.value].values()),
**defaults_sites)
catchment.observe(change_catchment, 'value')
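# Changing the catchment dropdown fires change_catchment via the traitlets
# observer, which repopulates the sites widget with that catchment's gauges.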
# Apply allowance to EWR indicators:
defaults = {'value': 0, 'min': 0, 'max': 20, 'step': 0.1, 'disabled': False,
'style': {'description_width':'initial'}}
min_threshold_allowance_o = BoundedFloatText(description='Allowance applied to min threshold (%)', **defaults)
max_threshold_allowance_o = BoundedFloatText(description='Allowance applied to max threshold (%)', **defaults)
duration_allowance_o = BoundedFloatText(description='Allowance applied to min duration (%)', **defaults)
drawdown_allowance_o = BoundedFloatText(description='Allowance applied to max drawdown rate (%)', **defaults)
# Output file name:
file_name_o = Text(value='Observed flow EWRs', placeholder='Enter file name',
description='Output file name:', disabled=False,
style= {'description_width':'initial'})
# Results tab--------------------------------------------------------------------------------------
def get_gauges_to_pull():
'''Convert sites selected to gauge numbers'''
catchments_gauges = data_inputs.catchments_gauges_dict()
gauges = []
for gauge, name in catchments_gauges[catchment.value].items():
if name in sites.value:
gauges.append(gauge)
return gauges
def on_gauge_button_clicked(b):
'''Run the realtime program'''
with gauge_run_output:
b.style.button_color='lightgreen'
tab_gauge.selected_index = 1
global raw_data_o, results_summary_o
raw_data_o, results_summary_o = None, None
# Get gauge list:
gauges = get_gauges_to_pull()
# Retrieve and convert dates:
# dates = {'start_date': str(start_date.value).replace('-',''),
# 'end_date': str(end_date.value).replace('-','')}
dates = {'start_date': start_date.value,
'end_date': end_date.value}
# Get allowances:
MINT = (100 - min_threshold_allowance_o.value)/100
MAXT = (100 + max_threshold_allowance_o.value)/100
DUR = (100 - duration_allowance_o.value)/100
DRAW = (100 - drawdown_allowance_o.value)/100
allow ={'minThreshold': MINT, 'maxThreshold': MAXT, 'duration': DUR, 'drawdown': DRAW}
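# The allowances relax the EWR parameters: minimum thresholds, durations and
# drawdown rates are scaled down by the entered percentage, and maximum
# thresholds are scaled up.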
# hardcode in the climate cats for the observed flows:
raw_data_o, results_summary_o = observed_handling.observed_handler(gauges, dates, allow,
'Standard - 1911 to 2018 climate categorisation')
display(results_summary_o.style)
def gauge_output_button_clicked(b):
'''Output the realtime analysis to excel'''
with gauge_output:
b.style.button_color='lightgreen'
global results_summary_o
global raw_data_o
realtime_fileName = file_name_o.value # Getting the user file name
writer = pd.ExcelWriter('Output_files/' + realtime_fileName + '.xlsx', engine='xlsxwriter')
results_summary_o.to_excel(writer, sheet_name='Gauge_data_summary')
PU_items = data_inputs.get_planning_unit_info()
PU_items.to_excel(writer, sheet_name='Planning unit metadata')
get_index = results_summary_o.reset_index().set_index(['gauge', 'planning unit']).index.unique()
for locPU in get_index:
temp_df = pd.DataFrame()
if temp_df.empty == True:
temp_df = raw_data_o['observed'][locPU[0]][locPU[1]].copy(deep=True)
temp_df.columns = pd.MultiIndex.from_product([[str('observed')],temp_df.columns])
else:
df_to_add = pd.DataFrame()
df_to_add = raw_data_o['observed'][locPU[0]][locPU[1]].copy(deep=True)
df_to_add.columns = \
pd.MultiIndex.from_product([[str('observed')],df_to_add.columns])
temp_df = pd.concat([temp_df, df_to_add], axis = 1)
PU_code = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == locPU[1]].index[0]]
temp_df.to_excel(writer, sheet_name=str(PU_code))
writer.save()
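# Note: depending on the pandas version installed, ExcelWriter.save() may be
# deprecated in favour of close(); adjust accordingly.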
#Run the analysis (from input tab):
gauge_run_button = Button(description="Run gauge program")
gauge_run_output = Output(layout={'border': '1px solid black'})
gauge_run_button.on_click(on_gauge_button_clicked)
# Output results tab-------------------------------------------------------------------------------
default_output = {'style': {'description_width':'initial'}}
gauge_output_button = Button(description='Output to Excel', **default_output)
gauge_output = Output(layout={'border': '1px solid black', **default_output})
gauge_output_button.on_click(gauge_output_button_clicked)
#-------------------------------------------------------------------------------------------------#
#-----------------------------------Tab 2 - Scenario testing widgets------------------------------#
#-------------------------------------------------------------------------------------------------#
# Input tab----------------------------------------------------------------------------------------
#Labels:
defaults_label = {'style': {'description_width':'initial'}}
model_inputs_title = Label(value="Configure inputs for scenario testing", **defaults_label)
model_selection = Label(value="1. Upload model files", **defaults_label)
model_format_header = Label(value="2. Select format of timeseries data", **defaults_label)
climate_format_header = Label(value="3. Select climate sequence to load", **defaults_label)
allowance_header_s = Label(value="4. Enter an allowance", **defaults_label)
run_header_s = Label(value="5. Enter output file name and run", **defaults_label)
fileLocationHeader = Label(value="Provide file path and names:", **defaults_label)
# Local file upload:
class SelectFilesButton(Button):
"""A file widget that leverages tkinter.filedialog."""
def __init__(self):
super(SelectFilesButton, self).__init__()
# Add the selected_files trait
self.add_traits(files=traitlets.traitlets.List())
# Create the button.
self.description = "Select Files"
self.icon = "square-o"
self.style.button_color = "orange"
# Set on click behavior.
self.on_click(self.select_files)
@staticmethod
def select_files(b):
global file_names
global select_benchmark
"""Generate instance of tkinter.filedialog.
Parameters
----------
b : obj:
An instance of ipywidgets.widgets.Button
"""
try:
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
# List of selected files will be set to b.files
b.files = filedialog.askopenfilename(multiple=True)
b.description = "Files Selected"
b.icon = "check-square-o"
b.style.button_color = "lightgreen"
except:
print('no file selected')
pass
load_model_files = SelectFilesButton()
# Remote file upload:
box_defaults = {'value':'', 'placeholder':'Enter/path/to/file', 'disabled': False,
'style': {'description_width':'initial'}}
path_box_1 = Text(description='Scenario file one name:', **box_defaults)
path_box_2 = Text(description='Scenario file two name:', **box_defaults)
path_box_3 = Text(description='Scenario file three name:', **box_defaults)
path_box_4 = Text(description='Scenario file four name:', **box_defaults)
path_box_5 = Text(description='Scenario file five name:', **box_defaults)
#Allowance widgets:
defaults = {'value': 0, 'min': 0, 'max': 20, 'step': 0.1, 'disabled': False,
'style': {'description_width':'initial'}}
min_threshold_allowance_s = BoundedFloatText(description='Allowance applied to min threshold (%)', **defaults)
max_threshold_allowance_s = BoundedFloatText(description='Allowance applied to max threshold (%)', **defaults)
duration_allowance_s = BoundedFloatText(description='Allowance applied to min duration (%)', **defaults)
drawdown_allowance_s = BoundedFloatText(description='Allowance applied to max drawdown rate (%)', **defaults)
# Model format widget:
model_format_type = Dropdown(
options=[('Bigmod - MDBA'), ('IQQM - NSW 10,000 years'), ('Source - NSW (res.csv)')],
value='Bigmod - MDBA',
description='',
style= {'description_width':'initial'})
# Climate file widget:
climate_file = Dropdown(
options=[('Standard - 1911 to 2018 climate categorisation'), ('NSW 10,000 year climate sequence')],
value='Standard - 1911 to 2018 climate categorisation',
description='',
style= {'description_width':'initial'})
# Output file name:
file_name_s = Text(
value='Scenario_test',
placeholder='Enter file name',
description='Output file name:',
disabled=False,
style= {'description_width':'initial'})
# Results display tab------------------------------------------------------------------------------
def catchment_checker(catchment):
'''Pass catchment name, returns list of gauges in this catchment'''
catchments_gauges = data_inputs.catchments_gauges_dict()
return list(catchments_gauges[catchment].keys())
def view(x=''):
'''Shows either all results, or results restricted to user catchment selection'''
if x=='All':
return display(data_summary_s.style)
return display((data_summary_s.loc[data_summary_s.index.isin(catchment_checker(x), level='gauge')]).style)
def get_locations_from_scenarios(data_summary):
'''Ingest a summary of results, look at the locations analysed,
return a list of catchments included in the analysis'''
catchments_gauges = data_inputs.catchments_gauges_dict()
location_list = set(data_summary_s.index.get_level_values(0))
catchments_gauges_subset = copy.deepcopy(catchments_gauges)
for catch in catchments_gauges.keys():
for site in catchments_gauges[catch]:
if site not in location_list:
del catchments_gauges_subset[catch][site]
else:
continue
items = ['All']+list(catchments_gauges_subset.keys())
return items
def get_file_names(loaded_files):
'''Take in the file location strings from the user's loaded files,
return a dictionary mapping each file name (without extension) to its location string'''
file_locations = {}
for file in loaded_files:
full_name = file.split('/')
name_exclude_extension = full_name[-1].split('.csv')[0]
file_locations[str(name_exclude_extension)] = file
return file_locations
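# For example, a loaded file 'C:/models/Scenario_A.csv' (hypothetical path)
# would be returned as {'Scenario_A': 'C:/models/Scenario_A.csv'}.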
def on_model_button_clicked(b):
'''Run the scenario testing program'''
with model_run_output:
b.style.button_color='lightgreen'
tab_model.selected_index = 1
global raw_data_s, data_summary_s, select_benchmark
# Get file names and their system locations
if load_model_files.files != []:
modelFiles = load_model_files.files
else:
modelFiles = []
if path_box_1.value != '':
PB1 = path_box_1.value.strip()
modelFiles.append(PB1)
if path_box_2.value != '':
PB2 = path_box_2.value.strip()
modelFiles.append(PB2)
if path_box_3.value != '':
PB3 = path_box_3.value.strip()
modelFiles.append(PB3)
if path_box_4.value != '':
PB4 = path_box_4.value.strip()
modelFiles.append(PB4)
if path_box_5.value != '':
PB5 = path_box_5.value.strip()
modelFiles.append(PB5)
file_locations = get_file_names(modelFiles)
# Get tolerance:
minThreshold_tolerance = (100 - min_threshold_allowance_s.value)/100
maxThreshold_tolerance = (100 + max_threshold_allowance_s.value)/100
duration_tolerance = (100 - duration_allowance_s.value)/100
drawdown_tolerance = (100 - drawdown_allowance_s.value)/100
allowanceDict ={'minThreshold': minThreshold_tolerance, 'maxThreshold': maxThreshold_tolerance,
'duration': duration_tolerance, 'drawdown': drawdown_tolerance}
# Run the requested analysis on the loaded scenarios:
raw_data_s, data_summary_s = scenario_handling.scenario_handler(file_locations,
model_format_type.value,
allowanceDict,
climate_file.value)
items = get_locations_from_scenarios(data_summary_s)
w = Select(options=items)
#show the results of the selected catchment:
out = interactive(view, x=w)
display(out)
def getMetadata():
'''Return a pandas DataFrame of metadata for the model run
(run date, database access date and the tolerances applied)'''
todaysDate = str(datetime.today().strftime('%Y-%m-%d'))
metadata = pd.DataFrame(columns = ['Name', 'Details'])
nameList = ['date run', 'database accessed on', 'duration tolerance applied', 'minimum threshold tolerance applied', 'maximum threshold tolerance applied',
'maximum drawdown rate tolerance applied']
metadataList = [todaysDate, todaysDate, duration_allowance_s.value, min_threshold_allowance_s.value, max_threshold_allowance_s .value, drawdown_allowance_s.value]
metadata['Name'] = nameList
metadata['Details'] = metadataList
return metadata
def model_output_button_clicked(b):
'''Output the scenario testing to excel'''
with model_output:
b.style.button_color='lightgreen'
global data_summary_s
global raw_data_s
# Get the metadata from the model run:
metadata_df = getMetadata()
PU_items = data_inputs.get_planning_unit_info()
model_file_name = file_name_s.value # Getting the user file name
model_scenario_list = list(data_summary_s.columns.levels[0]) # Get the scenario names from the summary columns
writer = | pd.ExcelWriter('Output_files/' + model_file_name + '.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
from featuretools import Timedelta
from featuretools.computational_backends import PandasBackend
from featuretools.primitives import (
Absolute,
Add,
Count,
CumCount,
CumMax,
CumMean,
CumMin,
CumSum,
Day,
Diff,
DirectFeature,
Divide,
Equals,
Feature,
GreaterThan,
GreaterThanEqualTo,
Haversine,
Hour,
IdentityFeature,
IsIn,
IsNull,
Latitude,
LessThan,
LessThanEqualTo,
Longitude,
Mod,
Mode,
Multiply,
Negate,
Not,
NotEquals,
NumCharacters,
NumWords,
Percentile,
Subtract,
Sum,
get_transform_primitives,
make_trans_primitive
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.variable_types import Boolean, Datetime, Numeric, Variable
# some tests change the entityset values, so we have to create it fresh
# for each test (rather than setting scope='module')
@pytest.fixture
def es():
return make_ecommerce_entityset()
@pytest.fixture(scope='module')
def int_es():
return make_ecommerce_entityset(with_integer_time_index=True)
def test_make_trans_feat(es):
f = Hour(es['log']['datetime'])
pandas_backend = PandasBackend(es, [f])
df = pandas_backend.calculate_all_features(instance_ids=[0],
time_last=None)
v = df[f.get_name()][0]
assert v == 10
def test_diff(es):
value = IdentityFeature(es['log']['value'])
customer_id_feat = \
DirectFeature(es['sessions']['customer_id'],
child_entity=es['log'])
diff1 = Diff(value, es['log']['session_id'])
diff2 = Diff(value, customer_id_feat)
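# Diff takes first differences of `value` within groups: by session_id for
# diff1 and by the customer id (pulled onto the log entity via DirectFeature)
# for diff2; the first row of each group has no predecessor, hence the NaNs
# in the expected values below.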
pandas_backend = PandasBackend(es, [diff1, diff2])
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
val1 = df[diff1.get_name()].values.tolist()
val2 = df[diff2.get_name()].values.tolist()
correct_vals1 = [
np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7
]
correct_vals2 = [np.nan, 5, 5, 5, 5, -20, 1, 1, 1, -3, np.nan, 5, -5, 7, 7]
for i, v in enumerate(val1):
v1 = val1[i]
if np.isnan(v1):
assert (np.isnan(correct_vals1[i]))
else:
assert v1 == correct_vals1[i]
v2 = val2[i]
if np.isnan(v2):
assert (np.isnan(correct_vals2[i]))
else:
assert v2 == correct_vals2[i]
def test_diff_single_value(es):
diff = Diff(es['stores']['num_square_feet'], es['stores'][u'région_id'])
pandas_backend = PandasBackend(es, [diff])
df = pandas_backend.calculate_all_features(instance_ids=[5],
time_last=None)
assert df.shape[0] == 1
assert df[diff.get_name()].dropna().shape[0] == 0
def test_compare_of_identity(es):
to_test = [(Equals, [False, False, True, False]),
(NotEquals, [True, True, False, True]),
(LessThan, [True, True, False, False]),
(LessThanEqualTo, [True, True, True, False]),
(GreaterThan, [False, False, False, True]),
(GreaterThanEqualTo, [False, False, True, True])]
features = []
for test in to_test:
features.append(test[0](es['log']['value'], 10))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_direct(es):
log_rating = DirectFeature(es['products']['rating'],
child_entity=es['log'])
to_test = [(Equals, [False, False, False, False]),
(NotEquals, [True, True, True, True]),
(LessThan, [False, False, False, True]),
(LessThanEqualTo, [False, False, False, True]),
(GreaterThan, [True, True, True, False]),
(GreaterThanEqualTo, [True, True, True, False])]
features = []
for test in to_test:
features.append(test[0](log_rating, 4.5))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_transform(es):
day = Day(es['log']['datetime'])
to_test = [(Equals, [False, True]),
(NotEquals, [True, False]),
(LessThan, [True, False]),
(LessThanEqualTo, [True, True]),
(GreaterThan, [False, False]),
(GreaterThanEqualTo, [False, True])]
features = []
for test in to_test:
features.append(test[0](day, 10))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 14],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_agg(es):
count_logs = Count(es['log']['id'],
parent_entity=es['sessions'])
to_test = [(Equals, [False, False, False, True]),
(NotEquals, [True, True, True, False]),
(LessThan, [False, False, True, False]),
(LessThanEqualTo, [False, False, True, True]),
(GreaterThan, [True, True, False, False]),
(GreaterThanEqualTo, [True, True, False, True])]
features = []
for test in to_test:
features.append(test[0](count_logs, 2))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_all_nans(es):
nan_feat = Mode(es['log']['product_id'], es['sessions'])
compare = nan_feat == 'brown bag'
# before all data
time_last = pd.Timestamp('1/1/1993')
pandas_backend = PandasBackend(es, [nan_feat, compare])
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=time_last)
assert df[nan_feat.get_name()].dropna().shape[0] == 0
assert not df[compare.get_name()].any()
def test_arithmetic_of_val(es):
to_test = [(Add, [2.0, 7.0, 12.0, 17.0], [2.0, 7.0, 12.0, 17.0]),
(Subtract, [-2.0, 3.0, 8.0, 13.0], [2.0, -3.0, -8.0, -13.0]),
(Multiply, [0, 10, 20, 30], [0, 10, 20, 30]),
(Divide, [0, 2.5, 5, 7.5], [np.inf, 0.4, 0.2, 2 / 15.0],
[np.nan, np.inf, np.inf, np.inf])]
features = []
logs = es['log']
for test in to_test:
features.append(test[0](logs['value'], 2))
features.append(test[0](2, logs['value']))
features.append(Divide(logs['value'], 0))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[2 * i].get_name()].values.tolist()
assert v == test[1]
v = df[features[2 * i + 1].get_name()].values.tolist()
assert v == test[2]
test = to_test[-1][-1]
v = df[features[-1].get_name()].values.tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1:]
def test_arithmetic_two_vals_fails(es):
with pytest.raises(ValueError):
Add(2, 2)
def test_arithmetic_of_identity(es):
logs = es['log']
to_test = [(Add, [0., 7., 14., 21.]),
(Subtract, [0, 3, 6, 9]),
(Multiply, [0, 10, 40, 90]),
(Divide, [np.nan, 2.5, 2.5, 2.5])]
features = []
for test in to_test:
features.append(test[0](logs['value'], logs['value_2']))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test[:-1]):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
i, test = 3, to_test[-1]
v = df[features[i].get_name()].values.tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1][1:]
def test_arithmetic_of_direct(es):
rating = es['products']['rating']
log_rating = DirectFeature(rating,
child_entity=es['log'])
customer_age = es['customers']['age']
session_age = DirectFeature(customer_age,
child_entity=es['sessions'])
log_age = DirectFeature(session_age,
child_entity=es['log'])
to_test = [(Add, [38, 37, 37.5, 37.5]),
(Subtract, [28, 29, 28.5, 28.5]),
(Multiply, [165, 132, 148.5, 148.5]),
(Divide, [6.6, 8.25, 22. / 3, 22. / 3])]
features = []
for test in to_test:
features.append(test[0](log_age, log_rating))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 3, 5, 7],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
# P TODO: rewrite this test
def test_arithmetic_of_transform(es):
diff1 = Diff(IdentityFeature(es['log']['value']),
IdentityFeature(es['log']['product_id']))
diff2 = Diff(IdentityFeature(es['log']['value_2']),
IdentityFeature(es['log']['product_id']))
to_test = [(Add, [np.nan, 14., -7., 3.]),
(Subtract, [np.nan, 6., -3., 1.]),
(Multiply, [np.nan, 40., 10., 2.]),
(Divide, [np.nan, 2.5, 2.5, 2.])]
features = []
for test in to_test:
features.append(test[0](diff1, diff2))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 2, 11, 13],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert np.isnan(v.pop(0))
assert np.isnan(test[1].pop(0))
assert v == test[1]
def test_not_feature(es):
likes_ice_cream = es['customers']['loves_ice_cream']
not_feat = Not(likes_ice_cream)
features = [not_feat]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1],
time_last=None)
v = df[not_feat.get_name()].values
assert not v[0]
assert v[1]
def test_arithmetic_of_agg(es):
customer_id_feat = es['customers']['id']
store_id_feat = es['stores']['id']
count_customer = Count(customer_id_feat,
parent_entity=es[u'régions'])
count_stores = Count(store_id_feat,
parent_entity=es[u'régions'])
to_test = [(Add, [6, 2]),
(Subtract, [0, -2]),
(Multiply, [9, 0]),
(Divide, [1, 0])]
features = []
for test in to_test:
features.append(test[0](count_customer, count_stores))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(
instance_ids=['United States', 'Mexico'], time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
# TODO latlong is a string in entityset. Asserts in test_latlong fail
# def latlong_unstringify(latlong):
# lat = float(latlong.split(", ")[0].replace("(", ""))
# lon = float(latlong.split(", ")[1].replace(")", ""))
# return (lat, lon)
def test_latlong(es):
log_latlong_feat = es['log']['latlong']
latitude = Latitude(log_latlong_feat)
longitude = Longitude(log_latlong_feat)
features = [latitude, longitude]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
latvalues = df[latitude.get_name()].values
lonvalues = df[longitude.get_name()].values
assert len(latvalues) == 15
assert len(lonvalues) == 15
real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]
for i, v, in enumerate(real_lats):
assert v == latvalues[i]
for i, v, in enumerate(real_lons):
assert v == lonvalues[i]
def test_haversine(es):
log_latlong_feat = es['log']['latlong']
log_latlong_feat2 = es['log']['latlong2']
haversine = Haversine(log_latlong_feat, log_latlong_feat2)
features = [haversine]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
values = df[haversine.get_name()].values
real = [0., 524.15585776, 1043.00845747, 1551.12130243,
2042.79840241, 0., 137.86000883, 275.59396684,
413.07563177, 0., 0., 524.15585776,
0., 739.93819145, 1464.27975511]
assert len(values) == 15
for i, v in enumerate(real):
assert v - values[i] < .0001
def test_cum_sum(es):
log_value_feat = es['log']['value']
cum_sum = CumSum(log_value_feat, es['log']['session_id'])
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 50, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_min(es):
log_value_feat = es['log']['value']
cum_min = CumMin(log_value_feat, es['log']['session_id'])
features = [cum_min]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_min.get_name()].values
assert len(cvalues) == 15
cum_min_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i, v in enumerate(cum_min_values):
assert v == cvalues[i]
def test_cum_max(es):
log_value_feat = es['log']['value']
cum_max = CumMax(log_value_feat, es['log']['session_id'])
features = [cum_max]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_max.get_name()].values
assert len(cvalues) == 15
cum_max_values = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
for i, v in enumerate(cum_max_values):
assert v == cvalues[i]
def test_cum_sum_use_previous(es):
log_value_feat = es['log']['value']
cum_sum = CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 45, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_use_previous_integer_time(int_es):
es = int_es
log_value_feat = es['log']['value']
with pytest.raises(AssertionError):
CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'm'))
cum_sum = CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 45, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat,
where=compare_feat)
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 50, 50, 50, 50, 50, 50,
0, 5, 5, 12, 26]
for i, v in enumerate(cum_sum_values):
if not np.isnan(v):
assert v == cvalues[i]
else:
assert (np.isnan(cvalues[i]))
def test_cum_sum_use_previous_and_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
# todo should this be cummean?
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat,
where=compare_feat,
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_sum_values = [0, 5, 15, 30, 45, 45, 45, 45, 45, 45,
0, 5, 5, 12, 26]
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_group_on_nan(es):
log_value_feat = es['log']['value']
es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +
['toothpaste'] * 3 + ['brown bag'] * 2 +
['shoes'] +
[np.nan] * 4 +
['coke_zero'] * 2)
cum_sum = CumSum(log_value_feat, es['log']['product_id'])
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15,
15, 35,
0, 1, 3,
3, 3,
0,
np.nan, np.nan, np.nan, np.nan]
for i, v in enumerate(cum_sum_values):
if np.isnan(v):
assert (np.isnan(cvalues[i]))
else:
assert v == cvalues[i]
def test_cum_sum_use_previous_group_on_nan(es):
# TODO: Figure out how to test where `df`
# in pd_rolling get_function() has multiindex
log_value_feat = es['log']['value']
es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +
['toothpaste'] * 3 + ['brown bag'] * 2 +
['shoes'] +
[np.nan] * 4 +
['coke_zero'] * 2)
cum_sum = CumSum(log_value_feat,
es['log']['product_id'],
es["log"]["datetime"],
use_previous=Timedelta(40, 'seconds'))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15,
15, 35,
0, 1, 3,
3, 0,
0,
np.nan, np.nan, np.nan, np.nan]
for i, v in enumerate(cum_sum_values):
if np.isnan(v):
assert (np.isnan(cvalues[i]))
else:
assert v == cvalues[i]
def test_cum_sum_use_previous_and_where_absolute(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat, es["log"]["datetime"],
where=compare_feat,
use_previous=Timedelta(40, 'seconds'))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_sum_values = [0, 5, 15, 30, 50, 0, 0, 0, 0, 0,
0, 5, 0, 7, 21]
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_mean(es):
log_value_feat = es['log']['value']
cum_mean = CumMean(log_value_feat, es['log']['session_id'])
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 2.5, 5, 7.5, 10, 0, .5, 1, 1.5, 0, 0, 2.5, 0, 3.5, 7]
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_mean_use_previous(es):
log_value_feat = es['log']['value']
cum_mean = CumMean(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 2.5, 5, 10, 15, 0, .5, 1, 2, 0, 0, 2.5, 0, 3.5, 7]
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_mean_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_mean = CumMean(log_value_feat, dfeat,
where=compare_feat)
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 5, 7.5, 10, 12.5, 12.5, 12.5, 12.5, 12.5, 12.5,
0, 5, 5, 6, 26. / 3]
for i, v in enumerate(cum_mean_values):
if not np.isnan(v):
assert v == cvalues[i]
else:
assert (np.isnan(cvalues[i]))
def test_cum_mean_use_previous_and_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
# todo should this be cummean?
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_mean = CumMean(log_value_feat, dfeat,
where=compare_feat,
use_previous=Timedelta(2, 'observations',
entity=es['log']))
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_mean_values = [0, 5, 7.5, 12.5, 17.5, 17.5, 17.5, 17.5, 17.5, 17.5,
0, 5, 5, 6, 10.5]
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_count(es):
log_id_feat = es['log']['id']
cum_count = CumCount(log_id_feat, es['log']['session_id'])
features = [cum_count]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_count.get_name()].values
assert len(cvalues) == 15
cum_count_values = [1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 1, 2, 1, 2, 3]
for i, v in enumerate(cum_count_values):
assert v == cvalues[i]
def test_text_primitives(es):
words = NumWords(es['log']['comments'])
chars = NumCharacters(es['log']['comments'])
features = [words, chars]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
word_counts = [514, 3, 3, 644, 1268, 1269, 177, 172, 79,
240, 1239, 3, 3, 3, 3]
char_counts = [3392, 10, 10, 4116, 7961, 7580, 992, 957,
437, 1325, 6322, 10, 10, 10, 10]
word_values = df[words.get_name()].values
char_values = df[chars.get_name()].values
assert len(word_values) == 15
for i, v in enumerate(word_values):
assert v == word_counts[i]
for i, v in enumerate(char_values):
assert v == char_counts[i]
def test_overrides(es):
value = Feature(es['log']['value'])
value2 = Feature(es['log']['value_2'])
feats = [Add, Subtract, Multiply, Divide]
compare_ops = [GreaterThan, LessThan, Equals, NotEquals,
GreaterThanEqualTo, LessThanEqualTo]
assert Negate(value).hash() == (-value).hash()
compares = [(value, value),
(value, value2),
(value2, 2)]
overrides = [
value + value,
value - value,
value * value,
value / value,
value > value,
value < value,
value == value,
value != value,
value >= value,
value <= value,
value + value2,
value - value2,
value * value2,
value / value2,
value > value2,
value < value2,
value == value2,
value != value2,
value >= value2,
value <= value2,
value2 + 2,
value2 - 2,
value2 * 2,
value2 / 2,
value2 > 2,
value2 < 2,
value2 == 2,
value2 != 2,
value2 >= 2,
value2 <= 2,
]
i = 0
for left, right in compares:
for feat in feats:
f = feat(left, right)
o = overrides[i]
assert o.hash() == f.hash()
i += 1
for compare_op in compare_ops:
f = compare_op(left, right)
o = overrides[i]
assert o.hash() == f.hash()
i += 1
our_reverse_overrides = [
2 + value2,
2 - value2,
2 * value2,
2 / value2]
i = 0
for feat in feats:
if feat != Mod:
f = feat(2, value2)
o = our_reverse_overrides[i]
assert o.hash() == f.hash()
i += 1
python_reverse_overrides = [
2 < value2,
2 > value2,
2 == value2,
2 != value2,
2 <= value2,
2 >= value2]
i = 0
for compare_op in compare_ops:
f = compare_op(value2, 2)
o = python_reverse_overrides[i]
assert o.hash() == f.hash()
i += 1
def test_override_boolean(es):
count = Count(es['log']['value'], es['sessions'])
count_lo = GreaterThan(count, 1)
count_hi = LessThan(count, 10)
to_test = [[True, True, True],
[True, True, False],
[False, False, True]]
features = []
features.append(count_lo.OR(count_hi))
features.append(count_lo.AND(count_hi))
features.append(~(count_lo.AND(count_hi)))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test
def test_override_cmp_from_variable(es):
count_lo = IdentityFeature(es['log']['value']) > 1
to_test = [False, True, True]
features = [count_lo]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
v = df[count_lo.get_name()].values.tolist()
for i, test in enumerate(to_test):
assert v[i] == test
def test_override_cmp(es):
count = Count(es['log']['value'], es['sessions'])
_sum = Sum(es['log']['value'], es['sessions'])
gt_lo = count > 1
gt_other = count > _sum
ge_lo = count >= 1
ge_other = count >= _sum
lt_hi = count < 10
lt_other = count < _sum
le_hi = count <= 10
le_other = count <= _sum
ne_lo = count != 1
ne_other = count != _sum
to_test = [[True, True, False],
[False, False, True],
[True, True, True],
[False, False, True],
[True, True, True],
[True, True, False],
[True, True, True],
[True, True, False]]
features = [gt_lo, gt_other, ge_lo, ge_other, lt_hi,
lt_other, le_hi, le_other, ne_lo, ne_other]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test
def test_isin_feat(es):
isin = IsIn(es['log']['product_id'],
list_of_outputs=["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_other_syntax(es):
isin = Feature(es['log']['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_other_syntax_int(es):
isin = Feature(es['log']['value']).isin([5, 10])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_custom(es):
def pd_is_in(array, list_of_outputs=None):
if list_of_outputs is None:
list_of_outputs = []
return pd.Series(array).isin(list_of_outputs)
def isin_generate_name(self):
return u"%s.isin(%s)" % (self.base_features[0].get_name(),
str(self.kwargs['list_of_outputs']))
IsIn = make_trans_primitive(
pd_is_in,
[Variable],
Boolean,
name="is_in",
description="For each value of the base feature, checks whether it is "
"in a list that is provided.",
cls_attributes={"generate_name": isin_generate_name})
isin = IsIn(es['log']['product_id'],
list_of_outputs=["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
isin = Feature(es['log']['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
isin = Feature(es['log']['value']).isin([5, 10])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isnull_feat(es):
value = IdentityFeature(es['log']['value'])
diff = Diff(value, es['log']['session_id'])
isnull = IsNull(diff)
features = [isnull]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(15), None)
# correct_vals_diff = [
# np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7]
correct_vals = [True, False, False, False, False, True, False, False,
False, True, True, False, True, False, False]
values = df[isnull.get_name()].values.tolist()
assert correct_vals == values
def test_init_and_name(es):
from featuretools import calculate_feature_matrix
log = es['log']
features = [Feature(v) for v in log.variables] +\
[GreaterThan(Feature(es["products"]["rating"], es["log"]), 2.5)]
# Add Timedelta feature
features.append(pd.Timestamp.now() - Feature(log['datetime']))
for transform_prim in get_transform_primitives().values():
# use the input_types matching function from DFS
input_types = transform_prim.input_types
if type(input_types[0]) == list:
matching_inputs = match(input_types[0], features)
else:
matching_inputs = match(input_types, features)
if len(matching_inputs) == 0:
raise Exception(
"Transform Primitive %s not tested" % transform_prim.name)
for s in matching_inputs:
instance = transform_prim(*s)
# try to get name and calculate
instance.get_name()
calculate_feature_matrix([instance], entityset=es).head(5)
def test_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
pandas_backend = PandasBackend(es, [p])
df = pandas_backend.calculate_all_features(range(10, 17), None)
true = es['log'].df[v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_dependent_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
p2 = Percentile(p - 1)
pandas_backend = PandasBackend(es, [p, p2])
df = pandas_backend.calculate_all_features(range(10, 17), None)
true = es['log'].df[v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['sessions'])
pandas_backend = PandasBackend(es, [agg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum()[[0, 1]]
for t, a in zip(true_p.values, df[agg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['sessions'])
pagg = Percentile(agg)
pandas_backend = PandasBackend(es, [pagg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg(es):
v = Feature(es['log']['value'])
agg = Sum(v, es['sessions'])
pagg = Percentile(agg)
pandas_backend = PandasBackend(es, [pagg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
true_p = log_vals.groupby('session_id')[v.get_name()].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_percentile(es):
v = Feature(es['customers']['age'])
p = Percentile(v)
d = Feature(p, es['sessions'])
pandas_backend = PandasBackend(es, [d])
df = pandas_backend.calculate_all_features([0, 1], None)
cust_vals = es['customers'].df[[v.get_name()]]
cust_vals['percentile'] = cust_vals[v.get_name()].rank(pct=True)
true_p = cust_vals['percentile'].loc[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['customers'])
d = Feature(agg, es['sessions'])
pandas_backend = PandasBackend(es, [d])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
log_vals['customer_id'] = [0] * 10 + [1] * 5 + [2] * 2
true_p = log_vals.groupby('customer_id')['percentile'].sum().fillna(0)
true_p = true_p[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
""" Export class which writes parsed data to a certain format"""
valid_formats = ["pdf", "xlsx", "txt", "csv", "dataframe"]
def __init__(self, data=None):
self.data = data
# for later: add pandas independent functions to export arrays to file
def arrays_to_dframe(self, **kwargs):
""" Using keyworded arguments, expects arrays """
try:
df = pd.DataFrame(kwargs)
except ValueError: #if arrays do not have the same length
d = {}
for key, value in kwargs.items():
d[key] = pd.Series(value)
df = pd.DataFrame(d)
return df
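# Illustrative usage (kept as comments so the class body stays intact; the
# values are made up): unequal-length arrays are each wrapped in a pd.Series,
# so the shorter column is padded with NaN instead of raising ValueError.
#   exp = Exporter()
#   df = exp.arrays_to_dframe(energy=[1.0, 2.0, 3.0], osc_str=[0.1, 0.2])
#   # df['osc_str'] -> [0.1, 0.2, NaN]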
def ExcitedStateSummary(self, results, fname="es_smry", fmt="csv",
ground_state=False):
""" Exports energy related excited state quantities to file
Parameters
----------
results : CCParser.ParseContainer
Parsing container that holds parsed values.
fname : string
Filename prefix.
fmt : string
Output format ('csv', 'xlsx'/'xls' or 'df' for pandas.DataFrame).
ground_state : bool
Whether to include an empty line in the table for the ground state.
"""
if fmt not in Exporter.valid_formats:
raise ValueError("File format '{0:}' not recognized or supported!".format(fmt))
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
d = {}
# (1) Excitation energies (default minimum)
#if hasattr(results, VarNames.exc_energy_rel):
d[VarNames.exc_energy_rel] = getattr(results, VarNames.exc_energy_rel).data
n_states = len(d[VarNames.exc_energy_rel])
# (2) Oscillator strengths
if hasattr(results, VarNames.osc_str):
d[VarNames.osc_str] = getattr(results, VarNames.osc_str).data
# (3) Amplitudes
if hasattr(results, VarNames.amplitudes):
ampl = getattr(results, VarNames.amplitudes)
pieces = [a.to_dataframe() for a in ampl]
key = [x for x in range(1,len(pieces)+1)]
amp_df = pd.concat(pieces, keys=key, names=["State", "Row ID"])
# prepare MultiIndex (there has to be a better way to do that...)
arrays = [[x for x in range(1, n_states+1)],
[0 for x in range(n_states)]]
tuples = list(zip(*arrays))# asterisk unpacks
df1 = pd.DataFrame(d)
df1.index = pd.MultiIndex.from_tuples(tuples, names=["State", "Row ID"])
df = pd.concat([df1, amp_df], axis=1)
# add row to MultiIndex, see https://stackoverflow.com/q/24917700
if ground_state:
df.loc[(0,0),:] = np.nan
df.sort_index(level=0, inplace=True)
# EXPORT TO FILE or dataframe
fout = fname + "." + fmt
if fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == ("xlsx" or "xls"):
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
elif fmt.lower() == ("dataframe" or "df"):
return df
def ReducedWeights(self, results, nbsfA, extern=None, fmt="print",
fname="AmplAnl", silent=False):
""" Calculate reduced weights based on fragment information.
The reduced weight for a single excitation :math:`i \\rightarrow a` is defined as
:math:`v_{i}^{a} = 0.5\\cdot(c_{i,A}^{2} + c_{a,A}^{2})\\cdot w_{i}^{a}`, with
c and w being the molecular orbital coefficient and transition weight,
respectively.
The MO coefficients from the output first have to be transformed to an
orthonormal basis.
Parameters
----------
results : CCParser.ParseContainer
Container object which contains excited state amplitudes
nbsfA : int
Number of basis functions on System A (assumes system A comes first!)
extern : CCParser.ParseContainer
Optional second container which contains orthonormalisation matrix and/or MO coefficients
fmt : string
Output format. Available are "print", "dataframe", "xlsx" or "csv"
fname : string
Output file name (basename only).
silent : bool
Whether to ignore lengthy printouts.
"""
# consistency
has_extern = True if extern != None else False
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
if not has_extern and not hasattr(results, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif has_extern and not hasattr(extern, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif not has_extern and not hasattr(results, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif has_extern and not hasattr(extern, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif not hasattr(results, VarNames.amplitudes):
raise AttributeError("Could not find amplitudes! Were they parsed?")
elif not hasattr(results, VarNames.n_bas):
raise AttributeError("Could not find number of basis functions! Was it parsed?")
else:
# (1) Orthonormalization matrix, hardcoded last
X = getattr(results, VarNames.orthonorm_matrix).get_last() if not \
has_extern else getattr(extern, VarNames.orthonorm_matrix).get_last()
X_inv = np.linalg.inv(X)
# (2) MO coeffiecients, hardcoded last
C = getattr(results, VarNames.mo_coefficients).get_last() if not \
has_extern else getattr(extern, VarNames.mo_coefficients).get_last()
C_prime = C * X_inv # Szabo, Ostlund, page 142
max_mo = C.shape[0]
# (3) Amplitudes
ampl = getattr(results, VarNames.amplitudes)
n_states = len(ampl)
# (4) Number of basis functions
nbsf = getattr(results, VarNames.n_bas).get_last()
# (4) Output variables
sum_weights = [0 for i in range(n_states)]
sum_redweights = [0 for i in range(n_states)]
# --------------
sos_A = [0 for a in range(C_prime.shape[0])]
sos_B = [0 for a in range(C_prime.shape[0])]
for c, vect in enumerate(C_prime):
for n in range(nbsf):
if n < nbsfA:
sos_A[c] += vect[0,n]**2
else:
sos_B[c] += vect[0,n]**2
for i,a in enumerate(ampl):#state
for t in range(len(a.occ)):#transition
if max(a.virt[t]) > max_mo:
if not silent:
print("State {0:>2d}: Omitting transition with weight \
{1:.1%} due to missing MO coefficients.".format(i+1, a.weights[t]))
continue
if len(a.occ[t]) == 1:#single amplitudes
rw = 0.5*(sos_A[a.occ[t][0]-1] + sos_A[a.virt[t][0]-1]) * a.weights[t]
elif len(a.occ[t]) == 2:#double amplitudes
rw = 0.25*(sos_A[a.occ[t][0]-1] + sos_A[a.occ[t][1]-1] +
sos_A[a.virt[t][0]-1] + sos_A[a.virt[t][1]-1]
)*a.weights[t]
else:
raise IndexError("Currently no more than double \
amplitudes are supported!")
sum_weights[i] += a.weights[t]
sum_redweights[i] += rw
#----------------
# Export as
fout = fname + "." + fmt
d = {"State": [i+1 for i in range(n_states)],
"sum_weight" : sum_weights,
"sum_red_weight" : sum_redweights}
df = pd.DataFrame(d)
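# Hedged numeric illustration of the reduced-weight formula documented above,
# v_i^a = 0.5 * (c_{i,A}^2 + c_{a,A}^2) * w_i^a; the numbers and names below
# are made up and are not part of CCParser.
sos_A_occupied = 0.8   # summed squared orthonormalised MO coefficients on fragment A (occupied orbital)
sos_A_virtual = 0.3    # same quantity for the virtual orbital
weight = 0.6           # transition weight w_i^a
reduced_weight = 0.5 * (sos_A_occupied + sos_A_virtual) * weight
assert abs(reduced_weight - 0.33) < 1e-12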
import numpy as np
import pandas as pd
from datetime import datetime
def string_date(mnthDay, year):
"""Return a string date as 'mm/dd/yyyy'.
Argument format:
'mm/dd' string
'yyyy'"""
return(mnthDay + '/' + str(year))
class TouRate(object):
"""Object for Utility Time Of Use Tariff.
Class for a utility time-of-use tariff structure. TouRate provides
methods to slice a dataframe with a year-long hourly datetime axis (8760)
by the time periods defined by a time of use (TOU) utility rate.
Instance Variables:
year - integer year for the analysis; the year affects which holidays apply
cal - an instance of pandas holiday calendar
deliveryPrice - float price for the base elec. price ($/kWh)
periods - dictionary defining the time of use periods
Periods dict format (the inner 'days' dict can be empty):
SCE_TOU_GS_2 = {'holWkndsWinter': {'dates': ('10/01', '5/31'),
'times': ('00:00', '23:00'),
'days': {'dropHol': True, 'dropWknd': True, 'inverse': True},
'price': 0.04064}
}
Methods:
spans_year - returns bool indicating if tou period spans calendar year
filter_days - Returns dataframe without holidays or weekends or inverse
get_period - Returns dataframe with only hours and days from the give tou period
get_all_periods - Returns dataframe with a column for each tou period.
get_rates - Returns dataframe with TOU period rates from TouRate object.
"""
def __init__(self,
year,
cal,
deliveryPrice,
periods={}):
self.year = year
self.cal = cal
self.deliveryPrice = deliveryPrice
self.periods = periods
def spans_year(self, key):
"""Returns bool indicating spanning calendar year end.
Arguments:
dict key (string) for the TOU period to check
"""
startDate = string_date(self.periods[key]['dates'][0], self.year)
endDate = string_date(self.periods[key]['dates'][1], self.year)
startDay = pd.date_range(startDate, periods=1).dayofyear
endDay = pd.date_range(endDate, periods=1).dayofyear
return(endDay[0] - startDay[0] < 0)
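# Example (comments only; the period names are assumptions):
#   periods = {'winter': {'dates': ('10/01', '5/31'), ...}}
#   TouRate(...).spans_year('winter')   # -> True, Oct 1 falls after May 31 in dayofyear terms
#   periods = {'summer': {'dates': ('6/01', '9/30'), ...}}
#   TouRate(...).spans_year('summer')   # -> False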
def filter_days(self, df, dropWknd=True, dropHol=True, inverse=False):
"""Returns dataframe without holidays or weekends or inverse.
Arguments:
df - dataframe of energy production (hourly)
Keyword arguments:
dropWknd=bool default is True
dropHol=bool, default is True
inverse=bool, default is False
"""
# create function to check this and return error?
# if df.index.date[0].year == self.year:
df_int = pd.DataFrame()
if df_int.empty:
if dropWknd & dropHol:
df_int = df.loc[~np.in1d(df.index.date, df.index[df.index.weekday > 4].date)]
holExcp = self.cal.holidays(datetime(self.year, 1, 1), datetime(self.year, 12, 31), return_name=False)
df_int = df_int.loc[~np.in1d(df_int.index.date, holExcp.date)]
elif dropWknd:
df_int = df.loc[~np.in1d(df.index.date, df.index[df.index.weekday > 4].date)]
elif dropHol:
holExcp = self.cal.holidays(datetime(self.year, 1, 1), datetime(self.year, 12, 31), return_name=False)
df_int = df.loc[~np.in1d(df.index.date, holExcp.date)]
else:
df_int = df
if inverse:
return(df.loc[~np.in1d(df.index.date, df_int.index.date)])
else:
return(df_int)
def get_period(self, df, key):
"""Returns dataframe with only hours and days from the give tou period.
Arguments:
df - dataframe of hourly energy production data
key - dict key of TOU period defined in the
"""
daysFiltered = self.filter_days(df, **self.periods[key]['days'])
perStart = string_date(self.periods[key]['dates'][0], df.index[0].date().year)
perEnd = string_date(self.periods[key]['dates'][1], df.index[0].date().year)
if self.spans_year(key):
# slice period for beginning of the year
fall = daysFiltered.ix[perStart:string_date('12/31', df.index[0].date().year)]
fall = fall.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])
fall = fall.sort_index()
# slice period for end of the year
spring = fall.append(daysFiltered.ix[string_date('1/1', df.index[0].date().year):perEnd])
spring = spring.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])
spring = spring.sort_index()
return(spring.rename_axis({'data': key}, axis=1))
else:
df_temp = daysFiltered.ix[perStart:perEnd]
df_temp = df_temp.between_time(self.periods[key]['times'][0], self.periods[key]['times'][1])
return(df_temp.rename_axis({'data': key}, axis=1))
def get_all_periods(self, df):
"""Returns dataframe with a column for each tou period.
Arguments:
df - dataframe of hourly energy production data
"""
df_append = pd.DataFrame()
# Copyright (C) 2018 GuQiangJs.
# Licensed under Apache License 2.0 <see LICENSE file>
import pandas as pd
from pandas import read_excel
def get_stock_holdings(index: str):
""" 从 中证指数有限公司 获取指数的成分列表
Args:
index: index code
Returns:
``pandas.DataFrame``:
Examples:
.. code-block:: python
>>> from finance_datareader_py.csindex import get_stock_holdings
>>> print(get_stock_holdings('000300').tail())
symbol name
295 300136 信维通信
296 300144 宋城演艺
297 300251 光线传媒
298 300408 三环集团
299 300433 蓝思科技
"""
if not index:
raise ValueError()
url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/{0}' \
'cons.xls'.format(index)
df = read_excel(url, convert_float=False, dtype=object, usecols=[4, 5])
df.rename(columns={'成分券代码Constituent Code': 'symbol',
'成分券名称Constituent Name': 'name'}, inplace=True)
# df.set_index("symbol", inplace=True)
return df
def get_stock_holdings_weight(index: str):
""" 从 中证指数有限公司 获取指数的成分权重
Args:
index: index code
Returns:
``pandas.DataFrame``:
Examples:
.. code-block:: python
>>> from finance_datareader_py.csindex import get_stock_holdings_weight
>>> print(get_stock_holdings_weight('000300').tail())
symbol name 权重(%)Weight(%)
295 300136 信维通信 0.25
296 300144 宋城演艺 0.17
297 300251 光线传媒 0.08
298 300408 三环集团 0.24
299 300433 蓝思科技 0.09
"""
if not index:
raise ValueError()
url = 'http://www.csindex.com.cn/uploads/file/autofile/' \
'closeweight/{0}closeweight.xls'.format(index)
df = read_excel(url, convert_float=False, dtype=object, usecols=[4, 5, 8])
from __future__ import print_function
import os
import sys
import logging
import pandas as pd
import numpy as np
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import candle
logger = logging.getLogger(__name__)
candle.set_parallelism_threads()
additional_definitions = [
{'name': 'latent_dim',
'action': 'store',
'type': int,
'help': 'latent dimensions'},
{'name': 'residual',
'type': candle.str2bool,
'default': False,
'help': 'add skip connections to the layers'},
{'name': 'reduce_lr',
'type': candle.str2bool,
'default': False,
'help': 'reduce learning rate on plateau'},
{'name': 'warmup_lr',
'type': candle.str2bool,
'default': False,
'help': 'gradually increase learning rate on start'},
{'name': 'base_lr',
'type': float,
'help': 'base learning rate'},
{'name': 'epsilon_std',
'type': float,
'help': 'epsilon std for sampling latent noise'},
{'name': 'use_cp',
'type': candle.str2bool,
'default': False,
'help': 'checkpoint models with best val_loss'},
{'name': 'use_tb',
'type': candle.str2bool,
'default': False,
'help': 'use tensorboard'},
{'name': 'tsne',
'type': candle.str2bool,
'default': False,
'help': 'generate tsne plot of the latent representation'}
]
required = [
'activation',
'batch_size',
'dense',
'dropout',
'epochs',
'initialization',
'learning_rate',
'loss',
'optimizer',
'rng_seed',
'scaling',
'val_split',
'latent_dim',
'batch_normalization',
'epsilon_std',
'timeout'
]
class BenchmarkAttn(candle.Benchmark):
def set_locals(self):
"""Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing the additional parameters for the
benchmark.
"""
if required is not None:
self.required = set(required)
if additional_definitions is not None:
self.additional_definitions = additional_definitions
def extension_from_parameters(params, framework=''):
"""Construct string for saving model with annotation of parameters"""
ext = framework
for i, n in enumerate(params['dense']):
if n:
ext += '.D{}={}'.format(i + 1, n)
ext += '.A={}'.format(params['activation'][0])
ext += '.B={}'.format(params['batch_size'])
ext += '.E={}'.format(params['epochs'])
ext += '.L={}'.format(params['latent_dim'])
ext += '.LR={}'.format(params['learning_rate'])
ext += '.S={}'.format(params['scaling'])
if params['epsilon_std'] != 1.0:
ext += '.EPS={}'.format(params['epsilon_std'])
if params['dropout']:
ext += '.DR={}'.format(params['dropout'])
if params['batch_normalization']:
ext += '.BN'
if params['warmup_lr']:
ext += '.WU_LR'
if params['reduce_lr']:
ext += '.Re_LR'
if params['residual']:
ext += '.Res'
return ext
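# For instance (comment only; the parameter values are made up), params with
# dense=[1000, 500], activation=['relu'], batch_size=32, epochs=10,
# latent_dim=2, learning_rate=0.001, scaling='std', dropout=0.2 and the
# remaining flags off would yield an extension like
# '.D1=1000.D2=500.A=relu.B=32.E=10.L=2.LR=0.001.S=std.DR=0.2'.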
def load_data(params, seed):
# start change #
if params['train_data'].endswith('h5') or params['train_data'].endswith('hdf5'):
print('processing h5 in file {}'.format(params['train_data']))
url = params['data_url']
file_train = params['train_data']
train_file = candle.get_file(file_train, url + file_train, cache_subdir='Pilot1')
df_x_train_0 = pd.read_hdf(train_file, 'x_train_0').astype(np.float32)
df_x_train_1 = pd.read_hdf(train_file, 'x_train_1').astype(np.float32)
X_train = pd.concat([df_x_train_0, df_x_train_1], axis=1, sort=False)
del df_x_train_0, df_x_train_1
df_x_test_0 = pd.read_hdf(train_file, 'x_test_0')
#!/usr/bin/env python
__author__ = '<NAME>'
import os
import pandas as pd
import argparse
from copy import deepcopy
from _collections import OrderedDict
import pandas as pd
from BCBio import GFF
from RouToolPa.Collections.General import SynDict, IdList
from RouToolPa.Parsers.VCF import CollectionVCF
from MACE.Routines import Visualization, StatsVCF
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True, type=lambda s: s.split(","),
help="Comma_separated_list of input file with precalculated coverage in windows.")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats", type=lambda s: s.split(","),
default=("png", ),
help="Comma-separated list of formats (supported by matlotlib) of "
"output figure.Default: svg,png")
"""
parser.add_argument("-g", "--draw_gaps", action="store_true", dest="draw_gaps",
help="Draw gaps, ignored if reference genome is not set. Default: False")
"""
parser.add_argument("-m", "--mean_coverage_file", action="store", dest="mean_coverage_file", required=True,
help="File with mean coverage for all samples")
parser.add_argument("-l", "--label_list", action="store", dest="label_list", required=True, type=lambda s: s.split(","),
help="Comma-separated list of labels to use for samples")
parser.add_argument("--scaffold_column_name", action="store", dest="scaffold_column_name", default="scaffold",
help="Name of column in coverage file with scaffold ids per window. Default: scaffold")
parser.add_argument("--window_column_name", action="store", dest="window_column_name", default="window",
help="Name of column in coverage file with window id. Default: window")
parser.add_argument("--coverage_column_name_list", action="store", dest="coverage_column_name_list",
default=["median", "mean"],
type=lambda s: s.split(","),
help="Coverage file with mean/median coverage per window. Default: median,mean")
parser.add_argument("--label_column_name", action="store", dest="label_column_name", default="label",
help="Name of column in mean coverage file with labels of samples. Default: label")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", default=100000, type=int,
help="Size of the windows Default: 100000")
parser.add_argument("-s", "--window_step", action="store", dest="window_step", default=None, type=int,
help="Step of the sliding windows. Default: window size, i.e windows are staking")
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
parser.add_argument("-y", "--sort_scaffolds", action="store_true", dest="sort_scaffolds", default=False,
help="Order scaffolds according to their names. Default: False")
parser.add_argument("-z", "--scaffold_ordered_list", action="store", dest="scaffold_ordered_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to draw first and exactly in same order. "
"Scaffolds absent in this list are drawn last and in order according to vcf file . "
"Default: not set")
parser.add_argument("-n", "--scaffold_length_file", action="store", dest="scaffold_length_file", required=True,
help="File with lengths of scaffolds")
parser.add_argument("--scaffold_syn_file", action="store", dest="scaffold_syn_file",
help="File with scaffold id synonyms")
parser.add_argument("--syn_file_key_column", action="store", dest="syn_file_key_column",
default=0, type=int,
help="Column(0-based) with key(current id) for scaffolds in synonym file. Default: 0")
parser.add_argument("--syn_file_value_column", action="store", dest="syn_file_value_column",
default=1, type=int,
help="Column(0-based) with value(synonym id) for scaffolds in synonym file synonym. Default: 1")
parser.add_argument("--colormap", action="store", dest="colormap",
help="Matplotlib colormap to use for SNP densities. Default: not set, "
"colors from HapMap article are used")
parser.add_argument("--coverage_thresholds", action="store", dest="coverage_thresholds",
default=(0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5),
type=lambda s: list(map(float, s.split(","))),
help="Comma-separated list of coverage thresholds(relative to mean/median) to use for "
"window coloring."
"Default: (0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5)")
parser.add_argument("--test_colormaps", action="store_true", dest="test_colormaps",
help="Test colormaps. If set --colormap option will be ignored")
parser.add_argument("--absolute_coverage_values", action="store_true", dest="absolute_coverage_values",
help="Use absolute coverage values. Default: False")
parser.add_argument("--subplots_adjust_left", action="store", dest="subplots_adjust_left", type=float,
help="Adjust left border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_top", action="store", dest="subplots_adjust_top", type=float,
help="Adjust top border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_right", action="store", dest="subplots_adjust_right", type=float,
help="Adjust right border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_bottom", action="store", dest="subplots_adjust_bottom", type=float,
help="Adjust bottom border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--figure_width", action="store", dest="figure_width", type=float, default=15,
help="Width of figure in inches. Default: 15")
parser.add_argument("--figure_height_per_scaffold", action="store", dest="figure_height_per_scaffold",
type=float, default=0.5,
help="Height of figure per chromosome track. Default: 0.5")
args = parser.parse_args()
mean_coverage_df = pd.read_csv(args.mean_coverage_file, sep='\t', header=0, index_col=0,
usecols=[args.label_column_name] + args.coverage_column_name_list)
chr_syn_dict = SynDict(filename=args.scaffold_syn_file,
key_index=args.syn_file_key_column,
value_index=args.syn_file_value_column)
chr_len_df = pd.read_csv(args.scaffold_length_file, sep='\t', header=None, names=("scaffold", "length"), index_col=0)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
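# Sketch of what the helper yields (comments only; the frames are made up):
# with groupers Categorical(['a', 'a'], categories=['a', 'b']) and
# Categorical(['x', 'y'], categories=['x', 'y']), the reindexed result gains a
# row for every (letter, letter) combination -- including the never-observed
# ('b', 'x') and ('b', 'y') -- filled with `fill_value`, which is how the
# observed=False expectations in the tests below are constructed.
#   expected = cartesian_product_for_groupers(observed_result, [cats, other],
#                                             names=['A', 'B'], fill_value=0)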
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
import pandas as pd
import sys
import numpy as np
def procesar(model, load, dates, results):
#Listas para guardar los valores
pv_result = []
dg_result = []
#Ebat_c_result = []
#Ebat_d_result = []
p_gf_result = []
LPSP_result = []
SOC_result = []
# Loop that extracts the value of each result from the model's variables
for v in model.var_reales.items():
uni = v[0][0]
if uni == 'Pv':
pv_result.append(v[1].value)
elif uni == 'Dg':
dg_result.append(v[1].value)
# elif uni == 'Ebat_c':
# Ebat_c_result.append(v[1].value)
# elif uni == 'Ebat_d':
# Ebat_d_result.append(v[1].value)
elif uni == 'P_gf':
p_gf_result.append(v[1].value)
else:
print("Pass")
else:
print('Finished loop over the dispatch variables')
for x in model.soc_t.items():
SOC_result.append(x[1].value)
else:
print('Finished loop over the battery state of charge')
# Concatenate the results to display them in a DataFrame
modelo_result = pd.concat([pd.DataFrame(pv_result),
pd.DataFrame(dg_result),
# pd.DataFrame(Ebat_c_result),  # battery charge/discharge lists are commented out above
# pd.DataFrame(Ebat_d_result),
pd.DataFrame(p_gf_result)])
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Deferral.py
This Python class contains methods and attributes specific for service analysis within StorageVet.
"""
import numpy as np
import cvxpy as cvx
from storagevet.ValueStreams.ValueStream import ValueStream
import pandas as pd
import storagevet.Library as Lib
import random
from storagevet.ErrorHandling import *
from storagevet.Library import truncate_float
class Deferral(ValueStream):
""" Investment deferral. Each service will be daughters of the PreDispService class.
"""
def __init__(self, params):
""" Generates the objective function, finds and creates constraints.
Args:
params (Dict): input parameters
"""
# generate the generic service object
ValueStream.__init__(self, 'Deferral', params)
# add Deferral specific attributes
self.max_import = params['planned_load_limit'] # positive
self.max_export = params['reverse_power_flow_limit'] # negative
self.last_year = params['last_year'].year
self.year_failed = params['last_year'].year + 1
self.min_years = params.get('min_year_objective', 0)
self.load = params['load'] # deferral load
self.growth = params['growth']/100 # Growth Rate of deferral load (%/yr)
self.price = params['price'] # $/yr
self.p_min = 0
self.e_min = 0
self.deferral_df = None
self.e_walk = pd.Series()
self.power_requirement = pd.Series()
def check_for_deferral_failure(self, end_year, poi, frequency, opt_years, def_load_growth):
"""This functions checks the constraints of the storage system against any predispatch or user inputted constraints
for any infeasible constraints on the system.
The goal of this function is to predict the year that storage will fail to deferral a T&D asset upgrade.
Only runs if Deferral is active.
Args:
end_year:
poi:
frequency:
opt_years:
def_load_growth:
Returns: new list of optimization years
"""
TellUser.info('Finding first year of deferral failure...')
current_year = self.load.index.year[-1]
additional_years = [current_year]
try:
find_failure_year = not poi.is_sizing_optimization
except AttributeError:
find_failure_year = True
# get list of RTEs
rte_lst = [der.rte for der in poi.der_list if der.technology_type == 'Energy Storage System']
ess_cha_max = 0
ess_dis_max = 0
ess_ene_max = 0
conventional_gen_max = 0
for der_isnt in poi.der_list:
if der_isnt.technology_type == "Energy Storage System":
ess_cha_max += der_isnt.ch_max_rated
ess_dis_max += der_isnt.dis_max_rated
ess_ene_max += der_isnt.ene_max_rated * der_isnt.ulsoc
if der_isnt.technology_type == 'Generator':
conventional_gen_max += der_isnt.discharge_capacity()
years_deferral_column = []
min_power_deferral_column = []
min_energy_deferral_column = []
while current_year <= end_year.year:
size = len(self.load)
years_deferral_column.append(current_year)
# TODO can we check the max year? or can we take the previous answer to calculate the next energy requirements?
positive_feeder_load = self.load.values
negative_feeder_load = np.zeros(size)
for der_isnt in poi.der_list:
if der_isnt.technology_type == "Load":
positive_feeder_load = positive_feeder_load + der_isnt.value.values
if der_isnt.technology_type == "Intermittent Resource" and not der_isnt.being_sized():
# TODO: should take PV variability into account here
negative_feeder_load = negative_feeder_load - der_isnt.maximum_generation()
positive_feeder_load += np.repeat(conventional_gen_max, size)
# Determine power requirement of the storage:
# (1) anytime the net_feeder_load goes above deferral_max_import (too much load)
positive_load_power_req = positive_feeder_load - self.max_import
positive_power_req = positive_load_power_req.clip(min=0)
# (2) anytime the net_feeder_load goes below deferral_max_exports
# (assumes deferral_max_export < 0) (too much generation)
negative_load_power_req = negative_feeder_load - self.max_export
negative_power_req = negative_load_power_req.clip(max=0)
# The sum of (1) and (2)
storage_power_requirement = positive_power_req + negative_power_req
e_walk, _ = self.precheck_failure(self.dt, rte_lst, storage_power_requirement)
TellUser.debug(f'In {current_year} -- min power: {truncate_float(self.p_min)} min energy: {truncate_float(self.e_min)}')
# save min power and energy requirements
min_power_deferral_column.append(self.p_min)
min_energy_deferral_column.append(self.e_min)
# save energy required as function of time & storage power required as function of time
self.e_walk = pd.Series(e_walk, index=self.load.index)
self.power_requirement = pd.Series(storage_power_requirement, index=self.load.index)
if find_failure_year and (self.p_min > ess_dis_max or self.p_min > ess_cha_max or self.e_min > ess_ene_max):
# then we predict that deferral will fail
last_deferral_yr = current_year - 1
self.set_last_deferral_year(last_deferral_yr, current_year)
opt_years = list(set(opt_years + additional_years))
find_failure_year = False
TellUser.info(f'{self.name} updating analysis years: {opt_years}')
# the current year we have could be the last year the deferral is possible, so we want
# to keep it in self.opt_results until we know the next is can be deferred as well
additional_years = [current_year, current_year + 1]
next_opt_years = list(set(opt_years + additional_years))
# add additional year of data to der data
for der in poi.der_list:
der.grow_drop_data(next_opt_years, frequency, def_load_growth)
# add additional year of data to deferred load
self.grow_drop_data(next_opt_years, frequency, def_load_growth)
# index the current year by one
current_year += 1
self.deferral_df = pd.DataFrame({'Year': years_deferral_column,
'Power Capacity Requirement (kW)': min_power_deferral_column,
'Energy Capacity Requirement (kWh)': min_energy_deferral_column})
self.deferral_df.set_index('Year', inplace=True)
return opt_years
def precheck_failure(self, tstep, rte_lst, sto_p_req):
"""
This function takes in a vector of storage power requirements (negative=charging and positive=discharging) [=] kW
that are required to perform the deferral as well as a time step (tstep) [=] hrs
Args:
tstep (float): timestep of the data in hours
rte_lst (list): round trip efficiency of storage
sto_p_req (list, ndarray): storage power requirement
Returns:
how much the energy in the ESS needs to wander as a function of time,
theoretical dispatch of the ESS to meet on feeder limits
Notes:
This algorithm can reliably find the last year deferral is possible, however the problem might still
be found INFEASIBLE if the ESS cannot use its full range of SOC (i.e. if LLSOC is too high or ULSOC is too low)
"""
# Loop through time steps. If the storage is forced to dispatch from the constraint,
# return to nominal SOC as soon as possible after.
self.p_min = max(abs(sto_p_req))
# TODO: determine min energy requirement in static recursive function to speed runtime --HN
sto_dispatch = np.zeros(sto_p_req.shape)
e_walk = np.zeros(sto_p_req.shape) # how much the energy in the ESS needs to wander #Definitely not a star wars pun
for step in range(len(sto_p_req)):
if step == 0:
e_walk[step] = -tstep * sto_p_req[0] # initialize at nominal SOC
sto_dispatch[step] = sto_p_req[0]  # ignore constraints imposed by the first timestep of the year
elif sto_p_req[step] > 0: # if it is required to dispatch, do it
sto_dispatch[step] = sto_p_req[step]
e_walk[step] = e_walk[step - 1] - sto_dispatch[step] * tstep # kWh
elif sto_p_req[step] < 0:
sto_dispatch[step] = sto_p_req[step]
random_rte = random.choice(rte_lst)
e_walk[step] = e_walk[step - 1] - sto_dispatch[step] * tstep * random_rte
elif e_walk[step - 1] < 0: # Otherwise contribute its full power to returning energy to nominal
sto_dispatch[step] = -min(abs(self.p_min), abs(e_walk[step - 1] / tstep), abs(self.max_import - self.load.iloc[step]))
random_rte = random.choice(rte_lst)
e_walk[step] = e_walk[step - 1] - sto_dispatch[step] * tstep * random_rte # kWh
elif e_walk[step - 1] > 0:
sto_dispatch[step] = min(abs(self.p_min), abs(e_walk[step - 1] / tstep))
e_walk[step] = e_walk[step - 1] - sto_dispatch[step] * tstep # kWh
else:
sto_dispatch[step] = 0
e_walk[step] = e_walk[step - 1]
kwh_min = max(e_walk) - min(e_walk)
self.e_min = float(kwh_min)
return e_walk, sto_dispatch
def grow_drop_data(self, years, frequency, load_growth):
""" Adds data by growing the given data OR drops any extra data that might have slipped in.
Update variable that hold timeseries data after adding growth data. These method should be called after
add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of loads in this simulation
"""
self.load = Lib.fill_extra_data(self.load, years, self.growth, frequency)
self.load = Lib.drop_extra_data(self.load, years)
def set_last_deferral_year(self, last_year, failed_year):
"""Sets last year that deferral is possible
Args:
last_year (int): The last year storage can defer an T&D equipment upgrade
failed_year (int): the year that deferring an upgrade will fail
"""
self.last_year = last_year
self.year_failed = failed_year
TellUser.info(f'{self.name} year failed set to: ' + str(self.year_failed))
def constraints(self, mask, load_sum, tot_variable_gen, generator_out_sum, net_ess_power, combined_rating):
"""Default build constraint list method. Used by services that do not have constraints.
Args:
mask (DataFrame): A boolean array that is true for indices corresponding to time_series data included
in the subs data set
tot_variable_gen (Expression): the sum of the variable/intermittent generation sources
load_sum (list, Expression): the sum of load within the system
generator_out_sum (list, Expression): the sum of conventional generation within the system
net_ess_power (list, Expression): the sum of the net power of all the ESS in the system. [= charge - discharge]
combined_rating (Dictionary): the combined rating of each DER class type
Returns:
An empty list (for aggregation of later constraints)
"""
# adding constraints to ensure power dispatch does not violate thermal limits of transformer deferred
# only include them if deferral is not going to fail
constraints = []
year_of_optimization = mask.loc[mask].index.year[-1]
if year_of_optimization < self.year_failed:
load_beyond_poi = cvx.Parameter(value=self.load.loc[mask].values, name='deferral_load', shape=sum(mask))
# -(max export) >= dis - ch + generation - loads
constraints += [cvx.NonPos(self.max_export - load_sum - load_beyond_poi + (-1)*net_ess_power + generator_out_sum + tot_variable_gen)]
# max import >= loads - (dis - ch) - generation
constraints += [cvx.NonPos(load_sum + load_beyond_poi + net_ess_power + (-1)*generator_out_sum + (-1)*tot_variable_gen - self.max_import)]
# TODO make sure power does not violate the constraints during dispatch service activity
else:
TellUser.debug(f"{self.name} did not add any constraints to our system of equations")
return constraints
def timeseries_report(self):
""" Summaries the optimization results for this Value Stream.
Returns: A timeseries dataframe with user-friendly column headers that summarize the results
pertaining to this instance
"""
report = pd.DataFrame(index=self.load.index)
report.loc[:, 'Deferral: Load (kW)'] = self.load
report.loc[:, 'Deferral: Energy Requirement (kWh)'] = -self.e_walk
report.loc[:, 'Deferral: Power Requirement (kW)'] = self.power_requirement
return report.sort_index()
def update_yearly_value(self, new_value: float):
""" Updates the attribute associated to the yearly value of this service. (used by CBA)
Args:
new_value (float): the dollar yearly value to be assigned for providing this service
"""
self.price = new_value
def proforma_report(self, opt_years, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
opt_years (list): list of years the optimization problem ran for
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame): DataFrame with all the optimization variable solutions
Returns: A tuple of a DataFrame (with each year in opt_years as the index and the corresponding
value this stream provided)
"""
years = results.index.year.unique()
start_year = min(years)
end_year = max(years)
yr_index = pd.period_range(start=start_year, end=end_year, freq='y')
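# --- Editor's aside (hedged sketch, not StorageVet code) ---------------------------------------
# precheck_failure() above sizes the ESS from an "energy walk": integrate the required dispatch
# over time and take the spread between the highest and lowest cumulative energy. The stripped-down
# version below keeps only that bookkeeping; it ignores the return-to-nominal-SOC logic and uses a
# single fixed round-trip efficiency, both of which are simplifying assumptions.
import numpy as np

def minimum_ess_requirements(power_req_kw, dt_hr=1.0, rte=0.85):
    """Return (minimum power kW, minimum energy kWh) for a deferral power-requirement profile."""
    power_req_kw = np.asarray(power_req_kw, dtype=float)
    p_min = np.abs(power_req_kw).max()
    # charging steps (negative power) are discounted by the round-trip efficiency
    energy_steps = np.where(power_req_kw > 0, power_req_kw, power_req_kw * rte) * dt_hr
    e_walk = -np.cumsum(energy_steps)
    e_min = e_walk.max() - e_walk.min()
    return p_min, e_min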
import zlib
import base64
import json
import re
import fnmatch
import pendulum
import requests
from redis import Redis
import pandas as pd
from pymongo import MongoClient
import pymongo.errors as merr
from ..constants import YEAR
from .orm import Competition
def _val(v, s=None):
if s is None:
s = {"raw", "proc", "df"}
if v not in s:
raise ValueError(v)
def _dec(s):
return zlib.decompress(base64.b64decode(s)).decode("utf-8")
def odate(ts):
dt = pendulum.parse(ts).in_timezone("Europe/London")
dt = pendulum.timezone("Europe/Rome").convert(dt)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def mc(coll=None):
if coll:
return MongoClient().get_database("opta").get_collection(coll)
else:
return MongoClient().get_database("opta")
def bproc(j, which="teams"):
l = []
if which in {"teams", "comps"}:
a = {"id": "id", "1": "full", "2": "short", "3": "abbr"}
elif which == "team":
a = {"id": "id", "1": "first", "2": "last", "3": "known"}
else:
raise ValueError(which)
for d in j:
l.append({a[k]: d[k] for k in d})
return l
def bget(url, mongo=None, proc=None):
r = Redis()
coll = None
if url in r:
j = json.loads(r[url].decode("utf-8"))
if proc:
j = proc(j)
return j
if mongo:
coll = mc(mongo["coll"])
d = coll.find_one(mongo["key"])
if d:
return d["data"]
j = requests.get("http://127.0.0.1:9080" + url).text
if j:
j = _dec(j)
r[url] = j
jl = json.loads(j)
if proc:
jl = proc(jl)
exp = 3600 if "err" not in jl else 5
r.expire(url, exp)
if mongo and "err" not in jl:
doc = dict(data=jl, **mongo["key"])
coll.insert_one(doc)
return jl
else:
raise RuntimeError(j)
def _add_team(r, teams):
s = r["Standing"]
s["Team"] = teams[r["@attributes"]["TeamRef"]]
return s
def rget(feed, season=YEAR, cid="null", team="null", gid="null",
player="null"):
url = "/f/{}/{}/{}/{}/{}".format(feed, season, cid, team, gid, player)
return bget(url)
def comps(match=None, season=YEAR, raw=False):
url = "/comps/{}".format(season)
coll = mc("md_comps")
if coll.find_one({"season": season}) is None:
j = bget(url, proc=lambda x: bproc(x, "comps"))
try:
coll.insert_many(j, ordered=False)
except merr.BulkWriteError:
pass
else:
j = list(coll.find({"season": season}, {"_id": False}))
if match is not None:
if type(match) is int:
j = [c for c in j if c.get("id") == match]
else:
rxp = re.compile(fnmatch.translate(match),
flags=re.IGNORECASE)
j = [c for c in j if rxp.match(c.get("full")) or
("short" in c and rxp.match(c.get("short")))]
if raw:
return j
else:
if len(j) == 1:
j = j[0]
if "_id" in j:
del j["_id"]
return Competition(**j, season=season)
else:
return (
pd.DataFrame(j, columns=["id", "full", "short", "abbr"])
.set_index("id").sort_index()
)
def teams(cid, season=YEAR):
url = "/teams/{}/{}".format(cid, season)
mongo = {"key": {"cid": cid, "season": season}, "coll": "md_clubs"}
j = bget(url, mongo=mongo, proc=lambda x: bproc(x, "teams"))
return {int(d["id"]): {k: d[k] for k in d if k != "id"} for d in j}
def team(cid, team, season=YEAR):
"""This needs a custom Mongo importer"""
url = "/team/{}/{}/{}".format(cid, team, season)
j = bget(url, proc=lambda x: bproc(x, "team"))
coll = mc("md_players")
try:
coll.insert_many(j, ordered=False)
except merr.BulkWriteError:
pass
return {int(d["id"]): {k: d[k] for k in d if k not in {"id",
"_id"}} for d in j}
def player(pid, mconn=None):
mconn = (mconn or mc()).get_collection("md_players")
if type(pid) in {int, str}:
pid = int(pid)
d = mconn.find_one({"id": pid})
if d is None:
return None
del d["_id"]
return d
elif hasattr(pid, "__iter__"):
l = []
pid = [int(k) for k in pid]
for d in mconn.find({"id": {"$in": pid}}):
if d is not None:
del d["_id"]
l.append(d)
if len(l) == 1:
l = l[0]
return l
def stats(cid, team, season=YEAR):
url = "/stats/{}/{}/{}".format(cid, team, season)
return bget(url)
def parse_game(g, teams=None):
mi = g["MatchInfo"]
gid = int(g["@attributes"]["uID"][1:])
dt = odate(mi["dateObj"]["locale"])
d = {"gid": gid, "dt": dt, "day": int(mi["@attributes"]["MatchDay"])}
for sc in g["TeamData"]:
sc = sc["@attributes"]
s = sc["Side"].lower()
team = int(sc["TeamRef"][1:])
d[s + "_id"] = team
if teams:
d[s] = teams[team].get("short") or teams[team]["full"]
if sc["Score"] is not None:
d[s + "_score"] = int(sc["Score"])
return d
def games(cid=21, season=YEAR, ft=True, how="df"):
_val(how)
gms = bget(f"/games/{cid}/{season}")["OptaFeed"]["OptaDocument"]
if how == "raw":
return gms
gms = gms["MatchData"]
gms = [k for k in gms if
(ft and k["MatchInfo"]["@attributes"]["Period"] == "FullTime")
or (not ft)]
if how == "proc":
return gms
ts = teams(cid, season)
columns = ["gid", "day", "dt", "home", "home_score",
"away_score", "away", "home_id", "away_id"]
return pd.DataFrame([parse_game(k, ts) for k in gms],
columns=columns).set_index("gid")
def scorers(cid=21, season=YEAR, how="df"):
gs = games(cid, season=season, how="raw")
ts = {}
for g in gs["MatchData"]:
for t in g["TeamData"]:
a = t["@attributes"]
team = int(a["TeamRef"][1:])
side = a["Side"].lower()
goals = [{"pl": int(gl["@attributes"]["PlayerRef"][1:]),
"type": gl["@attributes"]["Type"].lower(),
"side": side} for gl in
t["Goal"]]
if team not in ts:
ts[team] = {}
for gl in goals:
if gl["type"] == "own":
continue
if gl["type"] not in {"penalty", "goal"}:
print(gl["type"])
if gl["pl"] not in ts[team]:
ts[team][gl["pl"]] = {"p": 0, "g": 0}
ts[team][gl["pl"]][
"p" if gl["type"] == "penalty" else "g"] += 1
if how == "raw":
return ts
l = []
for t in ts:
tn = teams(cid, season=season)[t]["full"]
for p in ts[t]:
pl = player(p)
if pl:
pl = pl.get("known") or pl.get("last")
else:
pl = p
l.append({"team": tn, "player": pl,
"g": ts[t][p]["g"] + ts[t][p]["p"],
"p": ts[t][p]["p"]})
return (
pd.DataFrame(l, columns=["player", "team", "g", "p"])
.sort_values("g", ascending=False)
.reset_index(drop=True)
)
def tab(cid=21, season=YEAR, how="df"):
_val(how)
f = bget(f"/table/{cid}/{season}")
if how == "raw":
return f
f = f["OptaFeed"]["OptaDocument"]
t = {k["@attributes"]["uID"]: k["nameObj"].get("short")
or k["Name"] for k in f["Team"]}
f = f["Competition"]["TeamStandings"]
cols = ["Position", "Team", "Points", "Played",
"Won", "Drawn", "Lost", "For", "Against",
"HomeWon", "HomeDrawn", "HomeLost", "HomeFor", "HomeAgainst",
"AwayWon", "AwayDrawn", "AwayLost", "AwayFor", "AwayAgainst",
"RelegationAverage"]
if type(f) is dict:
f = f["TeamRecord"]
l = [_add_team(r, t) for r in f]
if how == "proc":
return l
else:
return pd.DataFrame(l, columns=cols).set_index("Position")
else:
l = {}
for g in f:
gr = g["Round"]["Name"]["@value"]
rs = g["TeamRecord"]
l[gr] = [_add_team(r, t) for r in rs]
if how == "proc":
return l
else:
# noinspection PyUnresolvedReferences
# (the original expression is truncated here; a per-round concat is the evident intent)
return pd.concat([pd.DataFrame(l[k], columns=cols) for k in l], keys=list(l))
# Visualize streamflow time series and fill missing data
# Script written in Python 3.7
import config as config
import numpy as np
import pandas as pd
import tempfile
import datetime
from sklearn.svm import SVR
import geopandas as gpd
from sklearn.metrics import mean_squared_error as mse
import matplotlib.pyplot as plt
import importlib
# ======================================================================================================================
tmp_dir = tempfile.mkdtemp()
# =======================================================================
# Visualizing streamflow
# =======================================================================
flow_path = config.streamflow
# flow = pd.read_csv(str(flow_path))
# flow = pd.read_csv(str(flow_path), header=0, squeeze=True)
flow = pd.read_csv(str(flow_path), usecols=['Date', 'Flow_cfs'], parse_dates=True, index_col=0)
quality = pd.read_csv(str(flow_path), usecols=['Date', 'Quality'], parse_dates=True, index_col=0)
# Convert streamflow from cfs to mm/day
# 2.446576 ft3/sec = 1m3/35.314667ft3 * 1/km2 * 86400sec/1day * 1km2/1000000m2 * 1000mm/1m
ft3_sec = (1/35.314667) * 86400 * (1/1000000) * 1000
area = 13.7393 # area of upstream Ellsworth watershed, sq. km
flow['flow_mm_day'] = (flow['Flow_cfs'] / area) * ft3_sec
flow.drop('Flow_cfs', axis=1, inplace=True)
# Expand date range to include every day of all the years present
begin = '01-01-{}'.format(flow.index.to_frame()['Date'].min().year)
end = '12-31-{}'.format(flow.index.to_frame()['Date'].max().year)
rng = pd.date_range(begin, end)
df = pd.DataFrame(index=rng)
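# --- Editor's aside (hedged sketch; the original script is truncated at this point) ------------
# One plausible next step, given the comments above: align the observed flow with the complete
# daily index so that the gaps to be filled become explicit NaNs. This is illustrative only, not
# the author's original continuation.
df = df.join(flow)                                   # one row per calendar day, NaN where flow is missing
missing_days = int(df['flow_mm_day'].isna().sum())
print('Days of missing streamflow to fill:', missing_days)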
# coding:utf-8
# This file is part of Alkemiems.
#
# Alkemiems is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
__author__ = '<NAME>'
__version__ = 1.0
__maintainer__ = '<NAME>'
__email__ = "<EMAIL>"
__date__ = '2021/06/10 16:29:05'
import numpy as np
import pandas as pd
def read_data(csv_file):
data = pd.read_csv(csv_file)
return data
def norepeat_randint(data: list, ratio=0.3):
"""
Split the input list into a training set and a test set.
:param data: original data
:param ratio: fraction to use as the test set
:return: test-set indices
"""
num = round(len(data) * ratio)
a = np.random.randint(0, len(data), num)
if a.shape[0] != 0:
if len(list(set(a.tolist()))) != num:
dd = list(range(len(data)))
np.random.shuffle(dd)
return dd[:num]
else:
return a
else:
return None
def get_train_test_index(data, column_index, ratio, to_data=False):
"""
Read the csv data, group it by the given columns, split every group into a training set and
a test set, and return the test-set indices (or the data itself).
:param data: input DataFrame
:param column_index: list of column names to group by
:param ratio: fraction of each group to use as the test set
:param to_data: whether to return the data itself instead of the indices
:return: training and test indices, or training and test DataFrames
"""
dd = data.groupby(column_index)
np.random.seed(31)
test_index = []
for i, j in dd.groups.items():
d = j[norepeat_randint(j, ratio=ratio)]
test_index.extend(d.tolist())
c = np.ma.array(data.index.tolist(), mask=False)
c.mask[test_index] = True
train_index = c.compressed().tolist()
if to_data:
return pd.DataFrame(data.iloc[train_index].values, columns=data.columns)
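# --- Editor's aside (hedged usage sketch, not part of the original module) ---------------------
# Intended call pattern, assuming a CSV with a categorical column "label" to stratify on and that
# the (truncated) return statement above ultimately yields a train/test pair; the file name and
# column name are illustrative only:
#   data = read_data("dataset.csv")
#   train_df, test_df = get_train_test_index(data, ["label"], ratio=0.3, to_data=True)
#   train_idx, test_idx = get_train_test_index(data, ["label"], ratio=0.3, to_data=False)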
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Created by <NAME> (<EMAIL>)
# Created On: 2020-2-24
# ------------------------------------------------------------------------------
import cv2
import random
import json
import logging
import numpy as np
import os
import os.path as osp
import pandas as pd
import torch
from torch.utils.data import Dataset
class TrackletpairDataset(Dataset):
"""get tracklet pair batch for tracklet pair connectivity training.
Args:
crop_data_root: abs path for best.
will there be only blanks between the two tracklets in one pair, not at the side?
"""
def __init__(self,
crop_data_root,
transform=None,
window_length=64,
stride = 4,
is_train=True,):
self.transform = transform
self.window_length = window_length # temporal window length to cover the tracklet pair.
self.stride = stride # temporal window sliding stride
if is_train:
self.mode = 'train'
self.data_root = osp.join(crop_data_root, 'training')
else:
self.mode = 'eval'
self.data_root = osp.join(crop_data_root, 'eval')
tracklet_pair_path = osp.join(self.data_root, 'tracklet_pair.txt')
# read track json file.
track_json_file = osp.join(self.data_root, f'gt_{self.mode}_track_dict.json')
with open(track_json_file, 'r') as f:
self.track_dict = json.load(f)
if not osp.exists(tracklet_pair_path):
pair_f = open(tracklet_pair_path, 'a')
pair_count = 0
# generate tracklet pairs [video_name_1, track_id_1, start_frame_id_1, end_frame_id_1,
# video_name_2, track_id_2, start_frame_id_2, end_frame_id_2, connectivity(0 or 1)]
for video_name in self.track_dict.keys():
for track_id in self.track_dict[video_name].keys():
now_frame_list = sorted(list(map(int, self.track_dict[video_name][track_id][1])))
frame_window_list = []
# sliding temporal window for tracklet pair sampling
while len(now_frame_list) >= self.window_length:
frame_window_list.append([now_frame_list[0], now_frame_list[self.window_length-1]])
now_frame_list = now_frame_list[self.stride:]
for frame_window in frame_window_list:
for i in range((frame_window[1]-frame_window[0])*4):
start_frame_id_1 = frame_window[0]
end_frame_id_1, start_frame_id_2, end_frame_id_2 = sorted(random.sample(range(frame_window[0]+1, frame_window[1]+1), 3))
# write connected pair to tracklet_pair_path
pair_f.write(f'{video_name},{track_id},{start_frame_id_1},{end_frame_id_1},{video_name},{track_id},{start_frame_id_2},{end_frame_id_2},{1}\n')
pair_count += 1
# non connected pair in other track
other_video_name = random.sample(self.track_dict.keys(), 1)[0]
other_track_id_set = set(self.track_dict[other_video_name].keys())
if other_video_name == video_name:
other_track_id_set = other_track_id_set - set(track_id)
other_track_id = random.sample(list(other_track_id_set), 1)[0]
other_frame_window = sorted(self.track_dict[other_video_name][other_track_id][1])
if start_frame_id_1 >= other_frame_window[0] and end_frame_id_1 <= other_frame_window[-1]:
pair_f.write(f'{other_video_name},{other_track_id},{start_frame_id_1},{end_frame_id_1},{video_name},{track_id},{start_frame_id_2},{end_frame_id_2},{0}\n')
pair_count += 1
if start_frame_id_2 >= other_frame_window[0] and end_frame_id_2 <= other_frame_window[-1]:
pair_f.write(f'{video_name},{track_id},{start_frame_id_1},{end_frame_id_1},{other_video_name},{other_track_id},{start_frame_id_2},{end_frame_id_2},{0}\n')
pair_count += 1
pair_f.close()
print("Having written %d tracklet pairs" %(pair_count))
with open(tracklet_pair_path, 'r') as f:
self.tracklet_path_list = f.readlines()
print(f'Loading {self.mode} tracklet pairs %d' %(len(self.tracklet_path_list)))
def get_crop_path(self, video_name, track_id, frame_id, class_name):
track_path = osp.join(self.data_root, video_name, str(track_id))
crop_name = f'{class_name}_{frame_id}_crop.jpg'
crop_path = osp.join(track_path, crop_name)
return crop_path
def __len__(self):
return len(self.tracklet_path_list)
def __getitem__(self, index):
"""
Return:
img_1: (frame_window_len, size, size, 3).
img_2: (frame_window_len_2, size, size, 3).
loc_mat: (frame_window_len, 4)
tracklet_mask_1: (frame_window_len, 1).
tracklet_mask_2: (frame_window_len, 1).
real_window_len: <int>
connectivity: (1, 1) LongTensor
"""
tracklet_info = self.tracklet_path_list[index].split()[0].split(',')
video_name_1,track_id_1,start_frame_id_1,end_frame_id_1 = tracklet_info[:4]
video_name_2,track_id_2,start_frame_id_2,end_frame_id_2 = tracklet_info[4:8]
connectivity = tracklet_info[8]
img_1 = []
img_2 = []
loc_mat = np.zeros((self.window_length, 4))
tracklet_mask_1 = np.zeros((self.window_length, 1))
tracklet_mask_2 = np.zeros((self.window_length, 1))
# get img_1, loc_mat for img1 and tracklet_mask_1
tracklet_info_1 = self.track_dict[video_name_1][track_id_1]
class_name_1 = tracklet_info_1[0]
assert len(tracklet_info_1[1]) == len(tracklet_info_1[2]) == len(tracklet_info_1[3]) == len(tracklet_info_1[4]) == len(tracklet_info_1[5])
start_frame_id_1 = int(start_frame_id_1)
end_frame_id_1 = int(end_frame_id_1)
for frame_id in range(start_frame_id_1, end_frame_id_1+1): # frame_id start from 0
img_path = self.get_crop_path(video_name_1, track_id_1, frame_id, class_name_1)
if not osp.exists(img_path):
logging.error("Cannot found image data: " + img_path)
raise FileNotFoundError
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if self.transform is not None:
img_1.append(self.transform(img))
frame_idx = tracklet_info_1[1].index(frame_id)
loc_mat[frame_id-start_frame_id_1][0] = float(tracklet_info_1[2][frame_idx])
loc_mat[frame_id-start_frame_id_1][1] = float(tracklet_info_1[3][frame_idx])
loc_mat[frame_id-start_frame_id_1][2] = float(tracklet_info_1[4][frame_idx]) - float(tracklet_info_1[2][frame_idx])
loc_mat[frame_id-start_frame_id_1][3] = float(tracklet_info_1[5][frame_idx]) - float(tracklet_info_1[3][frame_idx])
tracklet_mask_1[frame_id-start_frame_id_1] = 1
# get img_2, loc_mat for img2 and tracklet_mask_2
tracklet_info_2 = self.track_dict[video_name_2][track_id_2]
class_name_2 = tracklet_info_2[0]
assert len(tracklet_info_2[1]) == len(tracklet_info_2[2]) == len(tracklet_info_2[3]) == len(tracklet_info_2[4]) == len(tracklet_info_2[5])
start_frame_id_2 = int(start_frame_id_2)
end_frame_id_2 = int(end_frame_id_2)
for frame_id in range(start_frame_id_2, end_frame_id_2+1): # frame_id start from 0
img_path = self.get_crop_path(video_name_2, track_id_2, frame_id, class_name_2)
if not osp.exists(img_path):
logging.error("Cannot found image data: " + img_path)
raise FileNotFoundError
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if self.transform is not None:
img_2.append(self.transform(img))
frame_idx = tracklet_info_2[1].index(frame_id)
loc_mat[frame_id-start_frame_id_1][0] = float(tracklet_info_2[2][frame_idx])
loc_mat[frame_id-start_frame_id_1][1] = float(tracklet_info_2[3][frame_idx])
loc_mat[frame_id-start_frame_id_1][2] = float(tracklet_info_2[4][frame_idx]) - float(tracklet_info_2[2][frame_idx])
loc_mat[frame_id-start_frame_id_1][3] = float(tracklet_info_2[5][frame_idx]) - float(tracklet_info_2[3][frame_idx])
tracklet_mask_2[frame_id-start_frame_id_1] = 1
img_1 = torch.stack(img_1)
img_2 = torch.stack(img_2)
real_window_len = min(self.window_length, end_frame_id_2-start_frame_id_1+1)
loc_mat[0][np.where(loc_mat[0]==0)] = 1e-3
loc_mat[-1][np.where(loc_mat[-1]==0)] = 1e-3
loc_mat = pd.DataFrame(data=loc_mat)
# -*- coding: utf-8 -*-
"""Revolving credit.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1g-iUOJyARAnpOuEepyI7-N48uzW1oHYL
# Financial Project
## The Data
Revolving credit
### Business Objective:
Revolving credit means you're borrowing against a line of credit. Let's say a lender extends a
certain amount of credit to you, against which you can borrow repeatedly. The amount of
credit you're allowed to use each month is your credit line, or credit limit. You're free to use as
much or as little of that credit line as you wish on any purchase you could make with cash. Its
just like a credit card and only difference is they have lower interest rate and they are secured
by business assets.
At the end of each statement period, you receive a bill for the balance. If you don't pay it
off in full, you carry the balance, or revolve it, over to the next month and pay interest on
any remaining balance. As you pay down the balance, more of your credit line becomes
available and usually its useful for small loans
For a bank or investor offering revolving credit, higher interest rates and convenience fees can
be charged because there is substantial risk that the customer will not repay the amount. Our
company wants to predict the revolving balance maintained by each customer so that marketing
strategies can be tailored individually.
### Acceptance criteria:
Should get the least possible RMSE and the model should be deployed
using Flask/ RShiny/Heroku.
### Data Overview
----
-----
Here is the information on this particular Revolving credit data set:
Data Set Details: This dataset consists of 2300 observations
###### member_id ---- unique ID assigned to each member
###### loan_amnt ---- loan amount (dollars) applied for by the member
###### terms: ---- term of loan (in months)
###### batch_ID ---- batch numbers allotted to members
###### Rate_of_intrst: ---- interest rate (%) on loan
###### Grade: ---- grade assigned by the bank
###### sub_grade: ---- grade assigned by the bank
###### emp_designation ---- job / Employer title of member
###### Experience: ---- employment length, where 0 means less than one year and 10 means ten or more years
###### home_ownership ---- status of home ownership
###### annual_inc: ---- annual income (dollars) reported by the member
###### verification_status ---- status of income verified by the bank
###### purpose ---- purpose of loan
###### State: ---- living state of member
###### debt-to-income ratio : ---- ratio of member's total monthly debt
###### Delinquency of past 2 years: ---- ( failure to pay an outstanding debt by due date)
###### inq_6mths: ---- Inquiries made in past 6 months
###### total_months_delinq : ---- number of months since last delinq
###### Nmbr_months_last_record: ---- number of months since last public record
###### Numb_credit_lines: ---- number of open credit line in member's credit line
###### pub_rec ---- number of derogatory public records
###### Tota_credit_revolving_balance: ---- total credit revolving balance
###### total_credits: ---- total number of credit lines available in members credit line
###### list_status ---- unique listing status of the loan - W(Waiting),F(Forwarded)
###### int_rec: ---- Total interest received till date
###### late_fee_rev: ---- Late fee received till date
###### recov_chrg: ---- post charge off gross recovery
###### collection_recovery_fee ---- post charge off collection fee
###### exc_med_colle_12mon: ---- number of collections in last 12 months excluding medical collections
###### since_last_major_derog: ---- months since most recent 90 day or worse rating
###### application_type ---- indicates whether the member is an individual or joint applicant
###### verification_status_joint ---- indicates if the joint members income was verified by the bank
###### last_pay_week: ---- indicates how long (in weeks) a member has paid EMI after batch enrolled
###### nmbr_acc_delinq: ---- number of accounts on which the member is delinquent
###### colle_amt: ---- total collection amount ever owed
###### curr_bal: ---- total current balance of all accounts
# **Starter Code**
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from google.colab import drive
drive.mount('/content/drive')
"""# **Getting the Data**
Using pandas to read Report.csv as a dataframe called dataset.
"""
dataset = pd.read_csv("/content/drive/My Drive/3 Data Science/Projects/Excelr Projects/2. Financial Analysis/Report.csv",encoding='latin1')
"""** Check out the info(), head(),columns,shape,type,len and describe() methods on dataset.**"""
dataset.head()
dataset.describe().T
dataset.info()
dataset.nunique()
type(dataset)
len(dataset)
dataset.shape
df = dataset.sample(frac=0.5,random_state=101)
df_1 = dataset.sample(frac=0.5,random_state=101)
print(len(df),len(df_1))
"""# **Columns Names**
## Original Dataset
"""
dataset.columns
dataset.select_dtypes(['object']).columns
"""## df Dataset"""
df.columns
df.select_dtypes(exclude='object').columns
df.select_dtypes(include='object').columns
"""## df_1 Dataset"""
df_1.columns
df_1.select_dtypes(exclude='object').columns
df_1.select_dtypes(include='object').columns
df_1.shape
"""# **Project Starts**
## **Section 1:** ***Exploratory Data Analysis***
**OVERALL GOAL: Get an understanding for which variables are important, view summary statistics, and visualize the data**
----
### Feature Understanding - Describe
1. total revol_bal -> Output Variable (y)
2. total revol_bal -> Input Variable (X)
3. annual_inc -> Input Variable (X)
4. debt_income_ratio -> Input Variable (X)
5. delinq_2yrs -> Input Variable (X)
6. And Many more
"""
df['total revol_bal'].describe()
df.loc[(df['total revol_bal'] >= 2.077800e+04)].describe().T
2.568995e+06
# df.loc[(df['delinq_2yrs'] >= 9)].count()#,'delinq_2yrs']#.sort_values(ascending=True)
df.loc[(df['debt_income_ratio'] >= 125.25 )] #,'debt_income_ratio']
"""### Outlier Correction - Balancing The Data Points
1. total revol_bal
2. annual_inc
3. debt_income_ratio
4. delinq_2yrs
##### **total revol_bal**
### 40350.00 => this cap is the 75th percentile (Q3) of the observations whose values lie above the Q3 of the full dataset
"""
df.loc[(df['total revol_bal'] >= 40350.00),'total revol_bal']=40350.00
"""##### **annual_inc**
### 120000 => this cap is the 75th percentile (Q3) of the observations whose values lie above the Q3 of the full dataset
"""
df.loc[(df['annual_inc'] >= 120000),'annual_inc']=120000
"""##### **debt_income_ratio**
125.25 => beyond this value the series jumps abruptly, increasing roughly three-fold.
Total number of observations affected => 3
"""
df.loc[(df['debt_income_ratio'] >= 125.25),'debt_income_ratio']=125.25
"""##### **delinq_2yrs**
10 => beyond this value the series jumps abruptly, increasing roughly three-fold.
Total number of observations affected => 283
"""
df.loc[(df['delinq_2yrs'] >= 10),'delinq_2yrs']=10
"""## **Section 2:** ***Feature Engineering***
### **last_week_pay**
##### Extracting the No's of weeks
##### Eventually droping the last last_week_pay
"""
df['last_week_pay_nos'] = df['last_week_pay'].apply(lambda last_week_pay:last_week_pay[:2])
df['last_week_pay_nos'] = df['last_week_pay_nos'].replace(['9t','4t','0t','8t','NA'],['9','4','0','8','0'])
df['last_week_pay_nos'].unique()
df.drop(['last_week_pay'],axis=1,inplace=True)
df["last_week_pay_nos"]= df["last_week_pay_nos"].astype(int)
df["last_week_pay_nos"].describe()
"""### **Experience**
##### Converting the Experience into numerical values
##### Eventually dropping the original Experience column
"""
df['Experience'].unique()
df['Experience_status'] = df['Experience'].map({'9 years':9, '< 1 year':0, '2 years':2, '10+ years':10, '5 years':5,
'8 years':8, '7 years':7, '4 years':4,'1 year':1, '3 years':3,
'6 years':6})
df[['Experience_status','Experience']]
"""### **home_ownership**
##### Replacing the "None nad Any" values to "Others" by using the replace function
"""
df['home_ownership']=df['home_ownership'].replace(['NONE', 'ANY'],'OTHER')
"""## **Section 3:** ***Plots***
Will try to understand the variables are distributed
### Initial Plots **(df)**
#### Heat Map - Correlation
Original Dataset
"""
plt.figure(figsize=(22,18))
sns.heatmap(dataset.corr(),annot=True,cmap='viridis')
"""df dataset"""
plt.figure(figsize=(22,18))
sns.heatmap(df.corr(),annot=True,cmap='viridis')
"""#### Heat Map - Null Values
**Original Dataset**
"""
plt.figure(figsize=(20,12))
sns.heatmap(dataset.isnull(),cmap='viridis')
"""**df Dataset**"""
plt.figure(figsize=(20,12))
sns.heatmap(df.isnull(),cmap='viridis')
"""#### Distribution Plot"""
fig, axes = plt.subplots(3, 3,figsize=(20,12))
fig.suptitle('Continous features')
axes[0, 0].hist(df['loan_amnt '],bins=10,label='loan_amnt')
axes[0, 1].hist(df['Rate_of_intrst'],bins=10)
axes[0, 2].hist(df['annual_inc'],bins=10)
axes[1, 0].hist(df['debt_income_ratio'],bins=10,log=True)
axes[1, 1].hist(df['total revol_bal'],bins=10)
axes[1, 2].hist(df['total_credits'],bins=10)
axes[2, 0].hist(df['tot_curr_bal'],bins=10,log=True)
axes[2, 1].hist(df['last_week_pay_nos'],bins=10)
axes[2, 2].hist(df['Experience_status'],bins=10)
"""#### Subplot - Seaborn - Mix Plot"""
fig, axes = plt.subplots(3, 3, figsize=(24, 12))
fig.suptitle('loan_amnt & Rate_of_intrst &annual_inc with total revol_bal')
sns.distplot(df['loan_amnt '],bins=30,ax=axes[0, 0],color='darkorchid')
sns.scatterplot(x='loan_amnt ', y='total revol_bal',data=df,hue='terms', ax=axes[0, 1],color='darkslateblue')
sns.countplot(x='grade',data=df,hue='terms', ax=axes[0, 2],color='greenyellow')
sns.distplot(df['Rate_of_intrst'],bins=30, ax=axes[1, 0],color='darkorchid')
sns.scatterplot(x='Rate_of_intrst', y='total revol_bal',data=df,hue='terms', ax=axes[1, 1],color='darkslateblue')
sns.countplot(x='home_ownership',data=df,hue='terms', ax=axes[1, 2],color='darkorange')
sns.distplot(df['annual_inc'],bins=30,ax=axes[2, 0],color='darkorchid')
sns.scatterplot(x='annual_inc', y='total revol_bal',data=df,hue='terms', ax=axes[2, 1],color='darkslateblue')
sns.countplot(x='initial_list_status',data=df,hue='terms', ax=axes[2, 2],color='saddlebrown')
"""#### Subplot - Seaborn - Box Plot"""
fig, axes = plt.subplots(4, 3, figsize=(24, 12))
fig.suptitle('loan_amnt & Rate_of_intrst &annual_inc with few of the catagorical variables')
sns.boxplot(x='Experience_status',y='loan_amnt ',data=df,ax=axes[0, 0],color='darkorchid',hue='terms')
sns.boxplot(x='Experience_status',y='total revol_bal',data=df, ax=axes[0, 1],hue='terms')
sns.boxplot(x='Experience_status',y='annual_inc',data=df, ax=axes[0, 2],color='greenyellow',hue='terms')
sns.boxplot(x='grade',y='loan_amnt ',data=df, ax=axes[1, 0],color='darkorchid',hue='terms')
sns.boxplot(x='grade',y='total revol_bal',data=df, ax=axes[1, 1],hue='terms')
sns.boxplot(x='grade',y='annual_inc',data=df, ax=axes[1, 2],color='darkorange',hue='terms')
sns.boxplot(x='home_ownership',y='loan_amnt ',data=df,ax=axes[2, 0],color='darkorchid',hue='terms')
sns.boxplot(x='home_ownership',y='total revol_bal',data=df, ax=axes[2, 1],hue='terms')
sns.boxplot(x='home_ownership',y='annual_inc',data=df, ax=axes[2, 2],color='saddlebrown',hue='terms')
sns.boxplot(x='initial_list_status',y='loan_amnt ',data=df,ax=axes[3, 0],color='darkorchid',hue='terms')
sns.boxplot(x='initial_list_status',y='total revol_bal',data=df, ax=axes[3, 1],hue='terms')
sns.boxplot(x='initial_list_status',y='annual_inc',data=df, ax=axes[3, 2],color='saddlebrown',hue='terms')
"""#### Subplot - Seaborn - Scatter Plot"""
fig, axes = plt.subplots(3,5, figsize=(24, 12))
fig.suptitle('total revol_bal & annual_inc & loan_amnt')
sns.scatterplot(x='total revol_bal', y='debt_income_ratio',data=df,hue='terms', ax=axes[0, 0],color='darkslateblue')
sns.scatterplot(x='total revol_bal', y='delinq_2yrs',data=df,hue='terms', ax=axes[0, 1],color='darkslateblue')
sns.scatterplot(x='total revol_bal', y='inq_last_6mths',data=df,hue='terms', ax=axes[0, 2],color='darkslateblue')
sns.scatterplot(x='total revol_bal', y='numb_credit',data=df,hue='terms', ax=axes[0, 3],color='darkslateblue')
sns.scatterplot(x='total revol_bal', y='tot_curr_bal',data=df,hue='terms', ax=axes[0, 4],color='darkslateblue')
sns.scatterplot(x='annual_inc', y='debt_income_ratio',data=df,hue='terms', ax=axes[1, 0],color='darkslateblue')
sns.scatterplot(x='annual_inc', y='delinq_2yrs',data=df,hue='terms', ax=axes[1, 1],color='darkslateblue')
sns.scatterplot(x='annual_inc', y='inq_last_6mths',data=df,hue='terms', ax=axes[1, 2],color='darkslateblue')
sns.scatterplot(x='annual_inc', y='numb_credit',data=df,hue='terms', ax=axes[1, 3],color='darkslateblue')
sns.scatterplot(x='annual_inc', y='tot_curr_bal',data=df,hue='terms', ax=axes[1, 4],color='darkslateblue')
sns.scatterplot(x='loan_amnt ', y='debt_income_ratio',data=df,hue='terms', ax=axes[2, 0],color='darkslateblue')
sns.scatterplot(x='annual_inc', y='delinq_2yrs',data=df,hue='terms', ax=axes[2, 1],color='darkslateblue')
sns.scatterplot(x='loan_amnt ', y='inq_last_6mths',data=df,hue='terms', ax=axes[2, 2],color='darkslateblue')
sns.scatterplot(x='loan_amnt ', y='numb_credit',data=df,hue='terms', ax=axes[2, 3],color='darkslateblue')
sns.scatterplot(x='loan_amnt ', y='tot_curr_bal',data=df,hue='terms', ax=axes[2, 4],color='darkslateblue')
"""#### Subplot - Seaborn - Distrubution Plot / Count Plot"""
fig, axes = plt.subplots(4,5, figsize=(24, 16))
fig.suptitle('total revol_bal & annual_inc & loan_amnt')
sns.distplot(df['loan_amnt '],bins=50,ax=axes[0, 0],color='darkslateblue')
sns.distplot(df['Rate_of_intrst'],bins=50,ax=axes[0, 1],color='darkslateblue')
sns.distplot(df['annual_inc'],bins=50,ax=axes[0, 2],color='darkslateblue')
sns.distplot(df['debt_income_ratio'],bins=50,ax=axes[0, 3],color='darkslateblue')
sns.distplot(df['delinq_2yrs'],bins=50,ax=axes[0, 4],color='darkslateblue')
sns.distplot(df['inq_last_6mths'],bins=50,ax=axes[1, 0],color='darkorchid')
sns.distplot(df['mths_since_last_delinq'],bins=50,ax=axes[1, 1],color='darkorchid')
sns.distplot(df['mths_since_last_record'],bins=50,ax=axes[1, 2],color='darkorchid')
sns.distplot(df['numb_credit'],bins=50,ax=axes[1, 3],color='darkorchid')
sns.distplot(df['total revol_bal'],bins=50,ax=axes[1, 4],color='darkorchid')
sns.distplot(df['total_credits'],bins=50,ax=axes[2, 0],color='saddlebrown')
sns.distplot(df['total_rec_int'],bins=50,ax=axes[2, 1],color='saddlebrown')
sns.distplot(df['total_rec_late_fee'],bins=50,ax=axes[2, 2],color='saddlebrown')
sns.distplot(df['recoveries'],bins=50,ax=axes[2, 3],color='saddlebrown')
sns.distplot(df['collection_recovery_fee'],bins=50,ax=axes[2, 4],color='saddlebrown')
sns.distplot(df['collections_12_mths_ex_med'],bins=50,ax=axes[3, 0])
sns.distplot(df['mths_since_last_major_derog'],bins=50,ax=axes[3, 1])
sns.distplot(df['acc_now_delinq'],bins=50,ax=axes[3, 2])
sns.distplot(df['tot_colle_amt'],bins=50,ax=axes[3, 3])
sns.distplot(df['tot_curr_bal'],bins=50,ax=axes[3, 4])
"""### After Cleanup Plots (df_1)
#### Pair Plot - Seaborn
"""
"""### Testing"""
g = sns.FacetGrid(df,row='terms', col="Experience_status",)
g.map(sns.scatterplot, "total revol_bal", "annual_inc", alpha=.7)
sns.countplot(x='verification_status',hue='terms',data=df)
"""###### Source verified and verified must be having a similar understanding so just we need to rename the source verified data"""
sns.countplot(x='grade',hue='initial_list_status',data=dataset)
"""## **Section 4:** ***Missing Data Points***
### **Understanding The Missing Data Points**
##### Understanding the impact of all the missing data points, then taking the appropriate action based on that understanding
1. Dropping the missing values
2. Imputation
"""
df.isnull().sum()
100*df.isnull().sum()/len(df)
"""### **Imputation**
##### Imputation based on Mean, Median & Mode
#### **Experience_status**
##### based on **annual_inc (Mean)**
"""
df.groupby('annual_inc')['Experience_status'].mean()
total_annual_inc_Experience_status_avg = df.groupby('annual_inc').mean()['Experience_status']
def fill_Experience_status(annual_inc,Experience_status):
if np.isnan(Experience_status):
return total_annual_inc_Experience_status_avg[annual_inc]
else:
return Experience_status
df['Experience_status'] = df.apply(lambda x: fill_Experience_status(x['annual_inc'], x['Experience_status']), axis=1)
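# Editor's aside (hedged note): the same group-mean imputation can be expressed without the
# row-wise apply by using groupby().transform; shown only as an equivalent alternative sketch.
# df['Experience_status'] = df['Experience_status'].fillna(
#     df.groupby('annual_inc')['Experience_status'].transform('mean'))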
df['Experience_status'].dropna().astype(int)
df['Experience_status'] = df['Experience_status'].dropna().astype(int)
df.shape
"""Still after imputation around 4K missing values were remaining which were drop as it was less than 1% of the total dataset"""
df.dropna(subset=['Experience_status'],inplace=True)
"""#### **tot_curr_bal**
##### based on **Experience_status (median)**
There is a good correlation between annual_inc and tot_curr_bal, so we impute the total current balance based on annual income, which also makes sense logically since the balance a person maintains depends on their income. Because income tends to increase with years of experience, we use Experience as the grouping variable.
"""
df.groupby('Experience_status')['tot_curr_bal','annual_inc'].median()
total_Experience_status_tot_curr_bal_avg = df.groupby('Experience_status').mean()['tot_curr_bal']
def fill_tot_curr_bal(Experience_status,tot_curr_bal):
if np.isnan(tot_curr_bal):
return total_Experience_status_tot_curr_bal_avg[Experience_status]
else:
return tot_curr_bal
df['tot_curr_bal'] = df.apply(lambda x: fill_tot_curr_bal(x['Experience_status'], x['tot_curr_bal']), axis=1)
"""#### **tot_colle_amt**
##### based on **loan_amnt (Mean)**
The total amount collected usually depends on the loan amount and the number of credits allocated to the customer, so we use the loan amount for the imputation.
"""
df.groupby('loan_amnt ')['tot_colle_amt'].mean()
total_Loan_amnt_tot_colle_amt_avg = df.groupby('loan_amnt ').mean()['tot_colle_amt']
def fill_tot_colle_amt(Loan_amnt,tot_colle_amt):
if np.isnan(tot_colle_amt):
return total_Loan_amnt_tot_colle_amt_avg[Loan_amnt]
else:
return tot_colle_amt
df['tot_colle_amt'] = df.apply(lambda x: fill_tot_colle_amt(x['loan_amnt '], x['tot_colle_amt']), axis=1)
"""#### **mths_since_last_delinq**
##### based on **Experience_status (Mean)**
"""
df.groupby('Experience_status')['mths_since_last_delinq'].median()
total_Experience_status_mths_since_last_delinq_avg = df.groupby('Experience_status').median()['mths_since_last_delinq']
def fill_mths_since_last_delinq(Experience_status,mths_since_last_delinq):
if np.isnan(mths_since_last_delinq):
return total_Experience_status_mths_since_last_delinq_avg[Experience_status]
else:
return mths_since_last_delinq
df['mths_since_last_delinq'] = df.apply(lambda x: fill_mths_since_last_delinq(x['Experience_status'], x['mths_since_last_delinq']), axis=1)
"""### **Droping the missing values**
##### Droping the feature sets based on the number of observation available
"""
df_1 = df.copy()
"""#### Droping the features with more than 75% percentage of missing values
##### **mths_since_last_record**, **mths_since_last_major_derog**, **verification_status_joint**
"""
df_1.drop(['mths_since_last_record',
'mths_since_last_major_derog',
'verification_status_joint'],axis=1,inplace=True)
"""#### Droping the features which are not relevent to the analysis or Duplicate featues
##### **member_id**, **batch_ID**, **grade**, **Experience**,**purpose**
"""
df_1.drop(['member_id ','batch_ID ','grade','Experience'],axis=1,inplace=True)
df_1.drop(['purpose'],axis=1,inplace=True)
"""#### Droping **Emp_designation** => Categorical Feature
##### We examined Emp_designation which has more than 25K unique variables.
Realistically there are too many unique job titles to try to convert them to a dummy variable feature.
"""
df['Emp_designation'].value_counts()
df_1.drop(['Emp_designation'],axis=1,inplace=True)
"""### **Final Drop**
##### Dropping all the remaining NA values
"""
df_1.dropna(inplace=True)
df_1.isnull().sum()
"""## **Section 5:** ***One Hot Encoding***
##### **Categorical Variables and Dummy Variables**
##### We are done handling the missing data; now we just need to deal with the string values coming from the categorical columns.
### **State => Categorical Feature**
##### State has the least relevance to the revolving balance - so we will be dropping it
"""
# dummies = pd.get_dummies(df['State'],drop_first=True)
df_1 = df_1.drop(['State'],axis=1)
# df = pd.concat([df,dummies],axis=1)
"""### **Verification_status => Categorical Feature**
##### Verification_status has three categories which are effectively equivalent to two, so we use the map function to convert it into a 0/1 feature
"""
df_1['verification_status'] = df_1['verification_status'].map({'Verified':1, 'Not Verified':0, 'Source Verified':1})
"""### **Terms => Categorical Feature**
##### Terms has two categories, which will be converted into a 0/1 feature
"""
df_1['terms'].unique()
df_1['terms'] = df_1['terms'].map({'60 months':1, '36 months':0})
"""### **home_ownership => Categorical Feature**"""
df['home_ownership'].value_counts()
dummies_1 = pd.get_dummies(df_1['home_ownership'],drop_first=True)
df_1 = df_1.drop('home_ownership',axis=1)
df_1 = pd.concat([df_1, dummies_1], axis=1)
import numpy as np
import pandas as pd
import pytest
from whylogs.core.types import TypedDataConverter
_TEST_NULL_DATA = [
([None, np.nan, None] * 3, 9),
([pd.Series(data={"a": None, "b": None}, index=["x", "y"]), pd.Series(data={"c": None, "d": 1}, index=["x", "y"])],
# -*- coding: UTF-8 -*-
"""
collector.aggregation - aggregated data collection
Aggregated data collection means collecting, in a single pass, all the data a model analysis needs.
====================================================================
"""
import os
import traceback
from tqdm import tqdm
import pandas as pd
import tma
# tma.DEBUG = True
from tma.utils import debug_print
from tma.collector.ts import get_klines
from tma.collector.ts import get_all_codes
def agg_market_klines(k_freq="D", refresh=True, cache=True):
"""获取整个市场全部股票的K线
:param k_freq: str 默认值 D
K线周期,可选值参考 `tma.collector.ts.get_klines`
:param refresh: bool 默认值 True
是否刷新数据。
全市场所有股票K线的获取需要较长的时间,默认情况下,获取的数据会
缓存到用户目录下的`.tma/data`文件夹。当 refresh 为 False 且
存在对应k_freq的K线数据时,直接读取缓存数据。
:param cache: bool 默认值 True
是否缓存数据到用户目录下。
:return: :class: `pd.DataFrame`
字段列表:
['date', 'open', 'close', 'high', 'low', 'volume', 'code']
"""
FILE_CACHE = os.path.join(tma.DATA_PATH,
"market_klines_%s.csv" % k_freq)
if os.path.exists(FILE_CACHE) and not refresh:
df = pd.read_csv(FILE_CACHE, encoding='utf-8', dtype={"code": str})
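# --- Editor's aside (hedged sketch; the original function is truncated at this point) ----------
# A plausible shape for the refresh branch, judging from the imports and docstring above (iterate
# get_all_codes() with tqdm, fetch klines per code, optionally cache to FILE_CACHE). The exact
# get_klines() signature used here is an assumption and purely illustrative:
# frames = []
# for code in tqdm(get_all_codes(), desc="market klines"):
#     k = get_klines(code, freq=k_freq)
#     k['code'] = code
#     frames.append(k)
# df = pd.concat(frames, ignore_index=True)
# if cache:
#     df.to_csv(FILE_CACHE, index=False, encoding='utf-8')
# return df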
"""
Base and utility classes for pandas objects.
"""
import textwrap
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas.compat as compat
from pandas.compat import PYPY, OrderedDict, builtins, map, range
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype,
is_extension_type, is_list_like, is_object_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.accessor import DirNamesMixin
import pandas.core.nanops as nanops
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on a accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (getattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
getattr(self, key, None) is not None)):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
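# --- Editor's aside (hedged illustration, not part of pandas itself) ---------------------------
# Minimal sketch of the intended behaviour of NoNewAttributesMixin once _freeze() has run:
#
#   class _Accessor(NoNewAttributesMixin):
#       def __init__(self):
#           self.existing = 1              # fine: set before freezing
#           self._freeze()
#
#   acc = _Accessor()
#   acc.existing = 2                       # allowed: the attribute already exists
#   acc.brand_new = 3                      # raises AttributeError
#   object.__setattr__(acc, "brand_new", 3)  # the documented escape hatch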
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.sum, np.sum),
(builtins.max, np.max),
(builtins.min, np.min),
))
_cython_table = OrderedDict((
(builtins.sum, 'sum'),
(builtins.max, 'max'),
(builtins.min, 'min'),
(np.all, 'all'),
(np.any, 'any'),
(np.sum, 'sum'),
(np.nansum, 'sum'),
(np.mean, 'mean'),
(np.nanmean, 'mean'),
(np.prod, 'prod'),
(np.nanprod, 'prod'),
(np.std, 'std'),
(np.nanstd, 'std'),
(np.var, 'var'),
(np.nanvar, 'var'),
(np.median, 'median'),
(np.nanmedian, 'median'),
(np.max, 'max'),
(np.nanmax, 'max'),
(np.min, 'min'),
(np.nanmin, 'min'),
(np.cumprod, 'cumprod'),
(np.nancumprod, 'cumprod'),
(np.cumsum, 'cumsum'),
(np.nancumsum, 'cumsum'),
))
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif (isinstance(obj, ABCDataFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries)
for r in compat.itervalues(result))
def is_any_frame():
            # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame)
for r in compat.itervalues(result))
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
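    # Editorial sketch of the three ``arg`` shapes handled by _aggregate above.
    # User code normally reaches this through ``.agg``; ``df`` is a hypothetical
    # DataFrame with columns 'A' and 'B':
    #
    #     df.groupby('A').agg('sum')              # string -> _try_aggregate_string_function
    #     df.groupby('A').agg({'B': 'mean'})      # dict   -> per-column aggregation
    #     df.groupby('A').agg(['min', np.max])    # list   -> _aggregate_multiple_funcs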
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacement attributes
"""
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
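    # Editorial sketch of how the two lookup tables defined at the top of this
    # class behave (values follow directly from the table definitions):
    #
    #     SelectionMixin._builtin_table[builtins.sum] is np.sum   # True
    #     SelectionMixin()._is_cython_func(np.nanmean)            # 'mean'
    #     SelectionMixin()._is_builtin_func(len)                  # len (not in table, returned unchanged)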
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
"""
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="Return the transpose, which is by "
"definition self.")
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Series and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
DataFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
"""
Return the data pointer of the underlying data.
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return self._values.size
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def array(self):
# type: () -> Union[np.ndarray, ExtensionArray]
"""
The actual Array backing this Series or Index.
.. versionadded:: 0.24.0
Returns
-------
array : numpy.ndarray or ExtensionArray
This is the actual array stored within this object. This differs
from ``.values`` which may require converting the data
to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be the :class:`numpy.ndarray`
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
.. note::
``.array`` will always return the underlying object backing the
Series or Index. If a future version of pandas adds a specialized
extension type for a data type, then the return type of ``.array``
for that data type will change from an object-dtype ndarray to the
new ExtensionArray.
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
return self._values
def to_numpy(self, dtype=None, copy=False):
"""
A NumPy ndarray representing the values in this Series or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if (is_extension_array_dtype(self.dtype) or
is_datetime64tz_dtype(self.dtype)):
# TODO(DatetimeArray): remove the second clause.
# TODO(GH-24345): Avoid potential double copy
result = np.asarray(self._values, dtype=dtype)
else:
result = self._values
if copy:
result = result.copy()
return result
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
return self.values
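    # Editorial sketch of the "possibly losing information" caveat above
    # (hypothetical data; categorical values collapse to their integer codes):
    #
    #     pd.Series(pd.Categorical(['a', 'b', 'a']))._ndarray_values   # array([0, 1, 0], dtype=int8)
    #     pd.Index([3, 2, 1])._ndarray_values                          # array([3, 2, 1])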
@property
def empty(self):
return not self.size
def max(self):
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
        Return an ndarray of the maximum argument indexer.
See Also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
        Return an ndarray of the minimum argument indexer.
See Also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
        # We are explicitly making element iterators.
if is_datetimelike(self._values):
return map(com.maybe_box_datetimelike, self._values)
elif is_extension_array_dtype(self._values):
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _map_values(self, mapper, na_action=None):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = | Series(mapper) | pandas.Series |
from datetime import timezone
from functools import lru_cache, wraps
from typing import List, Optional
import numpy as np
from pandas import Index, MultiIndex, Series, set_option
from pandas.core import algorithms
from pandas.core.arrays import DatetimeArray, datetimes
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.core.base import IndexOpsMixin
from pandas.core.dtypes import common
from pandas.core.dtypes.cast import maybe_cast_to_datetime
from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import na_value_for_dtype
import pandas.core.internals.construction
from pandas.core.internals.construction import DtypeObj, lib, Scalar
from pandas.core.reshape.merge import _MergeOperation, _should_fill
def nan_to_none_return(func):
"""Decorate to replace returned NaN-s with None-s."""
@wraps(func)
def wrapped_nan_to_none_return(*args, **kwargs):
r = func(*args, **kwargs)
if r != r:
return None
return r
return wrapped_nan_to_none_return
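# Editorial usage sketch of the decorator above (mirrors how it is applied to
# IndexOpsMixin.min/.max inside patch_pandas below):
#
#     nonemin = nan_to_none_return(Series.min)
#     nonemin(Series([], dtype=float))     # None instead of nan
#     nonemin(Series([2.0, 1.0]))          # 1.0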
def patch_pandas():
"""
Patch pandas internals to increase performance on small DataFrame-s.
Look: Pandas sucks. I mean it. Every minor release breaks the public API, performance is awful,
maintainers are ignorant, etc. But we don't have an alternative given our human resources.
So:
- Patch certain functions to improve the performance for our use-cases.
- Backport bugs.
- Dream about a better package with a similar API.
"""
set_option("mode.chained_assignment", "raise")
obj_dtype = np.dtype("O")
    # not required for 1.3.0+
# backport https://github.com/pandas-dev/pandas/pull/34414
_MergeOperation._maybe_add_join_keys = _maybe_add_join_keys
def _convert_object_array(
content: List[Scalar], coerce_float: bool = False, dtype: Optional[DtypeObj] = None,
) -> List[Scalar]:
# safe=True avoids converting nullable integers to floats
def convert(arr):
if dtype != obj_dtype:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float, safe=True)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
pandas.core.internals.construction._convert_object_array = _convert_object_array
IndexOpsMixin.nonemin = nan_to_none_return(IndexOpsMixin.min)
IndexOpsMixin.nonemax = nan_to_none_return(IndexOpsMixin.max)
common.pandas_dtype = lru_cache()(common.pandas_dtype)
datetimes.pandas_dtype = common.pandas_dtype
common.is_dtype_equal = lru_cache()(common.is_dtype_equal)
datetimes.is_dtype_equal = common.is_dtype_equal
DatetimeTZDtype.utc = DatetimeTZDtype(tz=timezone.utc)
def cached_utc_new(cls, *args, **kwargs):
if not args and not kwargs:
return object.__new__(cls)
if not args and kwargs == {"tz": timezone.utc}:
return cls.utc
obj = object.__new__(cls)
obj.__init__(*args, **kwargs)
return obj
DatetimeTZDtype.__new__ = cached_utc_new
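    # Editorial note: after this patch the common UTC dtype is a cached singleton,
    # while any other construction path is unchanged (sketch):
    #
    #     DatetimeTZDtype(tz=timezone.utc) is DatetimeTZDtype.utc   # True
    #     DatetimeTZDtype(unit="ns", tz="UTC")                      # built normally via __init__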
original_take = DatetimeLikeArrayMixin.take
def fast_take(self, indices, allow_fill=False, fill_value=None):
if len(indices) and indices.min() < 0:
return original_take(self, indices, allow_fill=allow_fill, fill_value=fill_value)
return original_take(self, indices, allow_fill=False)
DatetimeLikeArrayMixin.take = fast_take
original_tz_convert = DatetimeArray.tz_convert
def fast_tz_convert(self, tz):
if tz is None:
return self
return original_tz_convert(self, tz)
DatetimeArray.tz_convert = fast_tz_convert
original_get_take_nd_function = algorithms._get_take_nd_function
cached_get_take_nd_function = lru_cache()(algorithms._get_take_nd_function)
def _get_take_nd_function(ndim: int, arr_dtype, out_dtype, axis: int = 0, mask_info=None):
if mask_info is None or not mask_info[1]:
return cached_get_take_nd_function(ndim, arr_dtype, out_dtype, axis)
return original_get_take_nd_function(ndim, arr_dtype, out_dtype, axis, mask_info)
algorithms._get_take_nd_function = _get_take_nd_function
datetimes._validate_dt64_dtype = lru_cache()(datetimes._validate_dt64_dtype)
# https://github.com/pandas-dev/pandas/issues/35768
original_series_take = Series.take
def safe_take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:
kwargs.pop("fill_value", None)
kwargs.pop("allow_fill", None)
return original_series_take(self, indices, axis=axis, is_copy=is_copy, **kwargs)
Series.take = safe_take
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.left[name].dtype,
):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.right[name].dtype,
):
take_right = self.right[name]._values
elif left_indexer is not None and | is_array_like(self.left_join_keys[i]) | pandas.core.dtypes.inference.is_array_like |
"""
Prelim script for looking at netcdf files and producing some trends
These estimates can also be used for P03 climate estimation
"""
#==============================================================================
__title__ = "Global Climate Trends"
__author__ = "<NAME>"
__version__ = "v1.0(13.02.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
# =========== Create the summary of the datasets to be analyised ==========
data= OrderedDict()
data["tas"] = ({
'fname':"./data/cli/1.TERRACLIMATE/TerraClimate_stacked_tmean_1958to2017_GIMMSremapbil_yearmean.nc",
'var':"tmean", "gridres":"GIMMS", "region":"Global", "Periods":["Annual"]
})
data["pre"] = ({
'fname':"./data/cli/1.TERRACLIMATE/TerraClimate_stacked_ppt_1958to2017_GIMMSremapbil_yearsum.nc",
'var':"ppt", "gridres":"GIMMS", "region":"Global", "Periods":["Annual"]
})
# ========== loop over each dataset ==========
for dt in data:
# ========== set up the params for the trend ==========
# st_yrs = [1960, 1970, 1982, 1990, 1999]
st_yrs = [1982]
# windows = [20, 15, 10, 5]
windows = [20]
# ========== Set the ploting and overwite params ==========
plot = False #True
# force = True
for period in data[dt]["Periods"]:
# ========== Perform the rolling window smoothing ==========
RollingWindow(
data[dt]["fname"], data[dt]["var"], "polyfit", windows,
period, data[dt]["gridres"], data[dt]["region"],
yr_start=1982, yr_end=2017, force=False, plot=plot)
RollingWindow(
data[dt]["fname"], data[dt]["var"], "scipyols", windows,
period, data[dt]["gridres"], data[dt]["region"],
yr_start=1982, yr_end=2017, force=False, plot=plot)
RollingWindow(
data[dt]["fname"], data[dt]["var"], "theilsen", windows,
period, data[dt]["gridres"], data[dt]["region"],
yr_start=1982, yr_end=2017, force=False, plot=plot)
# ========== Perform the uncorrected trend detection ==========
# trendmapper(
# data[dt]["fname"], data[dt]["var"], "polyfit",
# period, data[dt]["gridres"], data[dt]["region"],
# st_yrs, plot = plot)#, force=True)
# trendmapper(
# data[dt]["fname"], data[dt]["var"], "scipyols",
# period, data[dt]["gridres"], data[dt]["region"],
# st_yrs, plot = plot)#, force=True)
# trendmapper(
# data[dt]["fname"], data[dt]["var"], "theilsen",
# period, data[dt]["gridres"], data[dt]["region"],
# st_yrs, plot = plot)#, force=True)
# sys.exit()
# Reshape to an array with as many rows as years and as many columns as there are pixels
# ipdb.set_trace()
#==============================================================================
# ============================= Primary functions =============================
#==============================================================================
def plotmaker():
# ========== Build all the plots ==========
if not plot:
return True
# +++++ Plot number +++++
pn = 1
for styp in range(0, len(start_years)):
for num in range(0, len(kys)):
# ========== create the colormap ==========
cmap, vmin, vmax = cbvals(var, kys[num])
if any ([cm is None for cm in [cmap, vmin, vmax]]):
warn.warn("no colorbar exists for %s, skipping" % (kys[num]))
ipdb.set_trace()
# continue
print(styp, num)
ax = plt.subplot(len(start_years),len(kys), pn, projection=ccrs.PlateCarree())
ax.add_feature(cpf.BORDERS, linestyle='--', zorder=102)
ax.add_feature(cpf.LAKES, alpha=0.5, zorder=103)
ax.add_feature(cpf.RIVERS, zorder=104)
# add lat long linse
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
if num == 0:
gl.xlabels_bottom = False
if not ((pn-1) % len(start_years)):
gl.ylabels_left = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# ========== Make the map ==========
# ipdb.set_trace()
ds_trend[kys[num]].isel(time=styp).plot(ax=ax, transform=ccrs.PlateCarree(),
cmap=cmap, vmin=vmin, vmax=vmax, cbar_kwargs={
"extend":"both"})#, "pad":0.0075,"fraction":0.125, "shrink":0.74}) #"fraction":0.05,
pn += 1
# ax.set_title=seasons[num]
# for vas, cl in zip(RFinfo.RF17.unique().tolist(), ['yx', "r*","k."]):
# ax.plot(RFinfo.lon.values, RFinfo.lat.values,
# "kx", markersize=4, transform=ccrs.PlateCarree())
# plt.subplots_adjust(
# top=0.98,
# bottom=0.02,
# left=0.038,
# right=0.989,
# hspace=0.05,
# wspace=0.037)
fig = plt.gcf()
# fig.set_size_inches(len(start_years)*3, len(kys)*6)
fig.set_size_inches(41, 20)
# plt.tight_layout()
plt.savefig("./%s_Testplotv2.png" % var)
# plt.colose
# ipdb.set_trace()
# plt.savefig("./Testplot.pdf")
# plt.show()
# plt.coloes
ipdb.set_trace()
def RollingWindow(
fname, var, method, window, period, gridres, region,
yr_start=1982, yr_end = 2015, force = False, plot=True):
"""Function to perform a rolling window smoothing on the precipitation and climate data
args
fname: String
string of the netcdf to be opened
var: string
string of the variable name within the netcdf
window: int
the number of time periods to be used
period: str
description of the accumulation period
gridres: str
description of the resolution of the grid
region: str
descript of the data region
yr_start
the first year to be included in trend analysis
yr_end
the last year to be included in trend analysis
force: bool
force the creation of new netcdf files
plot: bool
true makes plots
"""
# ========== Open the dataset ==========
ds = xr.open_dataset(fname)
print("Starting rolling window calculations for %s" % var)
# ========== build an output file name ==========
fout = (
'./results/netcdf/TerraClimate_%s_RollingMean_%s_%sto%d_%s%s.nc' % (
period, var, method, yr_end, region, gridres))
# ========== Test if a file alread exists ==========
if all([os.path.isfile(fout), not force]):
warn.warn("Loading existing file, force is needed to overwrite")
ds_trend = xr.open_dataset(fout)
kys = [n for n in ds_trend.data_vars]
else:
# ========== Create the global attributes ==========
global_attrs = GlobalAttributes(ds, var)
# ========== Create the rolling window means ==========
results = []
years = []
# ========== Pull out the data seasonality ==========
annual = ds[var]
# ========== Loop over each of the mooving windows ==========
for win in window:
rmean = annual.rolling(time=win).mean()
dst = rmean.sel(time=slice('%d-01-01' % yr_start, '%d-12-31' % yr_end))
# ========== Get the trend ==========
trends, kys = _fitvals(dst, method=method)
# ========== add a correction for multiple comparisons ==========
if "pvalue" in kys:
trends, kys = MultipleComparisons(trends, kys, aplha = 0.10, MCmethod="fdr_bh")
results.append(trends)
years.append(yr_start-win)
# ========== convert data to netcdf format ==========
layers, encoding = dsmaker(ds, var, results, kys, years, method)
ds_trend = xr.Dataset(layers, attrs= global_attrs)
try:
print("Starting write of data")
ds_trend.to_netcdf(fout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"])
print(".nc file created")
ipdb.set_trace()
except Exception as e:
print(e)
warn.warn(" \n something went wrong with the save, going interactive")
ipdb.set_trace()
#
if plot:
warn.warn("plotting has not been implemented in this function yet. Going interactive")
ipdb.set_trace()
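# Editorial sketch of the rolling-window step performed above (assumed shapes only):
# with window=20 on annual data starting in 1958, the first 19 rolling means are
# NaN, so the slice from yr_start=1982 onwards is fully defined:
#
#     rmean = ds["ppt"].rolling(time=20).mean()
#     rmean.sel(time=slice("1982-01-01", "2017-12-31"))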
def trendmapper(
fname, var, method, period, gridres, region,
start_years, endyr = 2015, fdpath="", force = False, plot=True):
ds = xr.open_dataset(fname)
# ========== Create the outfile name ==========
fout = './results/netcdf/TerraClimate_%s_%s_%sto%d_%s%s.nc' % (
period, var, method, endyr,region, gridres)
# ========== Check if the file already exists ==========
if all([os.path.isfile(fout), not force]):
warn.warn("Loading existing file, force is needed to overwrite")
ds_trend = xr.open_dataset(fout)
kys = [n for n in ds_trend.data_vars]
else:
results = []
# ========== Create the global attributes ==========
global_attrs = GlobalAttributes(ds, var)
if period == "OptimalAccumulated":
annual = ds[var]
else:
if period == "Annual":
man_annual = ds[var].groupby('time.year')
else:
# Grouping by the season
man_annual = ds[var].where(ds[var]['time.season'] == period).groupby('time.year')
# Account for the different variables
if var == "tmean":
annual = man_annual.mean(dim='time')
else:
annual = man_annual.sum(dim='time')
for styr in start_years:
if period == "OptimalAccumulated":
dst = annual.sel(time=slice('%d-01-01' % styr, '%d-12-31' % endyr))
else:
dst = annual.sel(year=slice('%d-01-01' % styr, '%d-12-31' % endyr))
trends, kys = _fitvals(dst, method=method)
# Correct for multiple comparisons
if "pvalue" in kys:
trends, kys = MultipleComparisons(trends, kys, aplha = 0.10)
results.append(trends)
layers, encoding = dsmaker(ds, var, results, kys, start_years, method)
ds_trend = xr.Dataset(layers, attrs= global_attrs)
try:
print("Starting write of data")
ds_trend.to_netcdf(fout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"])
except Exception as e:
print(e)
warn.warn(" \n something went wrong with the save, going interactive")
ipdb.set_trace()
# get the value
#==============================================================================
# ========================= Netcdf Creation Functions =========================
#==============================================================================
def GlobalAttributes(ds, var):
"""
Creates the global attributes for the netcdf file that is being written
these attributes come from :
https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
args
ds: xarray ds
        Dataset containing the information I'm interpreting
var: str
name of the variable
returns:
    attributes : Ordered Dictionary containing the attribute information
"""
# ========== Create the ordered dictionary ==========
attr = OrderedDict()
# fetch the references for my publications
# pubs = puplications()
# ========== Fill the Dictionary ==========
# ++++++++++ Highly recomended ++++++++++
attr["title"] = "Trend in Climate (%s)" % (var)
attr["summary"] = "Annual and season trends in %s" % var
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__)
attr["history"] += ds.history
attr["creator_name"] = __author__
attr["creator_url"] = "ardenburrell.com"
attr["creator_email"] = __email__
attr["institution"] = "University of Leicester"
attr["date_created"] = str(pd.Timestamp.now())
# ++++++++++ Netcdf Summary infomation ++++++++++
attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
return attr
def dsmaker(ds, var, results, keys, start_years, method):
"""
    Build a summary of relevant parameters
args
ds: xarray ds
Dataset containing the infomation im intepereting
var: str
name of the variable
return
ds xarray dataset
"""
# sys.exit()
# date = [dt.datetime(ds['time.year'].max() , 12, 31)]
times = OrderedDict()
tm = [dt.datetime(yr , 12, 31) for yr in start_years]
times["time"] = pd.to_datetime(tm)
times["calendar"] = 'standard'
times["units"] = 'days since 1900-01-01 00:00'
times["CFTime"] = date2num(
tm, calendar=times["calendar"], units=times["units"])
dates = times["CFTime"]
try:
lat = ds.lat.values
lon = ds.lon.values
except AttributeError:
lat = ds.latitude.values
lon = ds.longitude.values
# dates = [dt.datetime(yr , 12, 31) for yr in start_years]
# ipdb.set_trace()
# ========== Start making the netcdf ==========
layers = OrderedDict()
encoding = OrderedDict()
# ========== loop over the keys ==========
try:
for pos in range(0, len(keys)):
# ipdb.set_trace()
if type(results[0]) == np.ndarray:
Val = results[pos][np.newaxis,:, :]
else:
# multiple variables
Val = np.stack([res[pos] for res in results])
ky = keys[pos]
# build xarray dataset
DA=xr.DataArray(Val,
dims = ['time', 'latitude', 'longitude'],
coords = {'time': dates,'latitude': lat, 'longitude': lon},
attrs = ({
'_FillValue':9.96921e+36,
'units' :"1",
'standard_name':ky,
'long_name':"%s %s" % (method, ky)
}),
)
DA.longitude.attrs['units'] = 'degrees_east'
DA.latitude.attrs['units'] = 'degrees_north'
DA.time.attrs["calendar"] = times["calendar"]
DA.time.attrs["units"] = times["units"]
layers[ky] = DA
encoding[ky] = ({'shuffle':True,
# 'chunksizes':[1, ensinfo.lats.shape[0], 100],
'zlib':True,
'complevel':5})
return layers, encoding
except Exception as e:
warn.warn("Code failed with: \n %s \n Going Interactive" % e)
ipdb.set_trace()
raise e
#===============================================================================
# ============================= Internal Functions =============================
#===============================================================================
def MultipleComparisons(trends, kys, aplha = 0.10, MCmethod="fdr_by"):
"""
    Takes the results of an existing trend detection approach and modifies them to
account for multiple comparisons.
args
trends: list
list of numpy arrays containing results of trend analysis
kys: list
list of what is in results
    aplha:
        significance level for the multiple-comparison correction
    MCmethod:
        correction method passed to statsmodels multipletests
"""
if MCmethod == "fdr_by":
print("Adjusting for multiple comparisons using Benjamini/Yekutieli")
elif MCmethod == "fdr_bh":
print("Adjusting for multiple comparisons using Benjamini/Hochberg")
else:
warn.warn("unknown MultipleComparisons method, Going Interactive")
ipdb.set_trace()
# ========== Locate the p values and reshape them into a 1d array ==========
# ++++++++++ Find the pvalues ++++++++++
index = kys.index("pvalue")
pvalue = trends[index]
isnan = np.isnan(pvalue)
# ++++++++++ pull out the non nan pvalus ++++++++++
# pvalue1d = pvalue.flatten()
pvalue1d = pvalue[~isnan]
# isnan1d = isnan.flatten()
# =========== Perform the MC correction ===========
pvalue_adj = smsM.multipletests(pvalue1d, method=MCmethod, alpha=0.10)
# ++++++++++ reformat the data into array ++++++++++
MCR = ["Significant", "pvalue_adj"]
for nm in MCR:
# make an empty array
re = np.zeros(pvalue.shape)
re[:] = np.NAN
if nm == "Significant":
re[~isnan] = pvalue_adj[MCR.index(nm)].astype(int).astype(float)
else:
re[~isnan] = pvalue_adj[MCR.index(nm)]
# +++++ add the significant and adjusted pvalues to trends+++++
trends.append(re)
kys.append(nm)
return trends, kys
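# Editorial sketch of the correction applied above on a toy p-value vector:
#
#     reject, p_adj, _, _ = smsM.multipletests(
#         np.array([0.001, 0.04, 0.2]), method="fdr_bh", alpha=0.10)
#     # reject -> array([ True,  True, False]); p_adj holds the adjusted p-values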
def cbvals(var, ky):
"""Function to store all the colorbar infomation i need """
cmap = None
vmin = None
vmax = None
if ky == "slope":
if var == "tmean":
vmax = 0.07
vmin = -0.07
cmap = mpc.ListedColormap(palettable.cmocean.diverging.Balance_20.mpl_colors)
elif var =="ppt":
vmin = -3.0
vmax = 3.0
cmap = mpc.ListedColormap(palettable.cmocean.diverging.Curl_20_r.mpl_colors)
elif ky == "pvalue":
cmap = mpc.ListedColormap(palettable.matplotlib.Inferno_20.hex_colors)
vmin = 0.0
vmax = 1.0
elif ky == "rsquared":
cmap = mpc.ListedColormap(palettable.matplotlib.Viridis_20.hex_colors)
vmin = 0.0
vmax = 1.0
# cmap =
elif ky == "intercept":
cmap = mpc.ListedColormap(palettable.cmocean.sequential.Ice_20_r.mpl_colors)
if var == "tmean":
# vmax = 0.07
# vmin = -0.07
# cmap = mpc.ListedColormap(palettable.cmocean.diverging.Balance_20.mpl_colors)
# ipdb.set_trace()
pass
elif var =="ppt":
vmin = 0
vmax = 1000
# cmap = mpc.ListedColormap(palettable.cmocean.diverging.Curl_20_r.mpl_colors)
return cmap, vmin, vmax
# @jit
def _fitvals(dvt, method="polyfit"):
"""
Takes the ds[var] and performs some form of regression on it
"""
vals = dvt.values
try:
years = pd.to_datetime(dvt.time.values).year
t0 = pd.Timestamp.now()
print("testing with %s from %d to %d starting at: %s" % (
method, pd.to_datetime(dvt.time.values).year.min(),
pd.to_datetime(dvt.time.values).year.max(), str(t0)))
except AttributeError:
years = pd.to_datetime(dvt.year.values).year
t0 = pd.Timestamp.now()
print("testing with %s from %d to %d starting at: %s" % (
method, pd.to_datetime(dvt.year.values).year.min(),
pd.to_datetime(dvt.year.values).year.max(), str(t0)))
vals2 = vals.reshape(len(years), -1)
if method=="polyfit":
# Do a first-degree polyfit
vals2[np.isnan(vals2)] = 0
regressions = np.polyfit(years, vals2, 1)
regressions[regressions== 0] = np.NAN
trends = [regressions[0,:].reshape(vals.shape[1], vals.shape[2])]
kys = ["slope"]
elif method == "theilsen":
regressions = alongaxFAST(vals2, scipyTheilSen)
trds = regressions.reshape(4, vals.shape[1], vals.shape[2])
trends = []
for n in range(0, trds.shape[0]):
trends.append(trds[n, :, :])
kys = ["slope", "intercept", "rho", "pvalue"]
elif method == "scipyols":
# regressions = alongax(vals2, scipyols)
regressions = alongaxFAST(vals2, scipyols)
trds = regressions.reshape(4, vals.shape[1], vals.shape[2])
trends = []
for n in range(0, trds.shape[0]):
trends.append(trds[n, :, :])
kys = ["slope", "intercept", "rsquared", "pvalue"]
tdelta = pd.Timestamp.now() - t0
print("\n Time taken to get regression coefficients using %s: %s" % (method, str(tdelta)))
# ipdb.set_trace()
return trends, kys
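# Editorial shape sketch for the polyfit branch above: vals2 is (n_years, n_pixels)
# and np.polyfit(years, vals2, 1) returns a (2, n_pixels) array whose first row is
# the per-pixel slope that gets reshaped back onto the (lat, lon) grid:
#
#     years = np.arange(1982, 2018)
#     vals2 = np.random.rand(len(years), 4)    # 4 fake pixels
#     np.polyfit(years, vals2, 1).shape        # (2, 4)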
# @jit
def _lnflick(line, line_max, t0, lineflick=100000):
if (line % lineflick == 0):
string = ("\rLine: %d of %d" %
(line, line_max))
if line > 0:
# TIME PER LINEFLICK
lfx = (pd.Timestamp.now()-t0)/line
lft = str((lfx*lineflick))
trm = str(((line_max-line)*(lfx)))
string += (" t/%d lines: %s. ETA: %s" % (
lineflick,lft, trm) )
sys.stdout.write(string)
sys.stdout.flush()
else:
pass
# @jit
def alongaxFAST(array, myfunc, t0= | pd.Timestamp.now() | pandas.Timestamp.now |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from pylab import *
from scipy.ndimage import measurements
import matplotlib.colors as colors
import os
fec_ini ='2019-05-15'
fec_fin ='2019-12-31'
Recorte_Rad = 'no' ##---> Set to 'si' to crop the original Reflectance set to the COD dates and mask it.
## With 'si' this consumes a lot of RAM and the program must be run in parts.
fi = datetime.datetime.strptime(fec_ini,"%Y-%m-%d")
ff =datetime. datetime.strptime(fec_fin,"%Y-%m-%d")
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##--------------------SECTION ONE: RELATION BETWEEN AREAS AND COD-------------##
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
"""
Program to analyze the relationship between the COD and the reflectances, as well
as the cloud areas and the COD. In both cases the scatter plot is drawn.
"""
###############################################################################
##---------------READING THE NETCDF WITH THE GOES COD DATA-------------------##
###############################################################################
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_COD_05-2019-12-2019.nc')
COD = ds.variables['COD'][:, :, :]
tiempo = ds.variables['time']
fechas_horas_COD = nc.num2date(tiempo[:], units=tiempo.units)
for i in range(len(fechas_horas_COD)):
fechas_horas_COD[i] = fechas_horas_COD[i].strftime('%Y-%m-%d %H:%M')
fechas_horas_COD = pd.to_datetime(fechas_horas_COD, format="%Y-%m-%d %H:%M", errors='coerce')
lat_COD = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lat_COD_Junio.npy')
lon_COD = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lon_COD_Junio.npy')
COD = np.ma.filled(COD, fill_value=0.)
COD[COD ==0.] =np.nan
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
np.save(Path_save[0:45]+'Array_COD_05-2019-12-2019', COD)
np.save(Path_save[0:45]+'Array_FechasHoras_COD_05-2019-12-2019',fechas_horas_COD )
if Recorte_Rad == 'si':
###############################################################################
    ## -------------READING THE GOES CH2 DATA ON THE GENERAL GRID------------- ##
###############################################################################
Rad_origin = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas_Rad_origin = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_CH2__2018_2019.npy')
fechas_horas_Rad_origin = pd.to_datetime(fechas_horas_Rad_origin, format="%Y-%m-%d %H:%M", errors='coerce')
Rad = Rad_origin[(fechas_horas_Rad_origin>= fi)&(fechas_horas_Rad_origin<=ff)]
fechas_horas_Rad = fechas_horas_Rad_origin[(fechas_horas_Rad_origin>= fi)&(fechas_horas_Rad_origin<=ff)]
################################################################################
    ## -----------------READING THE REFLECTANCE THRESHOLDS-------------------- ##
################################################################################
df_UmbralH_Nube_348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_348_Nuba.csv', sep=',', index_col =0, header = None)
df_UmbralH_Nube_350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_350_Nuba.csv', sep=',', index_col =0, header = None)
df_UmbralH_Nube_975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_975_Nuba.csv', sep=',', index_col =0, header = None)
df_UmbralH_Nube = pd.concat([df_UmbralH_Nube_348, df_UmbralH_Nube_350, df_UmbralH_Nube_975], axis=1)
df_UmbralH_Nube = df_UmbralH_Nube.mean(axis = 1, skipna = True)
df_UmbralH_Nube = | pd.DataFrame(df_UmbralH_Nube, columns=['Umbral']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.spatial.transform import Rotation as R
import os
# We use only folders 1-20. This code creates a robot state csv for a case when one camera is used for testing and 5 for training
# save the dataset size of first 20 folders
path1 = '/home/kiyanoush/UoLincoln/Projects/DeepIL Codes/DatasetSize.csv'
df1 = | pd.read_csv(path1, header=None) | pandas.read_csv |
from fbprophet import Prophet
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
import streamlit as st
def main():
# title
st.title('Wiley OA Publishing Analysis')
# summary
with st.expander('About This Project'):
st.markdown('''
        This project aims to provide actionable intelligence for contract negotiation with Wiley. Analysis is performed on article data to determine the extent, scope, and frequency of open access availability.
#### Data Source:
1. Article data from Web of Science
2. Open Access availability from Unpaywall
#### Limitations
* No data on articles being potentially *removed* from open access catalogs. May need to employ web-scraping/API query or similar methodology to keep track of those data.
* Nulls present in ID column for Dataset 1, reducing dataset upon merging.
* No pricing information on journals.
* Formatting -- although data pulled contain subject information, articles often are multidisciplinary, which creates difficulty in analysing trends for discrete research fields.
#### Technologies
* Pandas: Cleaning, Aggregation, Exploration
* SciKit Learn: Regression, Basic Classification
* Facebook 'Prophet': Time Series Analysis
* Statsmodels API: Time Series Analysis and Projection
* Plotly: Visualizations
* Streamlit: Interface
#### Analysis
* [x] Categorical
* [x] Time Series
* [x] Trend *and* seasonality
* [x] Time Series Analysis
* [x] Regression
''')
# sidebar
st.sidebar.title('Plots')
st.sidebar.text('Categorical')
plot_bar_oa = st.sidebar.checkbox('Open Access Categories', False)
plot_bar_oaev = st.sidebar.checkbox('Open Access Evidence', True)
st.sidebar.text('Time Series')
plot_line_m = st.sidebar.checkbox('Articles Published by Month', False)
plot_line_q = st.sidebar.checkbox('Articles Published by Quarter', False)
option_yhat_sarimax = st.sidebar.checkbox('Articles Published Projection (SARIMAX)', False)
option_prophet = st.sidebar.checkbox('Articles Published Projection (Prophet)', False)
st.sidebar.text('Regression')
reg = st.sidebar.checkbox('Regression Analyses', False)
st.sidebar.text('Data')
table = st.sidebar.checkbox('Show Data', False)
# @st.cache
def init():
# files
path_dir = '.'
print(os.getcwd())
files = [x for x in os.listdir(path_dir) if x.endswith('.csv')]
print('files: ', files)
file_oa = files[[idx for idx, s in enumerate(files) if 'OA' in s][0]]
file_art = files[[idx for idx, s in enumerate(files) if 'Article' in s][0]]
# df setup
df_article = pd.read_csv(file_art)
df_oa = pd.read_csv(file_oa)
print(df_oa.keys())
# perform merge on DOI
df = pd.merge(left=df_article, right=df_oa,
left_on='DI', right_on='doi',
how='inner')
# date coersion
df['published_date'] = pd.to_datetime(df['published_date'])
df['published_date'] = df['published_date'].dt.date
# quarter
quarter = pd.PeriodIndex(df['published_date'], freq='Q')
df['Quarter'] = [f'{x.year}Q{x.quarter}' for x in quarter]
# month
month_key = {
'1':'Jan',
'2':'Feb',
'3':'Mar',
'4':'Apr',
'5':'May',
'6':'Jun',
'7':'Jul',
'8':'Aug',
'9':'Sept',
'10':'Oct',
'11':'Nov',
'12':'Dec',
}
month = pd.PeriodIndex(df['published_date'], freq='M')
df['Month'] = [f'{x.year}, {month_key[str(x.month)]}' for x in month]
# st.balloons()
return(df)
# regressions
def evaluate_model(model, col1, col2):
X = col1
y = col2
X = [np.array([x]) for x in X]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
model.fit(X_train_scaled, y_train)
score = model.score(X_test_scaled, y_test)
predictions = model.predict(X_test_scaled)
return ({"model":f'{model}'.split('(')[0], 'score':score, 'predictions': predictions})
#init
df=init()
# groupby OA status
if plot_bar_oa:
df_groupby_oastatus = df.groupby(df['oa_status']).agg({'doi':'count'})
df_groupby_oastatus.reset_index(inplace=True)
df_groupby_oastatus.rename(
columns={'doi':'Article Count', 'oa_status': 'Open Access Status'}, inplace=True)
df_groupby_oastatus['Open Access Status'] = df_groupby_oastatus['Open Access Status'].str.capitalize()
fig_gb_oastat = px.bar(df_groupby_oastatus, x='Open Access Status', y="Article Count", title='Articles Published By OA Status')
# render
st.markdown('##### Open Access Status')
st.plotly_chart(fig_gb_oastat)
st.markdown('---')
st.markdown('#### Data')
st.dataframe(df_groupby_oastatus, width=800)
st.markdown('---')
if plot_bar_oaev:
df['OA Evidence'] = df['best_oa_evidence'].str.rsplit('(', expand=True)[0]
df['Via'] = df['best_oa_evidence'].str.rsplit('(', expand=True)[1].str.replace(')','')
df['Via'] = df['Via'].str.replace('via','')
df_groupby_oa_ev = pd.DataFrame(df.groupby([df['OA Evidence'],df['Via']]).agg({'doi':'count'}))
df_groupby_oa_ev.reset_index(inplace=True)
df_groupby_oa_ev.rename(columns={0:'OA Evidence', 1:'Via', 'doi':'Article Count'}, inplace=True)
fig_gb_oaev = px.bar(df_groupby_oa_ev, x='OA Evidence', y="Article Count", color='Via', title='Articles with OA Evidence')
fig_gb_oaev.update_xaxes(tickangle=-90)
# render
st.markdown('#### OA Evidence')
st.plotly_chart(fig_gb_oaev)
st.markdown('---')
st.markdown('#### Data')
st.dataframe(df_groupby_oa_ev, width=800)
st.markdown('---')
# groupby month and plot
if plot_line_m:
df_groupby_month = pd.DataFrame(df.groupby('Month')
.agg({'doi':'count'})).rename(columns={'doi':'Article Count'})
fig_gb_m = px.line(df_groupby_month, y="Article Count", title='Articles Published By Month')
fig_gb_m.update_xaxes(tickangle=-90)
# render
st.markdown('#### Monthly')
st.plotly_chart(fig_gb_m)
st.markdown('---')
st.markdown('#### Data')
st.dataframe(df_groupby_month, width=800)
st.markdown('---')
# groupby q and plot
if plot_line_q:
df_groupby_quarter = pd.DataFrame(df.groupby('Quarter')
.agg({'doi':'count'})).rename(columns={'doi':'Article Count'})
fig_gb_q = px.line(df_groupby_quarter, y="Article Count", title='Articles Published By Quarter')
# render
st.markdown('##### Quarterly')
st.plotly_chart(fig_gb_q)
st.markdown('---')
st.markdown('#### Data')
st.dataframe(df_groupby_quarter, width=800)
st.markdown('---')
if option_yhat_sarimax:
# groupby date
df_groupby_daily = pd.DataFrame(df.groupby('published_date').agg({'doi':'count'}))
# reset index
df_groupby_daily.reset_index(inplace=True)
# coerce to datetime
df_groupby_daily['published_date'] = pd.to_datetime(df_groupby_daily['published_date'])
# set index
df_groupby_daily.set_index('published_date', inplace=True)
# combine into monthly (end of month)
y_sarimax = df_groupby_daily['doi'].resample('M').sum()
# y = y.cumsum() (for testing with cumulative sum)
data_sarimax = y_sarimax
# fit
model = SARIMAX(data_sarimax, order=(1, 1, 1), seasonal_order=(0, 0, 0, 0))
model_fit = model.fit(disp=False)
# make prediction -- new y
yhat_sarimax = model_fit.predict(len(data_sarimax)-4, len(data_sarimax)+4)
fig_sarimax = px.line(pd.DataFrame({'predicted':yhat_sarimax,'actual':data_sarimax}))
fig_sarimax.update_layout(
title='Predicted Open Access Journal Counts (New, Monthly)<br><i>SARIMAX</i>',
xaxis_title='Monthly',
yaxis_title='New OA Count',
legend_title='Value'
)
st.markdown('#### Time Series Prediction')
st.plotly_chart(fig_sarimax)
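# Note (added): predict() is called from len(data_sarimax)-4 to len(data_sarimax)+4, so the
# plotted 'predicted' series overlaps the last four observed months (in-sample fit) before
# extending roughly five months past the sample (out-of-sample forecast).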
if option_prophet:
# groupby date
df_groupby_daily = pd.DataFrame(df.groupby('published_date').agg({'doi':'count'}))
# reset index
df_groupby_daily.reset_index(inplace=True)
# coerce to datetime
df_groupby_daily['published_date'] = pd.to_datetime(df_groupby_daily['published_date'])
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
assert result.dtype == ex_dtype
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
assert result.dtype == ex_dtype
def test_as_recarray(self):
# basic test
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# index_col ignored
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True, index_col=0)
tm.assert_numpy_array_equal(out, expected)
# respects names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = '1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# header order is respected even though it conflicts
# with the natural ordering of the column names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'b,a\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('b', '=i8'), ('a', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# overrides the squeeze parameter
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a\n1'
expected = np.array([(1,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True)
tm.assert_numpy_array_equal(out, expected)
# does data conversions before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
conv = lambda x: int(x) + 1
expected = np.array([(2, 'a'), (3, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True,
converters={'a': conv})
tm.assert_numpy_array_equal(out, expected)
# filters by usecols before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1,), (2,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True,
usecols=['a'])
tm.assert_numpy_array_equal(out, expected)
def test_memory_map(self):
mmap_file = os.path.join(self.dirpath, 'test_mmap.csv')
expected = DataFrame({
'a': [1, 2, 3],
'b': ['one', 'two', 'three'],
'c': ['I', 'II', 'III']
})
out = self.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(out, expected)
def test_null_byte_char(self):
# see gh-2741
data = '\x00,foo'
cols = ['a', 'b']
expected = DataFrame([[np.nan, 'foo']],
columns=cols)
if self.engine == 'c':
out = self.read_csv(StringIO(data), names=cols)
from pathlib import Path
from sparta.ab.portfolio_metrics import portfolio_metrics, yearly_returns
import pandas as pd
from sparta.tomer.alpha_go.consts import LOCAL_PATH
import pdb
class ReportBuilder(object):
def __init__(self):
self.returns = {}
def set_args(self, year, predictions, portfolio_size, model_type):
self.year = year
self.y_pred = predictions.y_pred
self.output = predictions.reset_index()
self.portfolio_size = portfolio_size
self.model_type = model_type
self.output_folder = LOCAL_PATH+f'/results/{model_type}/'
print(f'{year} {model_type}')
def calc_portfolio_returns(self):
"""
calculate the returns for the predictions
:return: None
"""
# sort values by date and prediction
self.predictions = self.output.sort_values(['date', 'y_pred']).reset_index(drop=True)
# rank prediction by date and return
self.predictions['rank'] = self.predictions.groupby(['date', 'y_pred']).ngroup()
self.predictions['rank'] = self.predictions.groupby('date')['rank'].rank()
# take the top and bottom index
top_index = self.predictions.groupby('date')['rank'].nlargest(self.portfolio_size).index.levels[1]
btm_index = self.predictions.groupby('date')['rank'].nsmallest(self.portfolio_size).index.levels[1]
self.top_index = top_index
self.btm_index = btm_index
# check overlapping stocks between top and btm
intersection = set(top_index).intersection(set(btm_index))
if len(intersection) > 0:
print('The top and the btm portfolio share the same stocks')
print(self.predictions[self.predictions.index.isin(intersection)])
# take the top and btm returns
#pdb.set_trace()
#print(self.predictions.iloc[top_index])
#print(self.predictions.iloc[btm_index])
self.top_returns = self.predictions[self.predictions.index.isin(top_index)].groupby('date')['y_test'].mean()
self.btm_returns = self.predictions[self.predictions.index.isin(btm_index)].groupby('date')['y_test'].mean()
self.top_btm_returns = self.top_returns - self.btm_returns
self.returns.setdefault('top',[]).append(self.top_returns)
self.returns.setdefault('btm',[]).append(self.btm_returns)
self.returns.setdefault('top_btm',[]).append(self.top_btm_returns)
def calc_portfolio_metrics(self):
"""
calculate the portfolio returns
:return: None
"""
# calc top and btm portfolio metrics
top_metrics = portfolio_metrics(pd.concat(self.returns['top']), self.model_type, f'TOP {self.portfolio_size}')
btm_metrics = portfolio_metrics(pd.concat(self.returns['btm']), self.model_type, f'BTM {self.portfolio_size}')
top_btm_returns = portfolio_metrics(pd.concat(self.returns['top_btm']), self.model_type,
f'TOP {self.portfolio_size}-BTM {self.portfolio_size}')
self.portfolio_metrics = pd.concat([top_metrics, btm_metrics, top_btm_returns])
# calc top and btm yearly returns
top_yearly = yearly_returns(pd.concat(self.returns['top']), self.model_type, f'TOP {self.portfolio_size}')
btm_yearly = yearly_returns(pd.concat(self.returns['btm']), self.model_type, f'BTM {self.portfolio_size}')
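# (The snippet is truncated here in the source. A plausible continuation -- an assumption,
# not the original code -- would combine the yearly tables, e.g.
# self.yearly_returns = pd.concat([top_yearly, btm_yearly]).)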
import datetime
import backtrader as bt
import pandas as pd
class MyStrategy(bt.Strategy):
def __init__(self):
print('init')
def start(self):
print('start')
def prenext(self):
print('prenext')
def nextstart(self):
print('next start')
def next(self):
print('next')
def stop(self):
print('stop')
# 1. Create a cerebro
cerebro = bt.Cerebro()
# 2. Add data feed
# 2.1 Create a data feed
df = pd.read_csv('../data/TSLA.csv')
# print(df.tail())
df['Date'] = pd.to_datetime(df['Date'])
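# The snippet is truncated here in the source. A minimal sketch of the usual next steps
# (assumed, not from the original) would index the frame by the datetime column and wrap
# it as a backtrader data feed before running the strategy:
# df.set_index('Date', inplace=True)
# data = bt.feeds.PandasData(dataname=df)
# cerebro.adddata(data)
# cerebro.addstrategy(MyStrategy)
# cerebro.run()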
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import Normalizer, StandardScaler, MinMaxScaler, RobustScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
#%% Set data paths:
path_BTC_Data = '/Users/dennisgrobe/Nextcloud/Documents/Coding/Portfolio Management/Bitcoin-Fork/data_main/BTC_Data.csv'
# read data and interpolate
data = pd.read_csv(path_BTC_Data, sep=',', index_col='Date')
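# The comment above mentions interpolation, but the snippet is truncated here; a minimal
# sketch of that step (assumed, not from the original) would be:
# data = data.interpolate(method='linear')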
import numpy as np
import pandas as pd
import pystan as ps
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import plotly.express as px
import glob
import arviz
from tqdm import tqdm
import matplotlib
import os
import sys
import datetime
# load the 10xv3 results with 30x sampling for each cell/depth combination
dfs={}
for item in glob.glob('./scvi_output/10xv3_final_summaries/*'):
dfs[item.split('/')[3].split('-final_summary.csv')[0]] = pd.read_csv(item).sort_values(["sampled_cells", "total_UMIs"], ascending = (True, True))
print(dfs.keys())
stan_model = ps.StanModel(file="./stan_models/seqdepth_2predictors_piecewise_v5.stan",
model_name = "seqdepth_2s_piecewise")
results_folder='./pystan_results/'
# for dataset in dfs:
# set environmental variable STAN_NUM_THREADS
# Use 4 cores per chain
os.environ['STAN_NUM_THREADS'] = "10"
for dataset in tqdm(['10x_genomics_data-pbmc_1k_v3']):
print(dataset)
begin = datetime.datetime.now()
print ("Start fit time : ")
print (begin.strftime("%Y-%m-%d %H:%M:%S"))
df = dfs[dataset]
data_dict = {"ncells": np.log2(df["sampled_cells"]), "umis_per_cell": np.log2(df["UMIs_per_cell"]), "validation_error": np.log2(df["validation_error"]), "N": len(df)}
# use the default 4 chains == 4 parallel process
# used cores = min(cpu_cores, 4*STAN_NUM_THREADS)
stan_fit = stan_model.sampling(data=data_dict,
iter=25000,
warmup = 15000,
# n_jobs=10,
chains=3,
refresh = 10,
verbose=True,
control={'adapt_delta':0.8, 'max_treedepth': 15},
)
print(stan_model.model_code)
print ("Finished fit time: ")
now = datetime.datetime.now()
print (now.strftime("%Y-%m-%d %H:%M:%S"))
print('Time taken:')
delta=now - begin
print(str(delta))
s = stan_fit.summary()
summary = pd.DataFrame(s['summary'], columns=s['summary_colnames'], index=s['summary_rownames'])
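# Hedged sketch (assumed, not from the original): the per-dataset summary would typically
# be persisted into the results folder defined above, e.g.
# summary.to_csv(os.path.join(results_folder, f'{dataset}-summary.csv'))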
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 Index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE - 50 ETF
Options - SSE - 300 ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 Index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
Sina Finance - CFFEX - CSI 300 Index options - all contracts; the first contract returned is the main (most active) contract
Currently Sina Finance only provides CFFEX data for the CSI 300 Index
:return: CFFEX - CSI 300 Index - all contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
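# Illustrative usage (assumed; requires network access to Sina Finance):
# contracts = option_cffex_hs300_list_sina()
# print(contracts)  # e.g. {'沪深300指数': ['io2104', 'io2105', ...]}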
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
CFFEX - CSI 300 Index - real-time quotes for the given contract
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
:param symbol: contract code; use the option_cffex_hs300_list_sina function to list available codes
:type symbol: str
:return: CFFEX - CSI 300 Index - real-time call and put quotes for the given contract
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
data_df['看涨合约-买量'] = pd.to_numeric(data_df['看涨合约-买量'])
data_df['看涨合约-买价'] = pd.to_numeric(data_df['看涨合约-买价'])
data_df['看涨合约-最新价'] = pd.to_numeric(data_df['看涨合约-最新价'])
data_df['看涨合约-卖价'] = pd.to_numeric(data_df['看涨合约-卖价'])
data_df['看涨合约-卖量'] = pd.to_numeric(data_df['看涨合约-卖量'])
data_df['看涨合约-持仓量'] = pd.to_numeric(data_df['看涨合约-持仓量'])
data_df['看涨合约-涨跌'] = pd.to_numeric(data_df['看涨合约-涨跌'])
data_df['行权价'] = pd.to_numeric(data_df['行权价'])
data_df['看跌合约-买量'] = pd.to_numeric(data_df['看跌合约-买量'])
data_df['看跌合约-买价'] = pd.to_numeric(data_df['看跌合约-买价'])
data_df['看跌合约-最新价'] = pd.to_numeric(data_df['看跌合约-最新价'])
data_df['看跌合约-卖价'] = pd.to_numeric(data_df['看跌合约-卖价'])
data_df['看跌合约-卖量'] = pd.to_numeric(data_df['看跌合约-卖量'])
data_df['看跌合约-持仓量'] = pd.to_numeric(data_df['看跌合约-持仓量'])
data_df['看跌合约-涨跌'] = pd.to_numeric(data_df['看跌合约-涨跌'])
return data_df
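# Illustrative usage (assumed; requires network access):
# spot_df = option_cffex_hs300_spot_sina(symbol="io2104")
# print(spot_df[['行权价', '看涨合约-最新价', '看跌合约-最新价']].head())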
def option_cffex_hs300_daily_sina(symbol: str = "io2202P4350") -> pd.DataFrame:
"""
Sina Finance - CFFEX - CSI 300 Index - daily quotes for the given contract
:param symbol: full contract code (including the call/put identifier), available from the call identifier column of ak.option_cffex_hs300_spot_sina
:type symbol: str
:return: daily data
:rtype: pd.DataFrame
"""
year = datetime.datetime.now().year
month = datetime.datetime.now().month
day = datetime.datetime.now().day
url = f"https://stock.finance.sina.com.cn/futures/api/jsonp.php/var%20_{symbol}{year}_{month}_{day}=/FutureOptionAllService.getOptionDayline"
params = {"symbol": symbol}
r = requests.get(url, params=params)
data_text = r.text
data_df = pd.DataFrame(
eval(data_text[data_text.find("[") : data_text.rfind("]") + 1])
)
data_df.columns = ["open", "high", "low", "close", "volume", "date"]
data_df = data_df[[
"date",
"open",
"high",
"low",
"close",
"volume",
]]
data_df['date'] = pd.to_datetime(data_df['date']).dt.date
data_df['open'] = pd.to_numeric(data_df['open'])
data_df['high'] = pd.to_numeric(data_df['high'])
data_df['low'] = pd.to_numeric(data_df['low'])
data_df['close'] = pd.to_numeric(data_df['close'])
data_df['volume'] = pd.to_numeric(data_df['volume'])
return data_df
# Options - SSE - 50 ETF
def option_sse_list_sina(symbol: str = "50ETF", exchange: str = "null") -> List[str]:
"""
Sina Finance - options - SSE - 50 ETF - list of contract expiry months
https://stock.finance.sina.com.cn/option/quotes.html
:param symbol: 50ETF or 300ETF
:type symbol: str
:param exchange: null
:type exchange: str
:return: contract expiry months
:rtype: list
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getStockName"
params = {"exchange": f"{exchange}", "cate": f"{symbol}"}
r = requests.get(url, params=params)
data_json = r.json()
date_list = data_json["result"]["data"]["contractMonth"]
return ["".join(i.split("-")) for i in date_list][1:]
def option_sse_expire_day_sina(
trade_date: str = "202102", symbol: str = "50ETF", exchange: str = "null"
) -> Tuple[str, int]:
"""
Remaining time to expiry for the specified expiry month and product
:param trade_date: expiry month, e.g. 202002, 202003, 202006, 202009
:type trade_date: str
:param symbol: 50ETF or 300ETF
:type symbol: str
:param exchange: null
:type exchange: str
:return: (expiry date, remaining days)
:rtype: tuple
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
if int(data["remainderDays"]) < 0:
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{'XD' + symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
return data["expireDay"], int(data["remainderDays"])
def option_sse_codes_sina(symbol: str = "看涨期权", trade_date: str = "202202", underlying: str = "510050") -> pd.DataFrame:
"""
Shanghai Stock Exchange - codes of all call or put contracts
:param symbol: choice of {"看涨期权", "看跌期权"} (call options or put options)
:type symbol: str
:param trade_date: option expiry month
:type trade_date: "202002"
:param underlying: underlying product code; ChinaAMC SSE 50 ETF: 510050 or Huatai-PineBridge CSI 300 ETF: 510300
:type underlying: str
:return: codes of the call or put contracts
:rtype: pandas.DataFrame
"""
if symbol == "看涨期权":
url = "".join(
["http://hq.sinajs.cn/list=OP_UP_", underlying, str(trade_date)[-4:]]
)
else:
url = "".join(
["http://hq.sinajs.cn/list=OP_DOWN_", underlying, str(trade_date)[-4:]]
)
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_temp = data_text.replace('"', ",").split(",")
temp_list = [i[7:] for i in data_temp if i.startswith("CON_OP_")]
temp_df = pd.DataFrame(temp_list)
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
'序号',
'期权代码',
]
return temp_df
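# Illustrative usage (assumed; requires network access):
# codes_df = option_sse_codes_sina(symbol="看涨期权", trade_date="202202", underlying="510050")
# print(codes_df.head())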
def option_sse_spot_price_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
Sina Finance - options - real-time option quotes
:param symbol: option code
:type symbol: str
:return: option price and volume data
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_OP_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"买量",
"买价",
"最新价",
"卖价",
"卖量",
"持仓量",
"涨幅",
"行权价",
"昨收价",
"开盘价",
"涨停价",
"跌停价",
"申卖价五",
"申卖量五",
"申卖价四",
"申卖量四",
"申卖价三",
"申卖量三",
"申卖价二",
"申卖量二",
"申卖价一",
"申卖量一",
"申买价一",
"申买量一 ",
"申买价二",
"申买量二",
"申买价三",
"申买量三",
"申买价四",
"申买量四",
"申买价五",
"申买量五",
"行情时间",
"主力合约标识",
"状态码",
"标的证券类型",
"标的股票",
"期权合约简称",
"振幅",
"最高价",
"最低价",
"成交量",
"成交额",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_underlying_spot_price_sina(symbol: str = "sh510300") -> pd.DataFrame:
"""
    Real-time quotes for the option's underlying security
    :param symbol: sh510050 or sh510300
    :type symbol: str
    :return: information on the option's underlying security
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list={symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"证券简称",
"今日开盘价",
"昨日收盘价",
"最近成交价",
"最高成交价",
"最低成交价",
"买入价",
"卖出价",
"成交数量",
"成交金额",
"买数量一",
"买价位一",
"买数量二",
"买价位二",
"买数量三",
"买价位三",
"买数量四",
"买价位四",
"买数量五",
"买价位五",
"卖数量一",
"卖价位一",
"卖数量二",
"卖价位二",
"卖数量三",
"卖价位三",
"卖数量四",
"卖价位四",
"卖数量五",
"卖价位五",
"行情日期",
"行情时间",
"停牌状态",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_greeks_sina(symbol: str = "10003045") -> pd.DataFrame:
"""
    Option greeks and basic contract information
    :param symbol: contract code
    :type symbol: str
    :return: option greeks and basic contract information
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_SO_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1: data_text.rfind('"')].split(",")
field_list = [
"期权合约简称",
"成交量",
"Delta",
"Gamma",
"Theta",
"Vega",
"隐含波动率",
"最高价",
"最低价",
"交易代码",
"行权价",
"最新价",
"理论价值",
]
data_df = pd.DataFrame(
list(zip(field_list, [data_list[0]] + data_list[4:])), columns=["字段", "值"]
)
return data_df
def option_sse_minute_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
    Minute-level data for the specified option contract on the current trading day only;
    historical minute data cannot be retrieved from this endpoint
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: option code
    :type symbol: str
    :return: minute data for the specified option contract on the current trading day
:rtype: pandas.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionDaylineService.getOptionMinline"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = data_json["result"]["data"]
data_df = | pd.DataFrame(temp_df) | pandas.DataFrame |
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib
from pandas import Series
import numpy as np
import os
from cargador_datos_csv_population import *
from cargador_datos_csv_area import *
municipiosLetMeHelp=["25041",\
"25068",\
"25048",\
"25052",\
"25068",\
"25093",\
"25099",\
"25113",\
"25122",\
"25135",\
"25137",\
"25158",\
"25168",\
"25205",\
"25230",\
"25248",\
"25252",\
"25006",\
"25009",\
"25029",\
"25170",\
"25058",\
"25056",\
"25067",\
"25073",\
"25076",\
"25081",\
"25092",\
"25097",\
"25101",\
"25105",\
"25118",\
"25119",\
"25153",\
"25155",\
"25169",\
"25180",\
"25206",\
"25218",\
"25224",\
"25253",\
"25255",\
"43021",\
"43029",\
"43046",\
"43054",\
"43061",\
"43073",\
"43086",\
"43101",\
"43105",\
"43107",\
"43141",\
"43130",\
"43139",\
"43142",\
"43143",\
"43146",\
"43147",\
"43158",\
"43159",\
"43168",\
"43172",\
"43176",\
"25055",\
"25072",\
"25085",\
"25103",\
"25104",\
"25110",\
"25114",\
"25132",\
"25141",\
"25143",\
"25152",\
"25911",\
"25905",\
"25191",\
"25192",\
"25197",\
"25194",\
"25216",\
"25219",\
"25907",\
"25223",\
"25003",\
"25027",\
"25046",\
"25050",\
"25070",\
"25074",\
"25096",\
"25109",\
"25130",\
"25145",\
"25154",\
"25157",\
"25176",\
"25181",\
"25902",\
"25225",\
"25217",\
"25238",\
"25242",\
"25244",\
"25055",\
"25072",\
"25085",\
"25103",\
"25104",\
"25110",\
"25114",\
"25132",\
"25141",\
"25143",\
"25152",\
"25911",\
"25905",\
"25191",\
"25192",\
"25197",\
"25194",\
"25216",\
"25219",\
"25907",\
"25223"]
def pop_density_binning(return_colors=False):
"""Return bins and color scheme for population density"""
bins = np.array([0, 1, 2, 4, 6, 8, 10, 50, 100, 200, 500, 1000, 1500, 2000, 2500, 5000, 10000])
cmap = plt.cm.get_cmap("Reds", len(bins)+1)
if not return_colors:
return bins, cmap
else:
colors = []
for i in range(cmap.N):
rgb = cmap(i)[:3]
colors.append(matplotlib.colors.rgb2hex(rgb))
return bins, colors
def risk_binning(return_colors=False):
"""Return bins and color scheme for relative median risk"""
bins = np.arange(-10000,18000,2000)
cmap = plt.cm.get_cmap("RdBu", len(bins))
if not return_colors:
return bins, cmap
else:
colors = []
for i in range(cmap.N):
rgb = cmap(i)[:3]
colors.append(matplotlib.colors.rgb2hex(rgb))
return bins, colors
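# Sketch of how the bins/colormaps above are turned into hex fill colors further below
# (illustrative values only; assumes matplotlib is available):
# bins, cmap = pop_density_binning()
# idx = bins.searchsorted(350.0)  # e.g. a density of 350 inhabitants per km2
# hex_color = matplotlib.colors.rgb2hex(cmap(idx)[:3])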
url="/Volumes/Disco Externo/municipios_ign.geojson"
keep_columns = ["nameunit", "codigoine", "geometry"]
df=gpd.read_file(url)
df=df.loc[(df["codigoine"].isin(municipiosLetMeHelp))]
cols = list(df)
for col in keep_columns:
cols.remove(col)
data=df.drop(columns=cols)
data.rename(columns={"nameunit": "name","codigoine":"zip"}, inplace=True)
# Join population and data
data=data.set_index('zip').join(population.set_index('Codi'))
# Join area and data
data=data.join(area.set_index('Codi'))
data.rename(columns={"(km2)": "area"}, inplace=True)
data["pop2020"] = data["Total. De 0 a 14 anys"]+data["Total. De 15 a 64 anys"]+data["Total. De 65 anys i més"]
data["pop_density"] = data["pop2020"]/data["area"]
data.loc[:, "risk"] = 000
avg_risk = np.nanmean(data["risk"])
data["risk_relative"] = data["risk"]-avg_risk
bins, cmap = pop_density_binning()
colors = []
for i, row in data.iterrows():
index = bins.searchsorted(row["pop_density"])
colors.append(matplotlib.colors.rgb2hex(cmap(index)[:3]))
data["fill_density"] = Series(colors, dtype="str", index=data.index)
bins, cmap = risk_binning()
colors = []
for i, row in data.iterrows():
index = bins.searchsorted(row["risk_relative"])
colors.append(matplotlib.colors.rgb2hex(cmap(index)[:3]))
data["fill"] = | Series(colors, dtype="str", index=data.index) | pandas.Series |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
    Agdrift unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
        :param aquatic_body_type: type of endpoint of concern (e.g., pond, wetland); implies whether
        : endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
        :description assigns column names (except distance column) from sql database to internal scenario names
        :param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
        :param scenario_name: internal variable for holding scenario names
        :param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
            tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
                user specified areas the length is calculated and not specified by the user
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac(self):
"""
        :description calculation of average deposition (lbs/acre) over width of water body from the fraction-of-applied deposition and the application rate
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
        :description calculation of average deposition as fraction of applied, from deposition in lbs/acre and the application rate
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
        :description calculation of average deposition (lbs/acre) over width of water body from deposition in grams/hectare
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
        :param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
:description average deposition over width of water body in grams per acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
        :param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
x_dist = 6.56
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
# write output arrays to excel file -- just for debugging
agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "output_array_generate.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg1(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:NOTE This test uses a uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = | pd.Series([], dtype='float') | pandas.Series |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), | pd.offsets.MonthBegin(1) | pandas.offsets.MonthBegin |
__author__ = 'thor'
import pandas as pd
import numpy as np
import ut.daf.manip as daf_manip
import ut.daf.ch as daf_ch
# from ut.pstr.trans import toascii as strip_accents
from sklearn.feature_extraction.text import strip_accents_unicode as strip_accents
def to_lower_ascii(d):
if isinstance(d, pd.DataFrame):
d = d.copy()
d = d.convert_objects(convert_dates=True, convert_numeric=True)
lower_ascii = lambda x: strip_accents(x).lower()
d.columns = list(map(lower_ascii, d.columns))
for c in d.columns:
if d[c].dtype == 'O':
d[c].fillna('', inplace=True)
if d[c].dtype != 'float' and d[c].dtype != 'int':
try:
d[c] = list(map(lower_ascii, list(map(str, d[c]))))
except TypeError as e:
print(e.message)
return d
else:
raise NotImplementedError("the input format '{}' is not handled".format(type(d)))
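# Usage sketch (illustrative only; note that DataFrame.convert_objects above exists only in
# older pandas releases, so this is expected to run only in such an environment):
# df = pd.DataFrame({'Ciudad': ['Lérida', 'ÁVILA']})
# to_lower_ascii(df)  # -> column 'ciudad' with values ['lerida', 'avila']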
def smallest_unik_prefix(tok_lists, min_prefix_len=1, tok_list_col=None, list_sep=' '):
if isinstance(tok_lists, pd.DataFrame):
dataframe_input = True
assert tok_list_col in tok_lists.columns, "dataframe doesn't have column: %s" % tok_list_col
tok_lists = tok_lists.copy()
else:
dataframe_input = False
tok_list_col = tok_list_col or 'tok_lists'
tok_lists = pd.DataFrame({'tok_lists': tok_lists})
original_cols = tok_lists.columns
tok_lists['len_of_tok_lists'] = list(map(len, tok_lists[tok_list_col]))
    tok_lists['list_idx'] = [min(min_prefix_len, x) for x in tok_lists['len_of_tok_lists']]
    tok_lists['tok_str'] = list(map(lambda tok, idx: list_sep.join(tok[:idx]),
                                    tok_lists[tok_list_col], tok_lists['list_idx']))
tok_lists['look_further'] = [x > min_prefix_len for x in tok_lists['len_of_tok_lists']]
original_cols = tok_lists.columns
tok_lists['tok_lists_original_order'] = list(range(len(tok_lists)))
tok_lists['len_of_tok_lists'] = list(map(len, tok_lists[tok_list_col]))
tok_lists['is_unik'] = False
def add_is_unik():
tok_str_count = daf_ch.ch_col_names(tok_lists[['tok_str']].groupby('tok_str').count(), 'tok_str')
def expand_tok_prefix(idx):
        list_idx = pd.Series([min(idx, x) for x in tok_lists['len_of_tok_lists']], index=tok_lists.index)
lidx = tok_lists['is_unik'] == False
tok_lists['tok_str'][lidx] = \
list(map(lambda tok, idx: list_sep.join(tok[:idx]), tok_lists[tok_list_col][lidx], list_idx[lidx]))
expand_tok_prefix(min_prefix_len)
extra_cols_df = \
daf_manip.rm_cols_if_present(tok_lists,
set(tok_lists.columns).difference([tok_list_col, 'tok_lists_original_order']))
max_tok_list_len = np.max(tok_lists['len_of_tok_lists'])
work_in_progress = pd.DataFrame()
result = pd.DataFrame()
for i in range(min_prefix_len - 1):
too_small_lidx = tok_lists['len_of_tok_lists'] < (i + 1)
result = | pd.concat([result, tok_lists[too_small_lidx]]) | pandas.concat |
"""Analyze and plot cell motility from tracks"""
from collections import OrderedDict
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import AgglomerativeClustering
from scipy.spatial import ConvexHull
from statsmodels.stats.proportion import proportion_confint
from lana.utils import equalize_axis3d
from lana.utils import track_identifiers
def _uniquize_tracks(tracks, verbose=False):
"""Cluster tracks, if not unique"""
if "Time" not in tracks.columns:
return
tracks["Orig. Index"] = tracks.index
if not tracks.index.is_unique:
tracks.reset_index(drop=True, inplace=True)
if "Track_ID" in tracks.columns:
max_track_id = tracks["Track_ID"].max()
else:
max_track_id = 0
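    # Tracks with duplicated time points are split into separate Track_IDs by
    # clustering positions (or, as a fallback, the original indices) at those times.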
for identifiers, track in tracks.groupby(track_identifiers(tracks)):
if sum(track["Time"].duplicated()) != 0:
n_clusters = track["Time"].value_counts().max()
track = track.copy()
index = track.index
if "Track_ID" in track.columns:
tracks.loc[index, "Orig. Track_ID"] = track["Track_ID"]
clusters = AgglomerativeClustering(n_clusters).fit(track[["X", "Y", "Z"]])
track.loc[:, "Cluster"] = clusters.labels_
if sum(track[["Cluster", "Time"]].duplicated()) != 0:
clusters = AgglomerativeClustering(n_clusters).fit(
track[["Orig. Index"]]
)
track.loc[:, "Cluster"] = clusters.labels_
if sum(track[["Cluster", "Time"]].duplicated()) == 0:
tracks.loc[index, "Track_ID"] = max_track_id + 1 + clusters.labels_
max_track_id += n_clusters
| pd.set_option("display.max_rows", 1000) | pandas.set_option |
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import pandas as pd
import numpy as np
def get_logreg_output(features_df, target_df, active_norm):
non_num_features = [col for col, dt in features_df.dtypes.items() if dt == object]
likely_cat = {}
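    # Heuristic: besides object-dtype columns, treat any column with at most 100
    # distinct values as categorical so that it gets one-hot encoded below.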
for var in features_df.columns:
likely_cat[var] = features_df[var].nunique() <= 100
likely_cat = [k for k, v in likely_cat.items() if v is True]
non_num_features = list(set(non_num_features + likely_cat))
    if non_num_features:
lb_results_df = pd.DataFrame( | pd.get_dummies(features_df[non_num_features]) | pandas.get_dummies |
import itertools
from logging import log
import os
import json
import numpy as np
# import snowballstemmer
# import requests
# response = requests.get(url)
# response.raise_for_status() # raises exception when not a 2xx response
from streamlit_lottie import st_lottie
from io import StringIO
import spacy
from spacy_streamlit import visualize_parser
import pandas as pd
import streamlit as st
import utils
import time
author_textrazor_token = os.getenv("TEXTRAZOR_TOKEN")
author_google_key = os.getenv("GOOGLE_KEY")
# print(author_google_key)
st.set_page_config(
page_title="The Entities Swissknife",
page_icon="https://cdn.shortpixel.ai/spai/q_lossy+ret_img+to_auto/https://studiomakoto.it/wp-content/uploads/2021/08/cropped-favicon-16x16-1-192x192.png",
menu_items={
"Get help": None,
"Report a bug": None,
"About": None
}
)
hide_st_style = """
<style>
footer {visibility: hidden;}
[title^='streamlit_lottie.streamlit_lottie'] {
margin-bottom: -35px;
margin-top: -90px;
}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
if "en_nlp" not in st.session_state:
st.session_state.en_nlp = spacy.load("en_core_web_sm")
if "it_nlp" not in st.session_state:
st.session_state.it_nlp = spacy.load("it_core_news_sm")
# @st.cache(suppress_st_warning=True)
# def logo():
# @st.cache(allow_output_mutation=True)
# def logo():
# # x= "anim"
# if 'anim' not in st.session_state:
# with open("data.json") as f:
# st.session_state.anim = json.loads(f.read())
# with st.sidebar:
# st_lottie(st.session_state.anim, width=280, height=230, loop=False, key="anim_makoto")
# # # logo()
st.markdown(
"###### [](https://studiomakoto.it/?utm_source=streamlit&utm_medium=app&utm_campaign=Entities-swissknife)"
)
st.markdown(
"###### Made in [](https://www.streamlit.io/) , with ❤️ by [@max_geraci](https://studiomakoto.it/makoto_member/massimiliano-geraci/)   |   [](https://twitter.com/max_geraci)   |   [](https://www.buymeacoffee.com/MaxG.SEO)"
)
@st.cache(allow_output_mutation=True)
def load_lottifile(filepath: str):
with open(filepath, 'r') as f:
return json.load(f)
loti_path = load_lottifile('data.json')
# st.title('Lotti')
with st.sidebar:
# time.sleep(3)
st_lottie(loti_path, width=280, height=180, loop=False)
df = None
texts = None  # initialized here; populated once the API response is parsed below
language_option = None
# response2 = None
with st.form("my_form"):
api_selectbox = st.sidebar.selectbox(
"Choose the API you wish to use",
("TextRazor", "Google NLP")
)
input_type_selectbox = st.sidebar.selectbox(
"Choose what you want to analyze",
("URL", "Text", "URL vs URL")
)
st.sidebar.info(
'##### Read this article to [learn more about how to use The Entities Swissknife](https://studiomakoto.it/digital-marketing/entity-seo-semantic-publishing/).')
st.sidebar.info(
        '##### Register on the [TextRazor website](https://www.textrazor.com/) to obtain a free API key (🙌 500 calls/day 🙌) or activate the [NLP API](https://cloud.google.com/natural-language) inside your Google Cloud Console, and export the JSON authentication file.')
st.sidebar.info('##### Knowledge Graph Entity ID is extracted only using the Google NLP API.')
st.sidebar.info(
        '##### Categories and Topics - by [IPTC Media Topics](https://iptc.org/standards/media-topics/) - are available only using the TextRazor API.')
# loti_path = load_lottifile('lotti/seo.json')
# with st.sidebar:
# st_lottie(loti_path, width=280, height=130)
# st.title('Lotti')
with st.expander("ℹ️ - About this app "):
st.markdown(
"""
This app, devoted to ✍️[Semantic Publishing](https://en.wikipedia.org/wiki/Semantic_publishing)✍️, relies on:
- [Text Razor API](https://www.textrazor.com/) for Named-Entity Recognition ([NER](https://en.wikipedia.org/wiki/Named-entity_recognition)) and Linking ([NEL](https://en.wikipedia.org/wiki/Entity_linking));
- [Google NLP API](https://cloud.google.com/natural-language) for NER and NEL;
        - Wikipedia API for scraping entity descriptions;
        - For everything else, the beauty and power of 🐍Python🐍 and Streamlit.
"""
)
with st.expander("✍️ - Semantic Publishing "):
st.write(
"""
The Entities Swissknife (TES) is a 100% 🐍Python🐍 app for Semantic publishing, i.e., publishing information on the web as documents accompanied by semantic markup (using the [schema.org](https://schema.org) vocabulary in JSON-LD format). Semantic publication provides a way for machines to understand the structure and meaning of the published information, making information search and data integration more efficient.
        Semantic publishing relies on Structured Data adoption and Entity Linking (Wikification). Named entities are then injected into the JSON-LD markup to make the Content Topics explicit and 🥰search-engine friendly🥰: declare the main topic with the '[about](https://schema.org/about)' property and the secondary topics with the '[mentions](https://schema.org/mentions)' property.
        The 'about' property should refer to 1-2 entities/topics at most, and these entities should be present in your H1 title. The 'mentions' properties should be no more than 3-5 depending on the article's length; as a general rule, an entity/topic should be explicitly mentioned in your schema markup if there is at least one paragraph dedicated to it (and it is possibly present in the relative headline).
        The "Top Entities by Frequency" table counts normalized entities toward the Frequency as well, not only the exact word form with which an entity appears in the text.
"""
)
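    # Illustrative JSON-LD sketch of the 'about'/'mentions' pattern described above
    # (the entity names and sameAs URLs are made-up examples, not app output):
    # {
    #   "@context": "https://schema.org",
    #   "@type": "Article",
    #   "about": [{"@type": "Thing", "name": "Semantic SEO",
    #              "sameAs": "https://en.wikipedia.org/wiki/Semantic_search"}],
    #   "mentions": [{"@type": "Thing", "name": "Knowledge Graph",
    #                 "sameAs": "https://en.wikipedia.org/wiki/Knowledge_graph"}]
    # }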
with st.expander("🔎 - How TES can support your Semantic SEO tasks "):
st.write(
"""
        - Know how NLU (Natural Language Understanding) algorithms “understand” your text, so you can optimize it until the topics most relevant to you have the best relevance/salience scores;
- Analyze your SERP competitor’s main topics to discover possible topical gaps in your content;
        - Generate the JSON-LD markup (and inject it into your page schema) to make explicit to search engines which topics your page is about. Declare your main topic with the 'about' property. Use the 'mentions' property to declare your secondary topics. This is helpful for disambiguation purposes too;
- Analyze short texts such as a copy for an ad or a bio/description for an About-page (i.e., the [Entity Home](https://kalicube.com/faq/brand-serps/entity-home-in-seo-explainer/)).
"""
)
if api_selectbox == "TextRazor":
google_api = None
st.session_state.google_api = False
if not author_textrazor_token:
text_razor_key = st.text_input('Please enter a valid TextRazor API Key (Required)')
else:
text_razor_key = author_textrazor_token
elif api_selectbox == "Google NLP":
text_razor_key = None
st.session_state.text_razor = False
if not author_google_key:
google_api = st.file_uploader("Please upload a valid Google NLP API Key (Required)", type=["json"])
if google_api:
google_api = json.loads(google_api.getvalue().decode("utf-8"))
else:
google_api = json.loads(author_google_key)
# print(google_api)
if input_type_selectbox == "URL":
text_input = st.text_input('Please enter a URL', placeholder='https://gofishdigital.com/what-is-semantic-seo/')
# print('text_input 171 the first lien\n',text_input)
meta_tags_only = st.checkbox('Extract Entities only from meta tags (tag_title, meta_description & H1-4)')
# print('172 meta tag', meta_tags_only)
if "last_field_type" in st.session_state and st.session_state.last_field_type != input_type_selectbox:
st.session_state.text_razor = False
st.session_state.google_api = False
st.session_state.last_field_type = input_type_selectbox
elif input_type_selectbox == "Text":
if "last_field_type" not in st.session_state:
st.session_state.last_field_type = input_type_selectbox
st.session_state.text_razor = False
st.session_state.google_api = False
if st.session_state.last_field_type != input_type_selectbox:
st.session_state.text_razor = False
st.session_state.google_api = False
st.session_state.last_field_type = input_type_selectbox
meta_tags_only = False
text_input = st.text_area('Please enter a text',
placeholder='Posts involving Semantic SEO at Google include structured data, schema, and knowledge graphs, with SERPs that answer questions and rank entities - <NAME>.')
elif input_type_selectbox == "URL vs URL":
if "last_field_type" in st.session_state and st.session_state.last_field_type != input_type_selectbox:
st.session_state.text_razor = False
st.session_state.google_api = False
meta_tags_only = False
st.session_state.last_field_type = input_type_selectbox
url1 = st.text_input(label='Enter first URL')
url2 = st.text_input(label='Enter second URL')
# Every form must have a submit button.
# submitted = st.form_submit_button("Submit")
are_urls = utils.is_url(url1) and utils.is_url(url2)
urls = [url1, url2]
text_input = "None"
# if submitted:
# st.write("First Url", url1, "Second Url", url2)
is_url = utils.is_url(text_input)
if input_type_selectbox != "URL vs URL":
# print('is_uri from 192 line\n', is_url)
spacy_pos = st.checkbox('Process Part-of-Speech analysis with SpaCy')
# spacy_pos = False
# rint('Scrape all', scrape_all)
if api_selectbox == "TextRazor":
extract_categories_topics = st.checkbox('Extract Categories and Topics')
scrape_all = st.checkbox(
"Scrape ALL the Entities descriptions from Wikipedia. This is a time-consuming task, so grab a coffee if you need all the descriptions in your CSV file. The descriptions of the Entities you select for your 'about' and 'mentions' schema properties will be scraped and present in the corresponding JSON-LD files")
submitted = st.form_submit_button("Submit")
if submitted:
if not text_razor_key and not google_api:
st.warning("Please fill out all the required fields")
elif not text_input:
st.warning("Please Enter a URL/Text in the required field")
else:
st.session_state.submit = True
if api_selectbox == "TextRazor":
if input_type_selectbox == "URL vs URL":
output1, output2, entities1, entities2, language = utils.get_df_url2url_razor(text_razor_key, urls,
are_urls)
st.session_state.text_razor = True
st.session_state.google_api = False
st.session_state.df_url1 = pd.DataFrame(output1)
st.session_state.df_url2 = pd.DataFrame(output2)
lang = language
else:
output, response, topics_output, categories_output = utils.get_df_text_razor(text_razor_key,
text_input,
extract_categories_topics,
is_url, scrape_all)
st.session_state.text = response.cleaned_text
texts = st.session_state.text
st.session_state.text_razor = True
st.session_state.google_api = False
st.session_state.df_razor = pd.DataFrame(output)
if topics_output:
st.session_state.df_razor_topics = pd.DataFrame(topics_output)
if categories_output:
st.session_state.df_razor_categories = | pd.DataFrame(categories_output) | pandas.DataFrame |
import pandas as pd
class Shape:
def __init__(self, parent, x_coords, y_coords, color, plot_style="o-"):
self.parent = parent
self.x_coords = x_coords
self.y_coords = y_coords
self.color = [color]
self.plot_style = [plot_style]
self.graph = [[self.x_coords, self.y_coords]]
def modify_point(self, point_index, point):
self.x_coords[point_index], self.y_coords[point_index] = point
def append_point(self, point):
self.x_coords.append(point[0])
self.y_coords.append(point[1])
def export(self, divisions=200, dx=True):
return {"graph": | pd.DataFrame({"X": self.x_coords, "Y": self.y_coords}) | pandas.DataFrame |
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for holiday_effects."""
from absl.testing import parameterized
import pandas as pd
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import holiday_effects
HOLIDAY_FILE_FIELDS = ['geo', 'holiday', 'date']
class HolidayEffectsTest(test_util.TestCase):
def test_get_default_holidays_invalid_country(self):
times = pd.to_datetime(['2012-12-25', '2013-01-01'])
country = 'AA'
with self.assertRaises(Exception):
holiday_effects.get_default_holidays(times, country)
def test_get_default_holidays_invalid_times(self):
times = ['2012-12-25', '2013-01-01']
country = 'US'
with self.assertRaises(Exception):
holiday_effects.get_default_holidays(times, country)
@parameterized.named_parameters(
('united_states_holidays', 'US',
pd.DataFrame([['US', 'Christmas Day', '2012-12-25'],
['US', 'New Year\'s Day', '2013-01-01']],
columns=HOLIDAY_FILE_FIELDS)),
('egypt_holidays', 'EG',
pd.DataFrame([['EG', 'New Year\'s Day - Bank Holiday', '2013-01-01']],
columns=HOLIDAY_FILE_FIELDS)))
def test_get_default_holidays(self, country, expected):
times = pd.date_range(
start='2012-12-25', end='2013-01-01', freq= | pd.DateOffset(days=1) | pandas.DateOffset |
#!/usr/bin/env python
"""Tests for `qnorm` package."""
import unittest
import numpy as np
import pandas as pd
import qnorm
import tracemalloc
tracemalloc.start()
df1 = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
df1.to_csv("test.csv")
df1.to_hdf("test.hdf", key="qnorm", format="table", data_columns=True, mode="w")
df1.to_parquet("test.parquet")
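# The CSV/HDF/parquet copies written above are fixture files read back by later
# tests, e.g. the incremental_quantile_normalize tests below use test.csv.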
class TestQnorm(unittest.TestCase):
def test_000_numpy(self):
"""
test numpy support
"""
arr = np.random.normal(size=(20, 2))
qnorm.quantile_normalize(arr)
def test_001_pandas(self):
"""
test pandas support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
qnorm.quantile_normalize(df)
def test_002_wiki(self):
"""
test the wiki example
https://en.wikipedia.org/wiki/Quantile_normalization
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df).values, result
)
def test_003_no_change(self):
"""
no sorting should happen here
"""
arr = np.empty(shape=(20, 3))
for col in range(arr.shape[1]):
vals = np.arange(arr.shape[0])
np.random.shuffle(vals)
arr[:, col] = vals
qnorm_arr = qnorm.quantile_normalize(arr)
np.testing.assert_array_almost_equal(arr, qnorm_arr)
def test_004_double(self):
"""
if dtype is double, return double
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float64)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float64
def test_005_single(self):
"""
if dtype is single, return single
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float32)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float32
def test_006_target(self):
"""
test if the target is used instead of the qnorm values
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
target = np.arange(10, 20)
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_007_target_notsorted(self):
"""
make sure an unsorted target gets sorted first
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
# take the reverse, which should be sorted by qnorm
target = np.arange(10, 20)[::-1]
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_008_short_target(self):
"""
        test if an error is raised with an invalid-sized target
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
target = np.arange(10, 15)
self.assertRaises(ValueError, qnorm.quantile_normalize, arr, target)
def test_009_wiki_ncpus(self):
"""
        test the wiki example with multiple cpus (ncpus)
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, ncpus=10).values, result
)
def test_010_axis_numpy(self):
"""
test numpy axis support
"""
arr = np.random.normal(size=(50, 4))
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr.T, axis=0).T,
qnorm.quantile_normalize(arr, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr, axis=1),
qnorm.quantile_normalize(arr.T, axis=0).T,
)
def test_011_axis_pandas(self):
"""
        test pandas axis support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df.T, axis=0).T,
qnorm.quantile_normalize(df, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, axis=1),
qnorm.quantile_normalize(df.T, axis=0).T,
)
def test_012_from_csv(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.csv", "test_out.csv")
df1 = pd.read_csv("test.csv", index_col=0, header=0)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_013_from_csv_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", rowchunksize=rowchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_014_from_csv_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", colchunksize=colchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_015_from_csv_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv",
"test_out.csv",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_016_from_csv_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(index=range(5000), columns=range(100))
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_csv("test_large.csv")
qnorm.incremental_quantile_normalize(
"test_large.csv",
"test_large_out.csv",
rowchunksize=11,
colchunksize=11,
)
df2 = | pd.read_csv("test_large_out.csv", index_col=0, header=0) | pandas.read_csv |
import time
import logging
import asyncio
import pandas as pd
from collections import defaultdict
from github import Github
from github.GithubException import RateLimitExceededException, UnknownObjectException
from ghutil import get_tokens, get_issues_in_text
async def get_issue(gh: Github, repo: str, number: int) -> dict or None:
for i in range(0, 3): # Max retry 3 times
try:
logging.info("Collecting {} issue {}, remaining rate {}, reset at {}".format(
repo, number, gh.rate_limiting, gh.rate_limiting_resettime))
issue = gh.get_repo(repo).get_issue(number)
data_type = "issue"
link = "https://github.com/{}/issues/{}".format(repo, issue.number)
if issue.pull_request is not None:
data_type = "pull request"
link = "https://github.com/{}/pull/{}".format(
repo, issue.number)
text = "{} - {} ({}) at {}\n\n{}\n\n".format(
issue.title, issue.user.name, issue.user.email, issue.created_at, issue.body)
for comment in issue.get_comments():
text += "{} ({}) at {}: {}\n\n".format(
comment.user.name, comment.user.email, comment.created_at, comment.body)
return {
"type": data_type,
"link": link,
"text": text,
}
except RateLimitExceededException as ex:
logging.error("{}: {}".format(type(ex), ex))
sleep_time = gh.rate_limiting_resettime - time.time() + 10
logging.info(" wait for {} seconds...".format(sleep_time))
time.sleep(max(1.0, sleep_time))
except UnknownObjectException as ex:
logging.error("{}: {}".format(type(ex), ex))
break
except Exception as ex:
logging.error("{}: {}".format(type(ex), ex))
time.sleep(5)
return None
async def get_coding_for_commits_and_prs():
coding_data = []
logging.info("Collecting commit coding data...")
migrations = pd.read_excel("data/migrations.xlsx")
commits = set()
for idx, row in migrations.iterrows():
if row["startCommit"] not in commits:
coding_data.append({
"type": "commit",
"link": "https://github.com/{}/commit/{}".format(row["repoName"].replace("_", "/"), row["startCommit"]),
"text": row["startCommitMessage"]
})
commits.add(row["startCommit"])
if row["endCommit"] not in commits:
coding_data.append({
"type": "commit",
"link": "https://github.com/{}/commit/{}".format(row["repoName"].replace("_", "/"), row["endCommit"]),
"text": row["endCommitMessage"]
})
commits.add(row["endCommit"])
logging.info("Collecting issue/PR coding data...")
issues = | pd.read_excel("data/prs.xlsx") | pandas.read_excel |
import threading
import time
import datetime
import pandas as pd
from functools import reduce, wraps
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import zscore
import model.queries as qrs
from model.NodesMetaData import NodesMetaData
import utils.helpers as hp
from utils.helpers import timer
import parquet_creation as pcr
import glob
import os
import dask
import dask.dataframe as dd
class Singleton(type):
def __init__(cls, name, bases, attibutes):
cls._dict = {}
cls._registered = []
def __call__(cls, dateFrom=None, dateTo=None, *args):
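        # One instance is cached per (dateFrom, dateTo) window: an exact match is
        # reused; otherwise the most recent cached window is served while the
        # requested one is rebuilt in a background thread (or built synchronously
        # when nothing is cached yet).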
print('* OBJECT DICT ', len(cls._dict), cls._dict)
if (dateFrom is None) or (dateTo is None):
defaultDT = hp.defaultTimeRange()
dateFrom = defaultDT[0]
dateTo = defaultDT[1]
if (dateFrom, dateTo) in cls._dict:
print('** OBJECT EXISTS', cls, dateFrom, dateTo)
instance = cls._dict[(dateFrom, dateTo)]
else:
print('** OBJECT DOES NOT EXIST', cls, dateFrom, dateTo)
if (len(cls._dict) > 0) and ([dateFrom, dateTo] != cls._registered):
print('*** provide the latest and start thread', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
refresh = threading.Thread(target=cls.nextPeriodData, args=(dateFrom, dateTo, *args))
refresh.start()
elif ([dateFrom, dateTo] == cls._registered):
print('*** provide the latest', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
elif (len(cls._dict) == 0):
print('*** no data yet, refresh and wait', cls, dateFrom, dateTo)
cls.nextPeriodData(dateFrom, dateTo, *args)
instance = cls._dict[(dateFrom, dateTo)]
# keep only a few objects in memory
if len(cls._dict) >= 2:
cls._dict.pop(list(cls._dict.keys())[0])
return instance
def nextPeriodData(cls, dateFrom, dateTo, *args):
print(f'**** thread started for {cls}')
cls._registered = [dateFrom, dateTo]
instance = super().__call__(dateFrom, dateTo, *args)
cls._dict[(dateFrom, dateTo)] = instance
print(f'**** thread finished for {cls}')
class Updater(object):
def __init__(self):
self.StartThread()
@timer
def UpdateAllData(self):
print()
print(f'{datetime.now()} New data is on its way at {datetime.utcnow()}')
print('Active threads:',threading.active_count())
# query period must be the same for all data loaders
defaultDT = hp.defaultTimeRange()
GeneralDataLoader(defaultDT[0], defaultDT[1])
SiteDataLoader(defaultDT[0], defaultDT[1])
PrtoblematicPairsDataLoader(defaultDT[0], defaultDT[1])
SitesRanksDataLoader(defaultDT[0], defaultDT[1])
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.UpdateAllData) # 1hour
thread.daemon = True
thread.start()
class ParquetUpdater(object):
def __init__(self):
self.StartThread()
@timer
def Update(self):
print('Starting Parquet Updater')
limit = pcr.limit
indices = pcr.indices
        files = glob.glob(r'..\parquet\*')
print('files',files)
file_end = str(int(limit*24))
print('end of file trigger',file_end)
for f in files:
if f.endswith(file_end):
os.remove(f)
        files = glob.glob(r'..\parquet\*')
print('files2',files)
for idx in indices:
j=int((limit*24)-1)
print('idx',idx,'j',j)
for f in files[::-1]:
file_end = str(idx)
end = file_end+str(j)
print('f',f,'end',end)
if f.endswith(end):
new_name = file_end+str(j+1)
                    head = '..\\parquet\\'
final = head+new_name
print('f',f,'final',final)
os.rename(f,final)
j -= 1
jobs = []
limit = 1/24
timerange = pcr.queryrange(limit)
for idx in indices:
thread = threading.Thread(target=pcr.btwfunc,args=(idx,timerange))
jobs.append(thread)
for j in jobs:
j.start()
for j in jobs:
j.join()
# print('Finished Querying')
for idx in indices:
filenames = pcr.ReadParquet(idx,limit)
if idx == 'ps_packetloss':
print(filenames)
plsdf = dd.read_parquet(filenames).compute()
print('Before drops',len(plsdf))
plsdf = plsdf.drop_duplicates()
print('After Drops',len(plsdf))
print('packetloss\n',plsdf)
if idx == 'ps_owd':
owddf = dd.read_parquet(filenames).compute()
print('owd\n',owddf)
if idx == 'ps_retransmits':
rtmdf = dd.read_parquet(filenames).compute()
print('retransmits\n',rtmdf)
if idx == 'ps_throughput':
trpdf = dd.read_parquet(filenames).compute()
print('throughput\n',trpdf)
print('dask df complete')
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.Update) # 1hour
thread.daemon = True
thread.start()
class GeneralDataLoader(object, metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.lastUpdated = None
self.pls = pd.DataFrame()
self.owd = pd.DataFrame()
self.thp = pd.DataFrame()
self.rtm = pd.DataFrame()
self.UpdateGeneralInfo()
@property
def dateFrom(self):
return self._dateFrom
@dateFrom.setter
def dateFrom(self, value):
self._dateFrom = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def dateTo(self):
return self._dateTo
@dateTo.setter
def dateTo(self, value):
self._dateTo = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def lastUpdated(self):
return self._lastUpdated
@lastUpdated.setter
def lastUpdated(self, value):
self._lastUpdated = value
@timer
def UpdateGeneralInfo(self):
# print("last updated: {0}, new start: {1} new end: {2} ".format(self.lastUpdated, self.dateFrom, self.dateTo))
self.pls = NodesMetaData('ps_packetloss', self.dateFrom, self.dateTo).df
self.owd = NodesMetaData('ps_owd', self.dateFrom, self.dateTo).df
self.thp = NodesMetaData('ps_throughput', self.dateFrom, self.dateTo).df
self.rtm = NodesMetaData('ps_retransmits', self.dateFrom, self.dateTo).df
self.latency_df = pd.merge(self.pls, self.owd, how='outer')
self.throughput_df = | pd.merge(self.thp, self.rtm, how='outer') | pandas.merge |
'''
This class uses scikit-learn to vectorize a corpus of text and
allow comparison of new documents to the existing corpus matrix
'''
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
class CosineMatcher(object):
def __init__(self, encoding='utf-8', analyzer='word', ngram_range=(1,1), \
min_df = 1, max_df = 0.8, use_idf=True):
'''
Defaults
encoding=utf-8
min_df = 1 => only include token if it appears in at least 1 document
max_df = 0.8 => drop the token if it appears in over 80pc of the docs
We aren't using TfidfVectorizer's built-in tokenizer and stop/stem
functionality because we have chosen to pre-process that text and will
be running other types of matching on the stop/stemmed text.
This code assumes that you have already processed the corpus.
'''
self.match_corpus = None
self.matrix = None
self.encoding = encoding
self.vectorizer = TfidfVectorizer(encoding=encoding, analyzer=analyzer,\
ngram_range=ngram_range, min_df=min_df, max_df=max_df,\
use_idf=use_idf)
def train(self, corpus, train_on='training_col'):
'''
Fit the training corpus to the TF-IDF Vectorizer.
corpus: Path to CSV file containing the training corpus.
train_on: Name of the column in the CSV.
'''
df_corpus = pd.read_csv(corpus, encoding=self.encoding)
self.match_corpus = df_corpus[
df_corpus[train_on].isnull()==False].reset_index()
training_corpus = self.match_corpus[train_on].values
self.matrix = self.vectorizer.fit_transform(training_corpus)
def check_matches(self, target, n_best):
'''
target is a string
n_best is the number of matches we want returned
Transforms target query into vector form
Calculates dot product across tfidf matrix
Returns a list of the n_best matches for the target
'''
        if not isinstance(target, str) and np.isnan(target):
target = ''
vectorized_query = self.vectorizer.transform([target])
cosine_sim = linear_kernel(vectorized_query, self.matrix).flatten()
n_best_matches_indices = cosine_sim.argsort()[:-n_best-1:-1]
        matched_rows = []
        for index in n_best_matches_indices:
            match_values = self.match_corpus.iloc[index].copy()
            score = cosine_sim[index]
            match_values['score'] = '{:.2f}'.format(score * 100)
            matched_rows.append(match_values)
        best_matches = pd.DataFrame(matched_rows)
best_matches = best_matches.where(( | pd.notnull(best_matches) | pandas.notnull |
import loader
import numpy as np
import pandas as pd
from pathlib import Path
from definitions import *
def get_features():
def generate_user_features(df):
print('Generate user features')
if Path(name__features_user).exists():
print('- {} is existed already. Let\'s load it'.format(name__features_user))
return pd.read_pickle(name__features_user)
print('- {} is not existed. Let\'s generate it'.format(name__features_user))
# Step #1. Grouped by USER_ID_1, USER_ID_2, USER_ID_3
usr1 = df.groupby([df.USER_ID_1, df.USER_ID_2, df.USER_ID_3]).agg({'ORDERED': 'sum',
'SESSION_CNT': ['sum', 'mean']
})
usr1.columns = usr1.columns.droplevel(0)
usr1.columns = ['USR_ORDERED_SUM',
'USR_SESSION_CNT_SUM', 'USR_SESSION_CNT_MEAN'
]
usr1.reset_index(inplace=True)
# Step #2. Grouped by USER_ID_1, USER_ID_2
usr2 = df.groupby([df.USER_ID_1, df.USER_ID_2]).agg({'TENDENCY_1': ['sum', 'mean'],
'TENDENCY_2': ['sum', 'mean'],
'TENDENCY_3': ['sum', 'mean'],
'TENDENCY_4': ['sum', 'mean'],
'TENDENCY_5': ['sum', 'mean'],
'TENDENCY_6': ['sum', 'mean'],
'TENDENCY_7': ['sum', 'mean'],
'TENDENCY_8': ['sum', 'mean'],
'TENDENCY_9': ['sum', 'mean'],
'TENDENCY_10': ['sum', 'mean'],
'TENDENCY_11': ['sum', 'mean'],
'TENDENCY_12': ['sum', 'mean'],
'TENDENCY_13': ['sum', 'mean'],
'TENDENCY_14': ['sum', 'mean'],
'TENDENCY_15': ['sum', 'mean'],
'TENDENCY_16': ['sum', 'mean']})
usr2.columns = usr2.columns.droplevel(0)
usr2.columns = ['USR_TENDENCY_1_SUM', 'USR_TENDENCY_1_MEAN',
'USR_TENDENCY_2_SUM', 'USR_TENDENCY_2_MEAN',
'USR_TENDENCY_3_SUM', 'USR_TENDENCY_3_MEAN',
'USR_TENDENCY_4_SUM', 'USR_TENDENCY_4_MEAN',
'USR_TENDENCY_5_SUM', 'USR_TENDENCY_5_MEAN',
'USR_TENDENCY_6_SUM', 'USR_TENDENCY_6_MEAN',
'USR_TENDENCY_7_SUM', 'USR_TENDENCY_7_MEAN',
'USR_TENDENCY_8_SUM', 'USR_TENDENCY_8_MEAN',
'USR_TENDENCY_9_SUM', 'USR_TENDENCY_9_MEAN',
'USR_TENDENCY_10_SUM', 'USR_TENDENCY_10_MEAN',
'USR_TENDENCY_11_SUM', 'USR_TENDENCY_11_MEAN',
'USR_TENDENCY_12_SUM', 'USR_TENDENCY_12_MEAN',
'USR_TENDENCY_13_SUM', 'USR_TENDENCY_13_MEAN',
'USR_TENDENCY_14_SUM', 'USR_TENDENCY_14_MEAN',
'USR_TENDENCY_15_SUM', 'USR_TENDENCY_15_MEAN',
'USR_TENDENCY_16_SUM', 'USR_TENDENCY_16_MEAN']
usr2.reset_index(inplace=True)
# Step #3. Merged usr1 with usr2
usr = usr1.merge(usr2, on=['USER_ID_1', 'USER_ID_2'])
print('- Saving...')
usr.to_pickle(name__features_user)
print('- Saved {}'.format(name__features_user))
return usr
def generate_product_features(df):
print('Generate product features')
if Path(name__features_product).exists():
print('- {} is existed already. Let\'s load it'.format(name__features_product))
return pd.read_pickle(name__features_product)
print('- {} is not existed. Let\'s generate it'.format(name__features_product))
# Grouped by PRODUCT_ID
prd = df.groupby([df.PRODUCT_ID]).agg({'ORDERED' : 'sum',
# 'LAST_EPISODE': ['sum', 'mean'],
# 'START_DATE': ['sum', 'mean'],
# 'TOTAL_EPISODE_CNT': ['sum', 'mean']
})
# prd.columns = prd.columns.droplevel(0)
prd.columns = ['PRD_ORDERED_SUM',
# 'PRD_LAST_EPISODE_SUM', 'PRD_LAST_EPISODE_MEAN',
# 'PRD_START_DATE_SUM', 'PRD_START_DATE_MEAN',
# 'PRD_TOTAL_EPISODE_CNT_SUM', 'PRD_TOTAL_EPISODE_CNT_MEAN'
]
prd.reset_index(inplace=True)
print('- Saving...')
prd.to_pickle(name__features_product)
print('- Saved {}'.format(name__features_product))
return prd
def generate_user_product_features(df):
print('Generate user_product features')
if Path(name__features_user_product).exists():
print('- {} is existed already. Let\'s load it'.format(name__features_user_product))
return pd.read_pickle(name__features_user_product)
print('- {} is not existed. Let\'s generate it'.format(name__features_user_product))
# Grouped by USER_ID_1, USER_ID_2, USER_ID_3, PRODUCT_ID
usr_prd = df.groupby([df.USER_ID_1, df.USER_ID_2, df.USER_ID_3, df.PRODUCT_ID])\
.agg({'USER_ID_1': 'size',
'ORDERED': 'sum'})
# usr_prd.columns = usr_prd.columns.droplevel(0)
usr_prd.columns = ['UP_VIEW_CNT', 'UP_ORDERED_SUM']
usr_prd['UP_ORDERED_RATIO'] = pd.Series(usr_prd.UP_ORDERED_SUM / usr_prd.UP_VIEW_CNT).astype(np.float32)
usr_prd.reset_index(inplace=True)
print('- Saving...')
usr_prd.to_pickle(name__features_user_product)
print('- Saved {}'.format(name__features_user_product))
return usr_prd
def generate_features(dtrain, dtest):
# We do not use user_features because they took my cv score down!
# usr = generate_user_features(dtrain)
prd = generate_product_features(dtrain)
usr_prd = generate_user_product_features(dtrain)
# Merge usr_prd with original data
dtrain = dtrain.merge(usr_prd, on=['USER_ID_1', 'USER_ID_2', 'USER_ID_3', 'PRODUCT_ID'], how='left') \
.merge(prd, on=['PRODUCT_ID'], how='left')
dtest = dtest.merge(usr_prd, on=['USER_ID_1', 'USER_ID_2', 'USER_ID_3', 'PRODUCT_ID'], how='left') \
.merge(prd, on=['PRODUCT_ID'], how='left')
# Concatenate USER_ID
dtrain['USER_ID'] = dtrain[['USER_ID_1', 'USER_ID_2', 'USER_ID_3']].apply(
lambda x: '{}_{}_{}'.format(x[0], x[1], x[2]), axis=1)
dtrain.drop(['USER_ID_1', 'USER_ID_2', 'USER_ID_3'], axis=1, inplace=True)
dtest['USER_ID'] = dtest[['USER_ID_1', 'USER_ID_2', 'USER_ID_3']].apply(
lambda x: '{}_{}_{}'.format(x[0], x[1], x[2]), axis=1)
dtest.drop(['USER_ID_1', 'USER_ID_2', 'USER_ID_3'], axis=1, inplace=True)
# Add combined features
dtrain['UP_PRD_ORDERED_RATIO'] = (dtrain.UP_ORDERED_SUM / dtrain.PRD_ORDERED_SUM).astype(np.float32)
dtest['UP_PRD_ORDERED_RATIO'] = (dtest.UP_ORDERED_SUM / dtest.PRD_ORDERED_SUM).astype(np.float32)
# Remove some 'less important' initial features
drop_column_list = ['BUY_PRODUCT_31', 'BUY_PRODUCT_47', 'BUY_PRODUCT_54',
'BUY_PRODUCT_63', 'BUY_PRODUCT_64', 'BUY_PRODUCT_69',
'BUY_PRODUCT_78', 'BUY_PRODUCT_85', 'BUY_PRODUCT_91', 'BUY_PRODUCT_97',
'BUY_PRODUCT_58',
'SCHEDULE_4', 'SCHEDULE_10',
'GENRE_5', 'GENRE_7', 'GENRE_10', 'GENRE_11', 'GENRE_12', 'GENRE_13', 'GENRE_17', 'GENRE_18',
'TAG_3', 'TAG_4', 'TAG_5',
'TENDENCY_9']
dtrain.drop(drop_column_list, axis=1, inplace=True)
dtest.drop(drop_column_list, axis=1, inplace=True)
print('Train Features {}: [{}]'.format(dtrain.shape, ', '.join(dtrain.columns)))
print('Test Features {}: [{}]'.format(dtest.shape, ', '.join(dtest.columns)))
return dtrain, dtest
if Path(name__features_train).exists() and Path(name__features_test).exists():
print('{} {} are existed already. Let\'s load it'.format(name__features_train, name__features_test))
return pd.read_pickle(name__features_train), | pd.read_pickle(name__features_test) | pandas.read_pickle |
import pandas as pd
data_av_week = pd.read_csv("data_av_week.csv")
supermarkt_urls = pd.read_csv("supermarkt_urls.csv")
s_details = pd.read_csv("notebooksdetailed_supermarkt_python_mined.csv", header= None)
migros_details = pd.read_csv("notebooksdetailed_Migros_python_mined.csv", header= None)
coop_details = pd.read_csv("notebooksdetailed_Coop_python_mined.csv", header= None)
data_av_week = data_av_week.drop(["Unnamed: 0"], axis=1)
data_av_week = data_av_week.rename({'url':'urls'}, axis=1)
head = ["name_supermarkt", "address", "lat", "long", "key_words", "codes", "postal_code", "address2", "url2"]
s_details.columns = head
s_details = s_details.drop(columns=['address2'])
migros_details.columns = head
migros_details = migros_details.drop(columns=['address2'])
coop_details.columns = head
coop_details = coop_details.drop(columns=['address2'])
# merge the supermarkt data
supermarkt_details = pd.merge(s_details, migros_details, how="outer")
supermarkt_details = pd.merge(supermarkt_details, coop_details, how="outer")
data_week_urls = pd.merge(supermarkt_urls, data_av_week, how="outer", on="urls")
data_names_week_all = pd.merge(supermarkt_details, data_week_urls, how="outer", on="codes")
data_names_week_all.to_csv("all_data_per_week.csv", index=False)
# Per day
data_av_day = | pd.read_csv("data_av_day.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import pickle
import pyranges as pr
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC')]
##your path to the files directory
file_path = cwd / 'files/'
usecols = ['Hugo_Symbol', 'Chromosome', 'Start_position', 'End_position', 'Variant_Classification', 'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele2', 'i_VAF', 'Tumor_Sample_Barcode', 'Donor_ID']
##from: https://dcc.icgc.org/releases/PCAWG/consensus_snv_indel
pcawg_maf = pd.read_csv(file_path / 'final_consensus_passonly.snv_mnv_indel.icgc.public.maf', sep='\t',
usecols=usecols,
low_memory=False)
##from: https://dcc.icgc.org/releases/PCAWG/donors_and_biospecimens
pcawg_sample_table = pd.read_csv(file_path / 'pcawg_sample_sheet.tsv', sep='\t', low_memory=False)
##limit samples to what's in the maf
pcawg_sample_table = pcawg_sample_table.loc[pcawg_sample_table['aliquot_id'].isin(pcawg_maf['Tumor_Sample_Barcode'].unique())]
pcawg_sample_table.drop_duplicates(['icgc_donor_id'], inplace=True)
pcawg_sample_table = pcawg_sample_table.loc[pcawg_sample_table['dcc_specimen_type'] != 'Cell line - derived from tumour']
##from: https://dcc.icgc.org/releases/current/Summary
pcawg_donor_table = pd.read_csv(file_path / 'donor.all_projects.tsv', sep='\t', low_memory=False)
pcawg_sample_table = pd.merge(pcawg_sample_table, pcawg_donor_table, how='left', on='icgc_donor_id')
##limit MAF to unique samples
pcawg_maf = pcawg_maf.loc[pcawg_maf['Tumor_Sample_Barcode'].isin(pcawg_sample_table['aliquot_id'])]
# df of counts via groupby, could add other metrics derived from mc maf here
non_syn = ['Missense_Mutation', 'Nonsense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Nonstop_Mutation']
pcawg_counts = pcawg_maf[['Variant_Classification', 'Tumor_Sample_Barcode']].groupby('Tumor_Sample_Barcode').apply(lambda x: pd.Series([len(x), (x['Variant_Classification'].isin(non_syn)).sum()], index=['all_counts', 'non_syn_counts']))
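# Convert the non-synonymous count into a tumor mutational burden (mutations per Mb);
# 31.85 is presumably the size in Mb of the assayed coding footprint.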
pcawg_counts['non_syn_tmb'] = pcawg_counts['non_syn_counts'] / 31.85
pcawg_counts.reset_index(inplace=True)
# join to clinical annotation for data in mc3 only, this will add Tumor_Sample_Barcode also to the tcga_sample_table
pcawg_sample_table = pd.merge(pcawg_sample_table, pcawg_counts, how='right', left_on='aliquot_id', right_on='Tumor_Sample_Barcode')
##sample table is done, save to file
pickle.dump(pcawg_sample_table, open(file_path / 'pcawg_sample_table.pkl', 'wb'))
chromosomes = {}
for i in list(range(1, 23))+['X', 'Y']:
with open(file_path / 'chromosomes' / ('chr' + str(i) + '.txt')) as f:
chromosomes[str(i)] = f.read()
##Use GFF3 to annotate variants
##ftp://ftp.ensembl.org/pub/grch37/current/gff3/homo_sapiens/
gff = pd.read_csv(file_path / 'Homo_sapiens.GRCh37.87.gff3',
sep='\t',
names=['chr', 'unknown', 'gene_part', 'start', 'end', 'unknown2', 'strand', 'unknown3', 'gene_info'],
usecols=['chr','gene_part', 'start', 'end', 'gene_info'],
low_memory=False)
gff_cds_pr = pr.PyRanges(gff.loc[(gff['gene_part'] == 'CDS') & gff['chr'].isin(chromosomes), ['chr', 'start', 'end', 'gene_info']].astype({'start': int, 'end': int}).rename(columns={'chr': 'Chromosome', 'start': 'Start', 'end': 'End'})).merge()
gff_exon_pr = pr.PyRanges(gff.loc[(gff['gene_part'] == 'exon') & gff['chr'].isin(chromosomes), ['chr', 'start', 'end', 'gene_info']].astype({'start': int, 'end': int}).rename(columns={'chr': 'Chromosome', 'start': 'Start', 'end': 'End'})).merge()
del gff
##make index column for merging
pcawg_maf['index'] = pcawg_maf.index.values
maf_pr = pr.PyRanges(pcawg_maf.loc[:, ['Chromosome', 'Start_position', 'End_position', 'index']].rename(columns={'Start_position': 'Start', 'End_position': 'End'}))
##use the genie 7.0 panels: https://www.synapse.org/#!Synapse:syn21551261
genie = pd.read_csv(file_path / 'genomic_information.txt', sep='\t', low_memory=False)
panels = genie.SEQ_ASSAY_ID.unique()
panel_df = pd.DataFrame(data=panels, columns=['Panel'])
repeats = pd.read_csv(file_path / 'simpleRepeat.txt', sep='\t', low_memory=False, header=None, usecols=[1, 2, 3])
repeats[1] = repeats[1].str.replace('chr', '')
repeats.rename(columns={1: 'Chromosome', 2: 'Start', 3: 'End'}, inplace=True)
repeats_pr = pr.PyRanges(repeats.loc[repeats['Chromosome'].isin(chromosomes)]).merge()
total_sizes = []
cds_sizes = []
exon_sizes = []
panel_prs = []
for panel in panels:
print(panel)
panel_pr = pr.PyRanges(genie.loc[(genie['SEQ_ASSAY_ID'] == panel) & genie['Chromosome'].isin(chromosomes), 'Chromosome':'End_Position'].rename(columns={'Start_Position': 'Start', 'End_Position': 'End'})).merge()
total_sizes.append(sum([i + 1 for i in panel_pr.lengths()]))
cds_sizes.append(sum([i + 1 for i in panel_pr.intersect(gff_cds_pr).lengths()]))
exon_sizes.append(sum([i + 1 for i in panel_pr.intersect(gff_exon_pr).lengths()]))
panel_prs.append(panel_pr)
grs = {k: v for k, v in zip(['repeat', 'CDS', 'exon'] + list(panels), [repeats_pr, gff_cds_pr, gff_exon_pr] + panel_prs)}
result = pr.count_overlaps(grs, pr.concat({'maf': maf_pr}.values()))
result = result.df
pcawg_maf = | pd.merge(pcawg_maf, result.iloc[:, 3:], how='left', on='index') | pandas.merge |
import argparse
import pandas as pd
import numpy as np
import sys
from pathlib import Path
p = str(Path(__file__).resolve().parents[2]) # directory two levels up from this file
sys.path.append(p)
from realism.realism_utils import make_orderbook_for_analysis
def create_orderbooks(exchange_path, ob_path):
MID_PRICE_CUTOFF = 10000
processed_orderbook = make_orderbook_for_analysis(exchange_path, ob_path, num_levels=1,
hide_liquidity_collapse=False)
cleaned_orderbook = processed_orderbook[(processed_orderbook['MID_PRICE'] > - MID_PRICE_CUTOFF) &
(processed_orderbook['MID_PRICE'] < MID_PRICE_CUTOFF)]
transacted_orders = cleaned_orderbook.loc[cleaned_orderbook.TYPE == "ORDER_EXECUTED"]
transacted_orders = transacted_orders.reset_index()
transacted_orders = transacted_orders.sort_values(by=['index', 'ORDER_ID']).iloc[1::2]
transacted_orders.set_index('index', inplace=True)
return processed_orderbook, transacted_orders, cleaned_orderbook
def calculate_market_impact(orders_df, ob_df, start_time, end_time, tao):
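    # Bins executed orders into tao-second windows, computes the signed trade
    # imbalance and participation-of-volume per window, and relates them to the
    # mid-price move (in bps) over the same window, averaged within POV bins.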
def create_bins(tao, start_time, end_time, orders_df, is_buy):
bins = pd.interval_range(start=start_time, end=end_time, freq=pd.DateOffset(seconds=tao))
binned = pd.cut(orders_df.loc[orders_df.BUY_SELL_FLAG == is_buy].index, bins=bins)
binned_volume = orders_df.loc[orders_df.BUY_SELL_FLAG == is_buy].groupby(binned).SIZE.agg(np.sum)
return binned_volume
def calculate_mid_move(row):
try:
t_start = row.name.left
t_end = row.name.right
mid_t_start = mid_resampled.loc[mid_resampled.index == t_start].item()
mid_t_end = mid_resampled.loc[mid_resampled.index == t_end].item()
if row.ti < 0:
row.mi = -1 * ((mid_t_end - mid_t_start) / mid_t_start) * 10000 # bps
else:
row.mi = (mid_t_end - mid_t_start) / mid_t_start * 10000 # bps
return row.mi
except:
pass
ob_df = ob_df.reset_index().drop_duplicates(subset='index', keep='last').set_index('index')
mid = ob_df.MID_PRICE
mid_resampled = mid.resample(f'{tao}s').ffill()
binned_buy_volume = create_bins(tao=int(tao), start_time=start_time, end_time=end_time, orders_df=orders_df,
is_buy=True).fillna(0)
binned_sell_volume = create_bins(tao=int(tao), start_time=start_time, end_time=end_time, orders_df=orders_df,
is_buy=False).fillna(0)
midf = pd.DataFrame()
midf['buy_vol'] = binned_buy_volume
midf['sell_vol'] = binned_sell_volume
midf['ti'] = midf['buy_vol'] - midf['sell_vol'] # Trade Imbalance
midf['pov'] = abs(midf['ti']) / (midf['buy_vol'] + midf['sell_vol']) # Participation of Volume in tao
midf['mi'] = None
midf.index = pd.interval_range(start=start_time, end=end_time, freq=pd.DateOffset(seconds=int(tao)))
midf.mi = midf.apply(calculate_mid_move, axis=1)
pov_bins = np.linspace(start=0, stop=1, num=1000, endpoint=False)
pov_binned = pd.cut(x=midf['pov'], bins=pov_bins)
midf['pov_bins'] = pov_binned
midf_gpd = midf.sort_values(by='pov_bins')
midf_gpd.index = midf_gpd.pov_bins
del midf_gpd['pov_bins']
df = pd.DataFrame(index=midf_gpd.index)
df['mi'] = midf_gpd['mi']
df['pov'] = midf_gpd['pov']
df = df.groupby(df.index).mean()
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Market Impact Curve as described in AlmgrenChriss 05 paper')
parser.add_argument('--stock', default=None, required=True, help='stock (ABM)')
parser.add_argument('--date', default=None, required=True, help='date (20200101)')
parser.add_argument('--log', type=str, default=None, required=True, help='log folder')
parser.add_argument('--tao', type=int, required=True, help='Number of seconds in each bin')
args, remaining_args = parser.parse_known_args()
stock = args.stock
date = args.date
start_time = pd.Timestamp(date) + pd.to_timedelta('09:30:00')
end_time = | pd.Timestamp(date) | pandas.Timestamp |
"""Summarize upstream catchment information.
Description
----------
Module that helps support network summarization of
information from "local" segments of the network.
Methods require information pre-summarized to local segments.
Methods currently support calculations for sum, min, max and
weighted average.
To help process information in the most efficient manner the
NetworkCalc class should be used. This seperates segments into
three bins 1)segments with no parents, 2)segments with one parent
and 3)segments with multiple parents. Segments in bins 1 and 2
are processed all together to minimize unneeded queries. Each
segment in 3 is processed individually.
"""
# import needed packages
import pandas as pd
import sys
from multiprocessing import Pool
from functools import partial
from xstrm import build_network
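# Typical usage (illustrative sketch; the file name and the id/weight column names
# below are assumptions, not part of this module):
#   net = NetworkCalc(num_proc=4)
#   ...build_network traverses the network, calling net.add_seg(...) per segment...
#   local_df = get_local_vars_csv("local_data.csv", indx_df, "seg_id",
#                                 weight_col_name="length_km")
#   net.add_processing_details(local_df, calc_type="weighted_avg", include_missing=True)
#   net.calc_no_parent(); net.calc_one_parent(); net.calc_mult_parent()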
def get_local_vars_csv(file_name,
indx_df,
id_col_name,
weight_col_name=None,
drop_cols=[]):
"""Import and format CSV file with local segment data.
Description
----------
Imports CSV file into Pandas that contains summaries of information
at the local stream segment level. This file should also contain the
stream segment identifier and stream_segment weight (if applicable).
Examples of weights include area for area weighted average or length
for length weighted average.) If weight is not required use default.
See file 'tests/test_local_data.csv' as example.
Parameters
----------
file_name: str
String representation of file name including directory and extension
e.g. 'data/my_local_data.csv'
indx_df: df
Pandas dataframe that relates the temporary index ids ('xstrm_id') with
user provided id ('id_col_name').
id_col_name: str
String representation of the column name for the identifier column.
Values in this column can represent str or num.
weight_col_name: str
String representation of the column name for the column containing
weights for network weighted averages. This field is optional,
and as default all segments have equal weights.
Values should be int or float.
drop_cols: list
List of comma separated strings representing column names that
should not be processes during network_calc operations.
Returns
----------
df: df
Pandas dataframe formatted for and intended for use in
network_calc methods. Contains xstrm_id, seg_weight and
variables to be summarized.
"""
df = pd.read_csv(file_name, dtype={id_col_name: str})
df = get_local_vars_df(
df, indx_df, id_col_name, weight_col_name, drop_cols
)
return df
def get_local_vars_df(df,
indx_df,
id_col_name,
weight_col_name=None,
drop_cols=[]):
"""Format Pandas df with local segment data for use of network_calc methods.
Description
----------
Formats a dataframe of data for local stream segments to be used by
network_calc functions. This dataframe should contain the
stream segment identifier and stream_segment weight (e.g. area for
area weighted average or length for length weighted average.) If weight
is not required use default which will evenly weight segments.
Parameters
----------
df: df
Dataframe containing data for each local stream segment. Multiple
columns of data can be included.
indx_df: df
Pandas dataframe that relates the temporary index ids ('xstrm_id') with
user provided id ('id_col_name').
id_col_name: str
String representation of the column name for the identifier column.
Values in this column can represent str or num.
weight_col_name: str
String representation of the column name for the column containing
weights for network weighted averages. This field is optional,
and as default all segments have equal weights.
Values should be int or float.
drop_cols: list
List of comma separated strings representing column names that
should not be processes during network_calc operations.
Returns
----------
df2: df
Pandas dataframe formatted for and intended for use in
network_calc methods. Contains xstrm_id, seg_weight and
variables to be summarized.
"""
field_names = {}
required_cols = drop_cols + [id_col_name]
if weight_col_name is not None:
        required_cols = required_cols + [weight_col_name]
field_names = {str(weight_col_name): "seg_weight"}
else:
df['seg_weight'] = 1
# Make sure all user supplied columns are in the database
not_included = [
n for n in required_cols if n not in df.columns.to_list()
]
if len(not_included) > 0:
m = f"At least one variable supplied ({not_included}) was not in the dataset."
sys.exit(m)
if id_col_name not in indx_df.columns.to_list():
m = "Id column name must match name used in network build."
sys.exit(m)
# Join local data to the index file (see build_network.df_transform)
df = build_network.indx_to_id(
df, indx_df, id_col_name, need="xstrm_id"
)
df = df.rename(field_names, axis="columns")
df.set_index("xstrm_id", inplace=True)
# Drop columns specified by user
df = df.drop(drop_cols, axis="columns")
df2 = df.copy()
try:
df2 = df2.astype(float)
    except Exception:
        sys.exit("Only works with numeric local data.")
return df2
class NetworkCalc:
def __init__(self, num_proc=4):
"""Initialize summary by setting process lists.
Description
----------
Manages information associated with processing
network calculations. Lists of identifiers are used
to process information in an efficient manner using
multiprocessing and also minimizing unneeded queries
for segments with one or less parents.
Parameters
----------
num_proc: int
Number of worker processes to use in multiprocessing.
"""
self.num_proc = num_proc
self.no_parent_ids = []
self.multi_parent_ids = []
self.one_parent_ids = []
self.no_parent_df = None
self.one_parent_df = None
self.multi_parent_df = None
self.final_df = None
def add_seg(self, xstrm_id, all_parents, include_seg=True):
"""Add xstrm_id to appropriate processing list.
Description
----------
Add xstrm_id to appropriate processing list, depending on
if the segment has no parents, one parent or multiple parents.
This method is used in the build_network.
Parameters
----------
xstrm_id: str or int
Index of the stream segment of interest.
all_parents: list
List of xstrm_ids that represent parents of the xstrm_id
of interest.
include_seg: bool
True means include processing segment in parent list.
False means omit processing segment from parent list.
"""
if len(all_parents) == 0:
self.no_parent_ids.append(xstrm_id)
elif include_seg and len(all_parents) == 1:
self.one_parent_ids.append(xstrm_id)
else:
val = {"xstrm_id": xstrm_id, "parents": all_parents}
self.multi_parent_ids.append(val)
def add_hdf_seg(self, xstrm_id, all_parents, include_seg=True):
"""Add xstrm_id to processing lists as traverse network.
Description
----------
Add xstrm_id to appropriate processing list when building
network to hdf file, depending on if the segment has no
parents, one parent or multiple parents.
This method is used in the build_network.
Parameters
----------
xstrm_id: str or int
Index of the stream segment of interest.
all_parents: list
List of xstrm_ids that represent parents of the xstrm_id
of interest.
include_seg: bool
True means include processing segment in parent list.
False means omit processing segment from parent list.
"""
if len(all_parents) == 0:
self.no_parent_ids.append(xstrm_id)
elif include_seg and len(all_parents) == 1:
self.one_parent_ids.append(xstrm_id)
else:
self.multi_parent_ids.append(xstrm_id)
def add_processing_details(
self, local_df, calc_type="sum", include_missing=True, hdf_file=None
):
"""Capture processing details to processing object.
Description
----------
Capture processing details to help direct code in
the network calculation steps. User defines local data,
calculation type and if missing information should be
calculated or not.
Parameters
----------
local_df: df
Pandas dataframe containing xstrm_id, seg_weight and
variables to be summarized. Should be formatted by (or
similar to) def get_local_vars_df
calc_type: str
Options include: 'sum','min','max','weighted_avg'
See calc_* functions for more detail.
include_missing: bool
            Where True, summarizes the percent of segment weight with missing
            data at the local scale. Where False, missing data is not calculated.
        hdf_file: str
            String representing the path to the HDF5 file.
"""
if isinstance(local_df, pd.DataFrame):
self.local_df = local_df
else:
m = "Verify local_df is a dataframe"
sys.exit(m)
if hdf_file is not None:
self.hdf_file = hdf_file
self.include_missing = include_missing
self.set_calc_type(calc_type)
self.get_var_names()
def set_calc_type(self, calc_type='sum'):
"""Validate and format calc type of NetworkSummary Class.
Parameters
----------
calc_type: str
Options include: 'sum','min','max','weighted_avg'
See calc_* functions for more detail.
"""
options = ['sum', 'max', 'min', 'weighted_avg']
if calc_type.lower() in options:
self.calc_type = calc_type.lower()
else:
sys.exit(
"Please use a supported calc type: sum, max, min or weighted_avg"
)
def get_var_names(self, drop_vars=['xstrm_id', 'seg_weight']):
"""Get column names to process (target) and output.
Parameters
----------
drop_vars: list
List of column names not to include in calculations.
Returns
----------
target_vars: list
List of column names to perform summary calculations on.
out_vars: list
List of column names (str) expected in network_calc output.
"""
all_vars = self.local_df.columns.to_list()
target_vars = [
t for t in all_vars if t not in drop_vars
]
        # output variable names are prefixed with n_
out_vars = [
"n_" + t for t in target_vars
]
        # if include_missing, add output variables prefixed with mn_
if self.include_missing:
out_mnvars = [
"mn_" + t for t in target_vars
]
out_vars = out_vars + out_mnvars
self.target_vars = target_vars
self.out_vars = out_vars
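    # Illustrative note (not part of the original class): if local_df carries the
    # columns ['seg_weight', 'elev'] and include_missing is True, get_var_names
    # yields target_vars == ['elev'] and out_vars == ['n_elev', 'mn_elev'].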
def calc_one_parent(self):
"""Build dataframe from one parent id list.
Description
----------
Build dataframe for all segments that have one
parent, where parent == segment.
"""
if len(self.one_parent_ids) > 0:
self.one_parent_df = one_parent_to_df(
self.local_df,
self.one_parent_ids,
self.target_vars,
include_missing=self.include_missing
)
def calc_no_parent(self):
"""Build dataframe from no parent id list.
Description
----------
Build dataframe for all segments that have no
parent. This dataframe will contain all null
data. This will never contain segments when
include_seg == True.
"""
if len(self.no_parent_ids) > 0:
self.no_parent_df = no_parent_to_df(
self.no_parent_ids, self.out_vars
)
def calc_mult_parent(self):
"""Build dataframe from mult parent id list, no mp."""
seg_summaries = []
for seg in self.multi_parent_ids:
xstrm_id = seg["xstrm_id"]
parents = seg["parents"]
target_df = self.local_df[
self.local_df.index.isin(parents)
]
if self.calc_type == 'sum':
seg_summary = calc_sum(
target_df, self.target_vars, self.include_missing
)
elif self.calc_type == 'max':
seg_summary = calc_max(
target_df, self.target_vars, self.include_missing
)
elif self.calc_type == 'min':
seg_summary = calc_min(
target_df, self.target_vars, self.include_missing
)
elif self.calc_type == 'weighted_avg':
seg_summary = calc_weighted_avg(
target_df, self.target_vars, self.include_missing
)
seg_summary.update({'xstrm_id': xstrm_id})
seg_summaries.append(seg_summary)
all_summary_df = | pd.DataFrame(seg_summaries) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index= | pd.Index(['feat1', 'feat2'], name='id') | pandas.Index |
#!/usr/bin/env python3
import os
import re
from collections import defaultdict
from datetime import datetime
from robobrowser import RoboBrowser
from ccf.config import LoadSettings
import pandas as pd
browser = RoboBrowser(history=True, timeout=6000, parser="lxml")
config = LoadSettings()["KSADS"]
download_dir = config["download_dir"]
def main():
login()
download_all()
generate_snapshot_from_raw_excel_files()
def login():
browser.open("https://ksads.net/Login.aspx")
form = browser.get_form("form1")
form["txtUsername"].value = config["user"]
form["txtPassword"].value = config["password"]
browser.submit_form(form)
if browser.response.url == "https://ksads.net/Login.aspx":
raise Exception("Incorrect credentials provided")
else:
print("Logged in.")
return True
def download(siteid, studytype, name):
# submit the report "type"
print('Requesting "%s" from "%s"' % (studytype, name))
browser.open("https://ksads.net/Report/OverallReport.aspx")
form = browser.get_form("form1")
form["ddlGroupName"].value = str(siteid)
form["chkUserType"].value = studytype
browser.submit_form(form, form["btnexecute"])
# request the results
form = browser.get_form("form1")
form["ddlGroupName"].value = str(siteid)
form["chkUserType"].value = studytype
browser.submit_form(form, form["btnexportexcel"])
# save results to file
timestamp = datetime.today().strftime("%Y-%m-%d")
filename = f"{download_dir}/{timestamp}/{name}-{studytype}.xlsx"
os.makedirs(os.path.dirname(filename), exist_ok=True)
if browser.response.ok:
content = browser.response.content
if content:
print("Saving file %s" % filename)
with open(filename, "wb+") as fd:
fd.write(content)
def download_all():
"""
    Download the KSADS excel files for every configured site and form.
"""
studytypes = config["forms"]
    # go through every combination of study site/type
for studytype in studytypes:
for name, siteid in config["siteids"].items():
download(siteid, studytype, name)
print("Download complete.")
def generate_snapshot_from_raw_excel_files(timestamp=None):
if timestamp is None:
timestamp = datetime.today().strftime("%Y-%m-%d")
prefix = f"{download_dir}/{timestamp}/"
dataframes = defaultdict(list)
for filename in os.listdir(prefix):
        sitename, form = re.search(r"([^-]+)-(.+?)\.xlsx", filename).groups()
filename = f"{prefix}/{filename}"
df = pd.read_excel(filename, parse_dates=["DateofInterview"])
dataframes[form].append(df)
new_dfs = {}
for form, dfs in dataframes.items():
df = | pd.concat(dfs, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
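# Illustrative sketch (not part of the original suite): a typical test built on the
# `box` fixture wraps both the operand and the expected result with tm.box_expected,
# so Index, Series and DataFrame are exercised by the same body. The operation below
# mirrors the existing timedelta + Timedelta-scalar tests.
def test_box_fixture_pattern_example(box):
    tdi = pd.timedelta_range('1 days', periods=3)
    expected = pd.timedelta_range('1 days 02:00:00', periods=3, freq='D')
    obj = tm.box_expected(tdi, box)
    expected = tm.box_expected(expected, box)
    # adding a Timedelta scalar should behave identically for every box
    tm.assert_equal(obj + Timedelta(hours=2), expected)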
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('op', [operator.mul, ops.rmul])
def test_td64arr_rmul_numeric_array(self, op, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = op(vector, tdser)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="raises ValueError "
"instead of TypeError",
strict=True))
])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
scalar_td ** td1
with tm.assert_raises_regex(TypeError, pattern):
td1 ** scalar_td
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Float64Index(np.arange(5, dtype='float64')),
pd.Int64Index(np.arange(5, dtype='int64')),
pd.UInt64Index(np.arange(5, dtype='uint64'))],
ids=lambda x: type(x).__name__)
def idx(request):
return request.param
zeros = [box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]]
zeros.extend([np.array(0, dtype=dtype)
for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([0, 0.0, long(0)])
@pytest.fixture(params=zeros)
def zero(request):
# For testing division by (or of) zero for Index with length 5, this
# gives several scalar-zeros and length-5 vector-zeros
return request.param
class TestDivisionByZero(object):
def test_div_zero(self, zero, idx):
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
result = idx / zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8')
tm.assert_series_equal(ser_compat, Series(result))
def test_floordiv_zero(self, zero, idx):
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf],
dtype=np.float64)
result = idx // zero
tm.assert_index_equal(result, expected)
ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8')
tm.assert_series_equal(ser_compat, Series(result))
def test_mod_zero(self, zero, idx):
expected = pd.Index([np.nan, np.nan, np.nan, np.nan, np.nan],
dtype=np.float64)
result = idx % zero
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim features with a low sum, e.g. n-grams with a sum lower than 5
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
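# Illustrative sketch (not part of the original pipeline): trim_df_sum_feature keeps
# only the columns whose total count reaches par_n, shown here on a toy n-gram frame.
def _example_trim_df_sum_feature():
    toy = pd.DataFrame({'ng_a': [3, 4, 0], 'ng_b': [0, 1, 0]})
    # with par_n=5, 'ng_a' (sum 7) survives and 'ng_b' (sum 1) is dropped
    return trim_df_sum_feature(toy, 5)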
# Chapter 7.1.1. method to trim features with low occurrence across all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
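# Illustrative sketch (not part of the original pipeline): trim_df_by_occurrence keeps
# a word only if it is present (non-null) in more than n articles.
def _example_trim_df_by_occurrence():
    toy = pd.DataFrame({'the': [1, 2, 1], 'rare': [np.nan, np.nan, 1]})
    # 'the' occurs in 3 articles (> 1) and is kept; 'rare' occurs in only 1 and is dropped
    return trim_df_by_occurrence(toy, 1)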
# Chapter 7.1.1. Process of filtering out low-occurrence features and saving the filtered features to a new file
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
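# Illustrative sketch (not part of the original pipeline): trim_df_by_doc_freq removes
# words whose document frequency is at or above par_doc_freq.
def _example_trim_df_by_doc_freq():
    toy = pd.DataFrame({'the': [1, 2, 1, 3], 'seldom': [1, np.nan, np.nan, np.nan]})
    # 'the' (document frequency 1.0) is removed, 'seldom' (0.25 < 0.5) is kept
    return trim_df_by_doc_freq(toy, 0.5)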
# Chapter 7.1.2 Process of filtering out features with high document frequency and saving the filtered features to a new file
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
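# Illustrative sketch (not part of the original pipeline): get_rel_frequency divides
# each article's counts by its length metric; with aligned integer indices the same
# result should be obtainable in one call via par_df_count.div(par_df_len_metric_vector, axis=0).
def _example_get_rel_frequency():
    counts = pd.DataFrame({'comma': [4, 2]})
    word_counts = pd.Series([100, 50])
    # both articles end up with a relative comma frequency of 0.04
    return get_rel_frequency(counts, word_counts)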
# Chapter 7.1.4. Whole process of the chapter: compute the individual relative frequency of each feature,
# compare the correlation with the article length for the absolute and the relative variant, and save the
# features with the chosen relative frequency to a new file
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
# only on the test data 60/40 split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
# Calculate the median correlation
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
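# Illustrative sketch (not part of the original pipeline): ranking a training split by
# mutual information, then reading off the ten best feature names.
def _example_rank_features(par_x_train, par_y_train):
    df_sorted = sort_features_by_score(par_x_train, par_y_train, mutual_info_classif)
    return df_sorted['feature_names'].head(10).tolist()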
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Choose the best percentage if the 5 following points are each no higher than their predecessor,
# all stay within 0.5% of the first point, or all lie more than 2% below the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best percentage corresponds to the point six positions before the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Trade off up to 1% accuracy for a smaller share of features:
# accept an earlier maximum that is within 1% of the best accuracy and uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If there are not at least 2% of features to give up, no better percentage exists
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it were negative the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
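# Illustrative sketch (assumption, not original code): the plateau criterion used
# above on a synthetic accuracy curve. A window of six points is inspected; the
# search stops once the five points after a candidate all stay within +/-0.5% of it.
if __name__ == "__main__":
    _accuracies = [0.60, 0.68, 0.73, 0.755, 0.757, 0.756, 0.755, 0.756, 0.757, 0.756]
    for _i in range(5, len(_accuracies)):
        _window = _accuracies[_i - 5:_i + 1]  # candidate point plus its 5 successors
        _diff_1p = [p - _window[0] for p in _window[1:]]
        if all(-0.005 <= d <= 0.005 for d in _diff_1p):
            print(f"plateau detected at index {_i - 5} (accuracy {_window[0]:.3f})")
            break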
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Choose the best percentage if the 5 following points are each no higher than their predecessor,
# all stay within 0.5% of the first point, or all lie more than 2% below the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best percentage corresponds to the point six positions before the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Trade off up to 1% accuracy for a smaller share of features:
# accept an earlier maximum that is within 1% of the best accuracy and uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If there are not at least 2% of features to give up, no better percentage exists
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it were negative the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
# Candidate values from 3 up to the square root of the sample count
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
if int(math.sqrt(len(x_new_training.index))) not in neighbors:
    neighbors.append(int(math.sqrt(len(x_new_training.index))))
# No more neighbors than samples - 2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Choose the best percentage if the 5 following points are each no higher than their predecessor,
# all stay within 0.5% of the first point, or all lie more than 2% below the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best percentage corresponds to the point six positions before the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Trade off up to 1% accuracy for a smaller share of features:
# accept an earlier maximum that is within 1% of the best accuracy and uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If there are not at least 2% of features to give up, no better percentage exists
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it were negative the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
# one percent of the features (len/100) multiplied by the chosen percentage
number_features = round(par_percent * (len(par_x.columns) / 100))
# Keep at least one feature
number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
# select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as f:
sys.stdout = f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
import pandas as pd
import numpy as np
X = np.load("all_scores_mag_compo60.npy", allow_pickle=True).item()
df = pd.DataFrame(X)
"""
Module to test differing featuresets.
"""
import os
import itertools
import pandas as pd
class Ablation_Experiment:
# public
def __init__(self, config_obj, app_obj, util_obj):
self.config_obj = config_obj
self.app_obj = app_obj
self.util_obj = util_obj
def run_experiment(self, start=0, end=1000000, domain='twitter',
featuresets=['base', 'content', 'graph', 'sequential'],
clf='lr', fold=0, train_size=0.8, relations=[],
analyze_subgraphs=False, param_search='single'):
rel_dir = self.config_obj.rel_dir
out_dir = rel_dir + 'output/' + domain + '/experiments/'
self.util_obj.create_dirs(out_dir)
fold = str(fold)
fn = fold + '_abl.csv'
combos = self._create_combinations(featuresets)
# filter combos
new_combos = []
for combo in combos:
if 'sequential' in combo and 'aggregate' in combo:
new_combos.append(combo)
elif 'sequential' not in combo and 'aggregate' not in combo:
new_combos.append(combo)
combos = new_combos
print(combos)
rows = []
cols = ['featureset', 'aupr', 'auroc']
for featuresets in combos:
row = ['+'.join(featuresets)]
d = self.app_obj.run(domain=domain, start=start, end=end,
fold=fold, engine=None, clf=clf,
stacking=0, data='both',
train_size=train_size, val_size=0,
relations=relations,
featuresets=featuresets,
analyze_subgraphs=analyze_subgraphs,
param_search=param_search)
row.append(d['ind']['aupr'])
row.append(d['ind']['auroc'])
rows.append(row)
self._write_scores_to_csv(rows, cols=cols, out_dir=out_dir,
fname=fn)
# private
def _clear_data(self, domain='twitter'):
ind_dir = self.config_obj.ind_dir
rel_dir = self.config_obj.rel_dir
fold_dir = ind_dir + '/data/' + domain + '/folds/'
ind_pred_dir = ind_dir + '/output/' + domain + '/predictions/'
rel_pred_dir = rel_dir + '/output/' + domain + '/predictions/'
os.system('rm %s*.csv' % (fold_dir))
os.system('rm %s*.csv' % (ind_pred_dir))
os.system('rm %s*.csv' % (rel_pred_dir))
def _create_combinations(self, fsets):
all_sets = []
for L in range(1, len(fsets) + 1):
for combo in itertools.combinations(fsets, L):
all_sets.append(list(combo))
return all_sets
def _write_scores_to_csv(self, rows, cols=[], out_dir='',
fname='results.csv'):
df = pd.DataFrame(rows, columns=cols)
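# Illustrative sketch (assumption, not part of the class above): the ablation
# combinations are simply the non-empty power set of the featureset names.
if __name__ == "__main__":
    import itertools
    _fsets = ['base', 'content', 'graph']
    _combos = [list(c) for _L in range(1, len(_fsets) + 1)
               for c in itertools.combinations(_fsets, _L)]
    print(_combos)  # 7 combinations: 3 singletons, 3 pairs, 1 triple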
from flask import Blueprint, redirect, url_for, render_template, request, session
from src.constants.model_params import Ridge_Params, Lasso_Params, ElasticNet_Params, RandomForestRegressor_Params, \
SVR_params, AdabootRegressor_Params, \
GradientBoostRegressor_Params
from src.constants.model_params import KmeansClustering_Params, DbscanClustering_Params, AgglomerativeClustering_Params
from src.constants.model_params import LogisticRegression_Params, SVC_Params, KNeighborsClassifier_Params, \
DecisionTreeClassifier_Params, RandomForestClassifier_Params, GradientBoostingClassifier_Params, \
AdaBoostClassifier_Params
from src.constants.constants import CLASSIFICATION_MODELS, CLUSTERING_MODELS
from flask.json import jsonify
from src.constants.model_params import DecisionTreeRegressor_Params, LinearRegression_Params
from src.model.custom.classification_models import ClassificationModels
from src.model.custom.regression_models import RegressionModels
from src.model.custom.clustering_models import ClusteringModels
from src.preprocessing.preprocessing_helper import Preprocessing
from src.constants.constants import REGRESSION_MODELS
from src.utils.common.prediction_helper import make_prediction
from src.utils.databases.mysql_helper import MySqlHelper
from werkzeug.utils import secure_filename
import os
from src.utils.common.common_helper import get_param_value, load_prediction_result, load_project_model, \
read_config, save_prediction_result, save_project_model
import pandas as pd
from src.utils.common.data_helper import load_data
from src.model.auto.Auto_classification import ModelTrain_Classification
from src.model.auto.Auto_regression import ModelTrain_Regression
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
from loguru import logger
from from_root import from_root
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, accuracy_score, precision_score, \
f1_score, recall_score
from src.utils.common.project_report_helper import ProjectReports
app_training = Blueprint('training', __name__)
config_args = read_config("./config.yaml")
mysql = MySqlHelper.get_connection_obj()
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
@app_training.route('/model_training/<action>', methods=['GET'])
def model_training(action):
try:
if 'pid' in session:
df = load_data()
if df is not None:
target_column = ""
if session['target_column'] is not None:
target_column = session['target_column']
cols_ = [col for col in df.columns if col != target_column]
# Check data contain any categorical independent features
Categorical_columns = Preprocessing.col_seperator(df.loc[:, cols_], "Categorical_columns")
if len(Categorical_columns.columns) > 0:
return render_template('model_training/auto_training.html', project_type=session['project_type'],
target_column=session['target_column'], status="error",
msg="Data contain some categorical indepedent features, please perform encoding first")
"""Check If Project type is Regression or Classificaion and target Columns is not Selected"""
if session['project_type'] != 3 and session['target_column'] is None:
return redirect('/target-column')
if action == 'help':
return render_template('model_training/help.html')
elif action == 'auto_training':
logger.info('Redirect To Auto Training Page')
ProjectReports.insert_record_ml('Redirect To Auto Training Page')
if session['project_type'] == 3:
return render_template('model_training/auto_training.html',
project_type=session['project_type'],
target_column=session['target_column'], status="error",
msg="Auto Training is not available for Clustering!!!")
return render_template('model_training/auto_training.html', project_type=session['project_type'],
target_column=session['target_column'])
elif action == 'custom_training' or action == 'final_train_model':
query = f""" select a.pid ProjectId , a.TargetColumn TargetName,
a.Model_Name ModelName,
b.Schedule_date,
b.schedule_time ,
a.Model_Trained,
b.train_status ,
b.email,
b.deleted
from tblProjects as a
join tblProject_scheduler as b on a.Pid = b.ProjectId where b.ProjectId = '{session.get('project_name')}'
and b.deleted=0
"""
result = mysql.fetch_one(query)
if result is not None:
return render_template('scheduler/training_blocker.html')
logger.info('Redirect To Custom Training Page')
ProjectReports.insert_record_ml('Redirect To Custom Training Page')
try:
if session['project_type'] == 2:
return render_template('model_training/classification.html', action=action,
models=CLASSIFICATION_MODELS)
elif session['project_type'] == 1:
return render_template('model_training/regression.html', action=action,
models=REGRESSION_MODELS)
elif session['project_type'] == 3:
return render_template('model_training/clustering.html', action=action,
models=CLUSTERING_MODELS)
else:
return render_template('model_training/custom_training.html')
except Exception as e:
logger.error(e)
return render_template('model_training/custom_training.html')
else:
return 'Non-Implemented Action'
else:
return redirect('/')
else:
return redirect(url_for('/'))
except Exception as e:
logger.error('Error in Model Training')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
@app_training.route('/model_training/<action>', methods=['POST'])
def model_training_post(action):
try:
if 'pid' in session:
df = load_data()
model = None
range = None
random_state = None
if df is not None:
if action == 'help':
return render_template('model_training/help.html')
elif action == 'custom_training':
try:
model = request.form['model']
range = int(request.form['range'])
if model != "KNeighborsClassifier" and model != "SVR":
random_state = int(request.form['random_state'])
logger.info('Submitted Custom Training Page')
ProjectReports.insert_record_ml('Submitted Custom Training Page',
f"Model:{model}; Range:{range}; Random_State: {random_state}")
target = session['target_column']
if session['project_type'] != 3:
X = df.drop(target, axis=1)
y = df[target]
train_model_fun = None
X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
label=y,
train_size=range / 100,
random_state=random_state)
model_params = {}
if model == "LinearRegression":
Model_Params = LinearRegression_Params
train_model_fun = RegressionModels.linear_regression_regressor
elif model == "Ridge":
Model_Params = Ridge_Params
train_model_fun = RegressionModels.ridge_regressor
elif model == "Lasso":
Model_Params = Lasso_Params
train_model_fun = RegressionModels.lasso_regressor
elif model == "ElasticNet":
Model_Params = ElasticNet_Params
train_model_fun = RegressionModels.elastic_net_regressor
elif model == "DecisionTreeRegressor":
Model_Params = DecisionTreeRegressor_Params
train_model_fun = RegressionModels.decision_tree_regressor
elif model == "RandomForestRegressor":
Model_Params = RandomForestRegressor_Params
train_model_fun = RegressionModels.random_forest_regressor
elif model == "SVR":
Model_Params = SVR_params
train_model_fun = RegressionModels.support_vector_regressor
elif model == "AdaBoostRegressor":
Model_Params = AdabootRegressor_Params
train_model_fun = RegressionModels.ada_boost_regressor
elif model == "GradientBoostingRegressor":
Model_Params = GradientBoostRegressor_Params
train_model_fun = RegressionModels.gradient_boosting_regressor
elif model == "LogisticRegression":
Model_Params = LogisticRegression_Params
train_model_fun = ClassificationModels.logistic_regression_classifier
elif model == "SVC":
Model_Params = SVC_Params
train_model_fun = ClassificationModels.support_vector_classifier
elif model == "KNeighborsClassifier":
Model_Params = KNeighborsClassifier_Params
train_model_fun = ClassificationModels.k_neighbors_classifier
elif model == "DecisionTreeClassifier":
Model_Params = DecisionTreeClassifier_Params
train_model_fun = ClassificationModels.decision_tree_classifier
elif model == "RandomForestClassifier":
Model_Params = RandomForestClassifier_Params
train_model_fun = ClassificationModels.random_forest_classifier
elif model == "AdaBoostClassifier":
Model_Params = AdaBoostClassifier_Params
train_model_fun = ClassificationModels.ada_boost_classifier
elif model == "GradientBoostClassifier":
Model_Params = GradientBoostingClassifier_Params
train_model_fun = ClassificationModels.gradient_boosting_classifier
else:
return 'Non-Implemented Action'
for param in Model_Params:
model_params[param['name']] = get_param_value(param, request.form[param['name']])
trained_model = train_model_fun(X_train, y_train, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model)
reports = [{"key": "Model Name", "value": model},
{"key": "Data Size", "value": len(df)},
{"key": "Trained Data Size", "value": len(X_train)},
{"key": "Test Data Size", "value": len(X_test)}]
scores = []
# Regression
if trained_model is not None and session['project_type'] == 1:
y_pred = trained_model.predict(X_test)
scores.append({"key": "r2_score", "value": r2_score(y_test, y_pred)})
scores.append(
{"key": "mean_absolute_error", "value": mean_absolute_error(y_test, y_pred)})
scores.append(
{"key": "mean_squared_error", "value": mean_squared_error(y_test, y_pred)})
# Model Name Set in table while training
query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
return render_template('model_training/model_result.html', action=action,
status="success",
reports=reports, scores=scores, model_params=model_params)
# Classification
if trained_model is not None and session['project_type'] == 2:
y_pred = trained_model.predict(X_test)
scores.append({"key": "Accuracy", "value": accuracy_score(y_test, y_pred)})
scores.append({"key": "Classes", "value": df[target].unique()})
scores.append(
{"key": "Precision", "value": precision_score(y_test, y_pred, average=None)})
scores.append({"key": "Recall", "value": recall_score(y_test, y_pred, average=None)})
scores.append({"key": "F1_score", "value": f1_score(y_test, y_pred, average=None)})
# Model Name Set in table while training
query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
result = mysql.update_record(query)
return render_template('model_training/model_result.html', action=action,
status="success",
reports=reports, scores=scores, model_params=model_params)
elif session['project_type'] == 3:
X = df
train_model_fun = None
model_params = {}
if model == "KMeans":
Model_Params = KmeansClustering_Params
train_model_fun = ClusteringModels.kmeans_clustering
elif model == "DBSCAN":
Model_Params = DbscanClustering_Params
train_model_fun = ClusteringModels.dbscan_clustering
elif model == "AgglomerativeClustering":
Model_Params = AgglomerativeClustering_Params
train_model_fun = ClusteringModels.agglomerative_clustering
else:
return 'Non-Implemented Action'
for param in Model_Params:
model_params[param['name']] = get_param_value(param, request.form[param['name']])
trained_model, y_pred = train_model_fun(X, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model)
reports = [{"key": "Model Name", "value": model},
{"key": "Data Size", "value": len(df)},
{"key": "Train Data Size", "value": len(X)},
{"key": "Test Data Size", "value": 0}]
scores = []
# Clustering
if trained_model is not None and session['project_type'] == 3:
scores.append({"key": "Predicted Classes",
"value": pd.DataFrame(data=y_pred, columns=['y_pred'])[
'y_pred'].unique()})
# Model Name Set in table while training
query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
result = mysql.update_record(query)
return render_template('model_training/model_result.html', action=action,
status="success",
reports=reports, scores=scores, model_params=model_params)
else:
raise Exception("Model Couldn't train, please check parametes")
except Exception as e:
logger.error('Error Submitted Custom Training Page')
ProjectReports.insert_record_ml('Error Submitted Custom Training Page',
f"Model:{model}; Range:{range}; Random_State: {random_state}",
'', 0, str(e))
if session['project_type'] == 2:
return render_template('model_training/classification.html', action=action,
models=CLASSIFICATION_MODELS, status="error", msg=str(e))
elif session['project_type'] == 1:
return render_template('model_training/regression.html', action=action,
models=REGRESSION_MODELS, status="error", msg=str(e))
else:
return render_template('model_training/clustering.html', action=action,
models=CLUSTERING_MODELS, status="error", msg=str(e))
elif action == "auto_training":
try:
target = session['target_column']
if target is None:
return redirect(url_for('/target-column'))
# data_len = len(df)
# data_len = 10000 if data_len > 10000 else int(len(df) * 0.9)
# df = df.sample(frac=1).loc[:data_len, :]
trainer = None
X = df.drop(target, axis=1)
y = df[target]
X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
label=y,
train_size=0.75,
random_state=101)
if session['project_type'] == 1:
trainer = ModelTrain_Regression(X_train, X_test, y_train, y_test, True)
result = trainer.results()
result = result.to_html()
return render_template('model_training/auto_training.html', status="success",
project_type=session['project_type'],
target_column=session['target_column'], train_done=True,
result=result)
elif session['project_type'] == 2:
trainer = ModelTrain_Classification(X_train, X_test, y_train, y_test, True)
result = trainer.results()
result = result.to_html()
return render_template('model_training/auto_training.html', status="success",
project_type=session['project_type'],
target_column=session['target_column'], train_done=True,
result=result)
except Exception as ex:
return render_template('model_training/auto_training.html', status="error",
project_type=session['project_type'],
target_column=session['target_column'], msg=str(ex))
elif action == 'final_train_model':
try:
logger.info('Final Train Model')
ProjectReports.insert_record_ml('Final Train Model')
query = f'''select Model_Name from tblProjects Where Id="{session.get('pid')}"'''
model_name = mysql.fetch_one(query)[0]
if session['project_type'] != 3:
target = session['target_column']
X = df.drop(target, axis=1)
y = df[target]
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', action=action,
status="error",
msg="Model is not found, please train model again")
else:
model_params = {}
for key, value in model.get_params().items():
model_params[key] = value
if model_name == "LinearRegression":
train_model_fun = RegressionModels.linear_regression_regressor
elif model_name == "Ridge":
train_model_fun = RegressionModels.ridge_regressor
elif model_name == "Lasso":
train_model_fun = RegressionModels.lasso_regressor
elif model_name == "ElasticNet":
train_model_fun = RegressionModels.elastic_net_regressor
elif model_name == "DecisionTreeRegressor":
train_model_fun = RegressionModels.decision_tree_regressor
elif model_name == "RandomForestRegressor":
train_model_fun = RegressionModels.random_forest_regressor
elif model_name == "SVR":
train_model_fun = RegressionModels.support_vector_regressor
elif model_name == "AdaBoostRegressor":
train_model_fun = RegressionModels.ada_boost_regressor
elif model_name == "GradientBoostingRegressor":
train_model_fun = RegressionModels.gradient_boosting_regressor
elif model_name == "LogisticRegression":
train_model_fun = ClassificationModels.logistic_regression_classifier
elif model_name == "SVC":
train_model_fun = ClassificationModels.support_vector_classifier
elif model_name == "KNeighborsClassifier":
train_model_fun = ClassificationModels.k_neighbors_classifier
elif model_name == "DecisionTreeClassifier":
train_model_fun = ClassificationModels.decision_tree_classifier
elif model_name == "RandomForestClassifier":
train_model_fun = ClassificationModels.random_forest_classifier
elif model_name == "AdaBoostClassifier":
train_model_fun = ClassificationModels.ada_boost_classifier
elif model_name == "GradientBoostClassifier":
train_model_fun = ClassificationModels.gradient_boosting_classifier
else:
return 'Non-Implemented Action'
trained_model = train_model_fun(X, y, True, **model_params)
"""Save Final Model"""
save_project_model(trained_model, 'model.pkl')
query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
logger.info('Final Training Done')
ProjectReports.insert_record_ml('Final Training Done')
return render_template('model_training/congrats.html')
elif session['project_type'] == 3:
X = df
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', action=action,
status="error",
msg="Model is not found, please train model again")
else:
model_params = {}
for key, value in model.get_params().items():
model_params[key] = value
if model_name == "KMeans":
train_model_fun = ClusteringModels.kmeans_clustering
elif model_name == "DBSCAN":
train_model_fun = ClusteringModels.dbscan_clustering
elif model_name == "AgglomerativeClustering":
train_model_fun = ClusteringModels.agglomerative_clustering
else:
return 'Non-Implemented Action'
trained_model, y_pred = train_model_fun(X, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model, 'model.pkl')
query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
logger.info('Final Training Done')
ProjectReports.insert_record_ml('Final Training Done')
return render_template('model_training/congrats.html')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
render_template('model_training/model_result.html', action=action, status="error",
msg="Model is not found, please train model again")
if action == "Scheduled_model":
path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')
pass
else:
return "Non Implemented Method"
else:
logger.critical('DataFrame has no data')
return redirect('/')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
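# Illustrative sketch (assumption, not part of this blueprint): the long if/elif
# chains above map a model name to a training callable. The same dispatch can be
# written as a dictionary lookup; plain sklearn estimators stand in here for the
# project's own wrapper functions.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    _dispatch = {
        "LogisticRegression": LogisticRegression,
        "DecisionTreeClassifier": DecisionTreeClassifier,
    }
    _X, _y = make_classification(n_samples=100, n_features=5, random_state=0)
    _clf = _dispatch["LogisticRegression"]()  # look up the estimator class by name
    _clf.fit(_X, _y)
    print(_clf.score(_X, _y))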
@app_training.route('/congrats', methods=['GET', 'POST'])
def congrats():
try:
if 'pid' in session:
df = load_data()
if df is not None:
target = session['target_column']
X = df.drop(target, axis=1)
y = df[target]
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', status="error",
msg="Model is not found, please train model again")
else:
for key, value in model.get_params().items():
    exec(key + "=value")
logger.info('Loaded Congrats Page')
ProjectReports.insert_record_ml('Loaded Congrats Page')
if request.method == "GET":
return render_template('model_training/congrats.html')
else:
return render_template('model_training/congrats.html')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
@app_training.route('/prediction', methods=['GET', 'POST'])
def prediction():
try:
if 'pid' in session:
file_path = ""
logger.info('Loaded Prediction Page')
ProjectReports.insert_record_ml('Loaded Prediction Page')
if request.method == "GET":
is_trained = mysql.fetch_all(
f"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1")
if is_trained is None:
return render_template('model_training/prediction_page.html', status="error",
msg="your model is not trained, please train model first")
else:
return render_template('model_training/prediction_page.html', status="success")
else:
try:
f = request.files['file']
ALLOWED_EXTENSIONS = ['csv', 'tsv', 'json']
msg = ""
if len(request.files) == 0:
msg = 'Please select a file to upload'
elif f.filename.strip() == '':
msg = 'Please select a file to upload'
elif f.filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:
msg = 'This file format is not allowed, please select mentioned one'
if msg:
logger.error(msg)
return render_template('model_training/prediction_page.html', status="error", msg=msg)
filename = secure_filename(f.filename)
file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)
f.save(file_path)
if file_path.endswith('.csv'):
df = pd.read_csv(file_path)
# -*- coding: utf-8 -*-
"""
:Module: khorosjx.utils.df_utils
:Synopsis: Useful tools and utilities to assist in importing, manipulating and exporting pandas dataframes
:Usage: ``from khorosjx import df_utils``
:Example: TBD
:Created By: <NAME>
:Last Modified: <NAME>
:Modified Date: 18 Dec 2019
"""
import pandas as pd
# Define function to convert a list of dictionaries to a pandas dataframe
def convert_dict_list_to_dataframe(dict_list, column_names=None):
"""This function converts a list of dictionaries into a pandas dataframe.
:param dict_list: List of dictionaries
:type dict_list: list
:param column_names: The column names for the dataframe (Optional)
:type column_names: list
:returns: A pandas dataframe of the data
"""
# Identify the dataframe column names without mutating a shared default list
if not column_names:
    column_names = list(dict_list[0].keys())
# Identify the data for each column
df_data = []
for idx in range(0, len(dict_list)):
row_data = []
for field_value in dict_list[idx].values():
row_data.append(field_value)
df_data.append(row_data)
# Create and return the dataframe
dataframe = pd.DataFrame(df_data, columns=column_names)
return dataframe
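# Hedged usage example (not part of the original module): converts a small,
# made-up list of dictionaries into a dataframe via the helper above.
if __name__ == "__main__":
    _records = [{'id': 1, 'name': 'alpha'}, {'id': 2, 'name': 'beta'}]
    print(convert_dict_list_to_dataframe(_records))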
def import_csv(file_path, delimiter=",", column_names=[], columns_to_return=[], has_headers=True):
"""This function imports a CSV file to generate a dataframe.
:param file_path: The absolute path to the CSV file to be imported
:type file_path: str
:param delimiter: The column delimiter utilized in the CSV
:type delimiter: str
:param column_names: The column names to use with the imported dataframe (Optional)
:type column_names: list
:param columns_to_return: Determines which of the columns should actually be returned (Default: all columns)
:param has_headers: Defines whether or not the data in the file has column headers (Default: ``True``)
:type has_headers: bool
:returns: The imported data as a pandas dataframe
:raises: FileNotFoundError, TypeError
"""
# Determine the appropriate use case and then import and return the dataframe
if has_headers is False and len(column_names) == 0:
if len(columns_to_return) == 0: # Use Case: Headless
dataframe = pd.read_csv(file_path, sep=delimiter, header=None)
else: # Use Case: Headless Filtered
dataframe = pd.read_csv(file_path, sep=delimiter, header=None, usecols=columns_to_return)
else:
if len(column_names) > 0 and len(columns_to_return) > 0: # Use Case: Custom Filtered
dataframe = pd.read_csv(file_path, sep=delimiter, names=column_names)
dataframe = dataframe[columns_to_return]
elif len(column_names) > 0 and len(columns_to_return) == 0: # Use Case: Custom
dataframe = pd.read_csv(file_path, sep=delimiter, names=column_names)
elif len(column_names) == 0 and len(columns_to_return) > 0: # Use Case: Filtered
dataframe = pd.read_csv(file_path, sep=delimiter, usecols=columns_to_return)
else: # Use Case: Default
dataframe = pd.read_csv(file_path, sep=delimiter)
return dataframe
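# Hedged usage example (not part of the original module): writes a tiny CSV to a
# temporary file and reads two of its columns back through import_csv. The file
# contents are made up for illustration.
if __name__ == "__main__":
    import os
    import tempfile
    _tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
    _tmp.write("id,name,score\n1,alpha,10\n2,beta,20\n")
    _tmp.close()
    print(import_csv(_tmp.name, columns_to_return=['id', 'score']))
    os.remove(_tmp.name)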
def import_excel(file_path, excel_sheet='', use_first_sheet=False,
column_names=[], columns_to_return=[], has_headers=True):
"""This function imports a Microsoft Excel file to generate a dataframe.
:param file_path: The absolute path to the Excel file to be imported
:type file_path: str
:param excel_sheet: The name of the specific sheet in the file to import
:type excel_sheet: str
:param use_first_sheet: Defines whether or not the first sheet in the file should be used (Default: ``False``)
:type use_first_sheet: bool
:param column_names: The column names to use with the imported dataframe (Optional)
:type column_names: list
:param columns_to_return: Determines which of the columns should actually be returned (Default: all columns)
:param has_headers: Defines whether or not the data in the file has column headers (Default: ``True``)
:type has_headers: bool
:returns: The imported data as a pandas dataframe
:raises: FileNotFoundError, TypeError
"""
# Determine the appropriate use case and then import and return the dataframe
if excel_sheet != "" and use_first_sheet is False:
if has_headers is False and len(column_names) == 0:
if len(columns_to_return) == 0: # Use Case: Headless
excel_data = pd.read_excel(file_path, sheet_name=excel_sheet, header=None)
from directional import *
import pandas as pd
import numpy as np
demo_sin_cos_matrix = pd.read_csv("sample_data/sin-cos.csv")
demo_sin_cos_mean = pd.read_csv("sample_data/sin-cos-mean.csv")
demo_angle_matrix = pd.read_csv("sample_data/degrees.csv")
demo_radian_matrix = pd.read_csv("sample_data/radians.csv")
from .gamedata import getPlayers, getPointLog, getMatches, getUnplayed, getDisqualified
from .pwr import PWRsystems
from .regression import Regression
from .simulate import simulateBracket, simulateMatch, simulateGamelog
from .players import Player, Players
from .tiebreak import getPlayoffSeeding
from .util import playoff_series_ids
from joblib import Parallel, delayed
import pandas as pd
import numpy as np
class Simulate(object):
def __init__(self, n_sims, pwr_systems=None, rank_adj=0.5, st_dev=1.6, season=2):
self.n_sims = n_sims
self.rank_adj = rank_adj
self.st_dev = st_dev
self.season = season
if pwr_systems is None:
self.pwr_systems = PWRsystems()
else:
self.pwr_systems = pwr_systems
self.players = getPlayers(season)
self.points = getPointLog(season)
self.played = getMatches(season)
self.unplayed = getUnplayed(season)
self.dq = getDisqualified(season)
for system in self.pwr_systems.systems:
system.calculate(gamelog=self.points, season=season)
self.regress(system)
self.pwr = self.pwr_systems.combine()
self.regress(self.pwr)
def run(self, parallel=True, combine=True):
simulations = []
if parallel:
simulations = Parallel(n_jobs=-1)(delayed(self.simulate)() for i in range(self.n_sims))
else:
for i in range(self.n_sims):
simulations.append(self.simulate())
self.simulations = Simulations(simulations, combine)
return self
def playoffs(self, reindex=False):
if self.simulations.combined:
return self.copied(self.simulations.playoffs.copy(), reindex)
def regularseason(self, reindex=False):
if self.simulations.combined:
return self.copied(self.simulations.regularseason.copy(), reindex)
def standings(self, reindex=False):
if self.simulations.combined:
return self.copied(self.simulations.standings.copy(), reindex)
def copied(self, df, reindex):
if reindex:
return df.reset_index(level='Simulation')
else:
return df
def simulate(self):
return Simulation(self)
def regress(self, system):
if system.regress_to is not None:
if type(system.regress_to) is not Regression:
system.regress_to = Regression(to=system.regress_to)
system.regress(system.values)
class Simulation(object):
def __init__(self, sim):
self.rankings = sim.pwr.values.copy()
pwr_adjustments = np.random.normal(0, sim.rank_adj, self.rankings.shape[0])
self.rankings['PWR'] = self.rankings['PWR'].values - pwr_adjustments
if sim.unplayed.empty:
self.regularseason = sim.played
else:
simulated = simulateGamelog(sim.unplayed, self.rankings, sim.st_dev, sim.season)
self.regularseason = pd.concat([sim.played, simulated], ignore_index=True)
adjusted = pd.DataFrame([x + [1] for x in self.regularseason[['Winner','Loser','W Pts']].values.tolist()] +
[x + [0] for x in self.regularseason[['Loser','Winner','L Pts']].values.tolist()],
columns=['Player','Opponent','Pts','Wins'])
df = pd.merge(pd.merge(adjusted, sim.players, on='Player'),
sim.players.rename({'Player':'Opponent','Division':'OppDivision'}, axis=1), on='Opponent')
self.standings = pd.merge(df.groupby(['Player','Division']).agg({'Pts':'sum','Wins':'sum'}).reset_index(),
self.rankings, on='Player')
df['Wins'] = np.where(np.isin(df['Player'].values, sim.dq), 0, df['Wins'].values)
self.seeding = getPlayoffSeeding(df)
self.playoffs = self.simulatePlayoffs(sim)
self.standings = pd.merge(self.standings, self.seeding, how='left', on='Player', suffixes=('', '_')).drop('Division_', axis=1)
def simulatePlayoffs(self, sim):
players = Players(pd.merge(self.standings, self.seeding, on='Player', suffixes=('', '_')))
#-*- coding:utf-8 -*-
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import pickle
import numpy as np
list1 = []
class MyUi(QMainWindow):
def __init__(self):
super(MyUi, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
cwd = os.getcwd()
cwd = str(cwd)
if os.path.isfile(cwd+"/time"):
with open("time","rb") as outfile:#reads current time
history = pickle.load(outfile)
if (datetime.now()-history).total_seconds() < 43200:  # less than 12 hours since the last update
print("Less than 12 hours. Loading previously saved Pickle...")
else:
print("More than 12 hours. Updating Pickle...")
data = ts.get_industry_classified()
with open("class","wb+") as outfile:
pickle.dump(data,outfile)
now = datetime.now()
with open("time", "wb+") as outfile: #update time
pickle.dump(now, outfile)
else:
print("No Pickle found!") #If this is first time using tuchart in this directory
data = df()
data = ts.get_industry_classified()
with open('class', 'wb+') as outfile: #records pickle
pickle.dump(data, outfile)
now = datetime.now()
with open("time", "wb+") as outfile:
pickle.dump(now,outfile)
with open("class", "rb") as infile: # reads current time
series = pickle.load(infile)
#series = pd.read_json(cwd + "\\class.json")
#series = ts.get_industry_classified()
series = pd.DataFrame(series)
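# Illustrative sketch (assumption, standalone): the caching pattern used above -
# persist a timestamp next to the cached data and refresh only when more than
# 12 hours (43200 seconds) have passed. Paths below are temporary, for demonstration.
if __name__ == "__main__":
    import os
    import pickle
    import tempfile
    from datetime import datetime

    _stamp_path = os.path.join(tempfile.gettempdir(), "tuchart_demo_time")
    _needs_refresh = True
    if os.path.isfile(_stamp_path):
        with open(_stamp_path, "rb") as _fh:
            _last = pickle.load(_fh)
        _needs_refresh = (datetime.now() - _last).total_seconds() >= 43200
    if _needs_refresh:
        with open(_stamp_path, "wb") as _fh:
            pickle.dump(datetime.now(), _fh)
    print("refreshed" if _needs_refresh else "cache still fresh")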
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
    @pytest.mark.parametrize("arg,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = | Series([10.3, 5., 5., None]) | pandas.Series |
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
    # enables setting track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
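    # create_h5_and_return_checksum (below) writes a one-row table with the given
    # track_times flag and returns the file's md5 digest, so the two writes can be
    # compared byte-for-byte.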
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
    # checksums are the same if track_times=False
assert checksum_0_tt_false == checksum_1_tt_false
    # checksums are NOT the same if track_times=True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_same_name_scoping(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(20, 2), index=date_range("20130101", periods=20))
store.put("df", df, format="table")
expected = df[df.index > Timestamp("20130105")]
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
# changes what 'datetime' points to in the namespace where
# 'select' does the lookup
from datetime import datetime # noqa:F401
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_store_index_name(setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(table_format, setup_path):
# GH #13492
idx = Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning")
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_coordinates(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame({"A": range(5), "B": range(5)})
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected, check_freq=False)
# FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None
# but expect freq="18B"
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
msg = (
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_start_stop_table(setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
{"A": np.random.rand(20), "B": np.random.rand(20)},
index=date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = [f"{c:3d}" for c in df.index]
df.columns = [f"{c:3d}" for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(start, stop, setup_path):
# GH 17021
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore():
df = tm.makeDataFrame()
def writer(path):
with HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with HDFStore(path) as store:
return read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore():
df = tm.makeDataFrame()
def writer(path):
with HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with HDFStore(path) as store:
return read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def test_copy():
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
                        # check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
os.remove(new_f) # noqa: PDF008
# new table
df = tm.makeDataFrame()
with tm.ensure_clean() as path:
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
def test_duplicate_column_name(setup_path):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(setup_path) as path:
msg = "Columns index has to be unique for fixed format"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", format="fixed")
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(setup_path):
# GH9635
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = | timedelta_range(start="0s", periods=10, freq="1s", name="example") | pandas.timedelta_range |
from pandas import read_csv, DataFrame
from numpy import asarray, transpose, array, linalg, abs, cov, reshape
from sklearn.externals import joblib
from sklearn import mixture
from sklearn.metrics import silhouette_score
from operator import itemgetter
import sympy as sp
def get_dataset(path):
data = | read_csv(path) | pandas.read_csv |
import base64
import json
import pandas as pd
import streamlit as st
st.set_page_config(layout='wide')
def download_link(object_to_download, download_filename, download_link_text):
"""
Generates a link to download the given object_to_download.
object_to_download (str, pd.DataFrame): The object to be downloaded.
download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt
download_link_text (str): Text to display for download link.
Examples:
download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')
download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')
"""
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
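# Hedged usage sketch (not in the original app): the returned anchor tag is intended
# to be rendered as raw HTML, e.g.
# st.markdown(download_link(df, 'cards.csv', 'Download CSV'), unsafe_allow_html=True)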
# data['cards']: list of {'id', 'name', 'closed', 'desc', 'dueReminder', 'idList', 'idLabels', 'dueComplete'}
# data['labels']: list of {'id', 'name', 'board', 'color'}
# data['lists']: list of {'id', 'name', 'closed', 'pos'}
# data['checklists']: list of {'id', 'name', 'idCard', 'pos', 'checkItems'}, 'checkItems': list of {'idChecklist', 'state', 'id', 'name', 'due'}
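# Illustrative (assumed) shape of a single card record, for reference only:
# {'id': '5f0...', 'name': 'Write report', 'closed': False, 'desc': '',
#  'dueReminder': None, 'idList': '5e9...', 'idLabels': ['5d1...'], 'dueComplete': False}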
@st.cache
def load_data(content=None):
if content is None:
with open('data/data.json') as infile:
return json.load(infile)
else:
return json.load(content)
@st.cache
def active_lists(content=None):
data = load_data(content)
lists = pd.DataFrame(data['lists'])
lists = lists[~lists['closed']]
lists.reset_index(inplace=True, drop=True)
lists = lists[['id', 'name']]
lists = lists.rename(columns={'id': 'idList', 'name': 'listName'})
return lists.to_dict('records')
@st.cache
def active_cards(content=None):
df = pd.DataFrame(load_data(content)['cards'])
df = df[~df['closed']]
df = df[['name', 'desc', 'idLabels', 'idList']]
df.reset_index(inplace=True, drop=True)
return df
@st.cache
def labels_map(content=None):
labels = load_data(content)['labels']
return {l['id']: l['name'] for l in labels}
st.sidebar.header('1. Export your data from Trello')
st.sidebar.write('''
- Show menu
- More
- Print and Export
- Export as JSON
''')
st.sidebar.header('2. Upload file')
trello_content = st.sidebar.file_uploader("Select file", type='json')
if trello_content:
st.sidebar.header('3. Adjust dump')
data = load_data(trello_content)
selected_lists = st.sidebar.multiselect(
'Lists',
active_lists(trello_content),
default=active_lists(trello_content),
format_func=lambda l: l['listName'],
)
selected_labels = st.sidebar.multiselect(
'Labels',
data['labels'],
format_func=lambda l: l['name'],
)
st.title('Your cards')
selected_lists = | pd.DataFrame(selected_lists) | pandas.DataFrame |
# %%
# Artificial Neural Network for RPM and FCR Prediction
# <NAME>, Ph.D. Candidate
# %%
# Load required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# %%
# Display training progress
class ReportProgress(keras.callbacks.Callback):
def __init__(self, sample, test_split_ratio, n_epochs):
self.sample = sample
self.test_split_ratio = test_split_ratio
self.n_epochs = n_epochs
def on_train_begin(self, logs):
n_examples = len(self.sample)
n_train = int((1 - self.test_split_ratio) * n_examples)
print(
"Training started on {0} out of {1} available examples.".format(
n_train, n_examples
)
)
def on_epoch_end(self, epoch, logs):
if epoch % 20 == 0 and epoch != 0 and epoch != self.n_epochs:
print("{0} out of {1} epochs completed.".format(epoch, self.n_epochs))
def on_train_end(self, logs):
print("Training finished.")
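# Hypothetical usage sketch (model, X, y and n_epochs are assumed names, not defined here):
# model.fit(X, y, epochs=n_epochs, verbose=0,
#           callbacks=[ReportProgress(X, test_split_ratio=0.2, n_epochs=n_epochs)])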
# %%
# Load data from Excel to a pandas dataframe
def load_from_Excel(vehicle, settings):
directory = (
"../../Google Drive/Academia/PhD Thesis/Field Experiments/Veepeak/"
+ vehicle
+ "/Processed/"
)
input_file = vehicle + " - {0} - {1}.xlsx".format(
settings["INPUT_TYPE"], settings["INPUT_INDEX"]
)
input_path = directory + input_file
sheets_dict = pd.read_excel(input_path, sheet_name=None, header=0)
df = pd.DataFrame()
for _, sheet in sheets_dict.items():
df = df.append(sheet)
df.reset_index(inplace=True, drop=True)
return df
# %%
# Load sample data from Excel to a pandas dataframe
def load_sample_from_Excel(vehicle, settings):
directory = (
"../../Google Drive/Academia/PhD Thesis/Field Experiments/Veepeak/"
+ vehicle
+ "/Processed/"
)
input_file = vehicle + " - {0} - {1}.xlsx".format(
settings["INPUT_TYPE"], settings["INPUT_INDEX"]
)
input_path = directory + input_file
sheets_dict = pd.read_excel(input_path, sheet_name=None, header=0)
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
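    # Usage sketch (date range assumed, not from the source):
    #   st = stAlpha('2017-01-01', '2017-12-31'); a1 = st.alpha1()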
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
        incomplete
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
"""
incomplete: no factor value is computed here; the method returns None
"""
close = self.close
close_delay = Delay(close,1)
return None
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
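# alpha34: 12-day moving average of close divided by the current close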
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
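# alpha53: share of the last 12 days with close above the prior close, in percent
# (assuming Count(0, a, b, n) counts the days where a > b over the window)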
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
# label the columns in concat order: Open first, then close
data.columns = ['open','close']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
# second branch of the ternary: abs2 must exceed both abs3 (judge3) and abs1 (judge4)
data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
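# alpha65: 6-day moving average of close divided by the current close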
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
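# alpha70: 6-day rolling standard deviation of the trade amount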
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = np.matlib.repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10) # 10-day delay, distinct from the 20-day delay above
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
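# alpha88: percentage change of close versus its value 20 days earlier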
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
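# alpha106: close minus the close 20 days earlier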
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = r['r1'] * r['r2'] * -1
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low']) / data['close_mean']) # (high - low) scaled by the 5-day close mean
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = | pd.DataFrame(r['r1'] * r['r2']) | pandas.DataFrame |
"""
accounting.py
Accounting and Financial functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import datetime
import numpy as np
import pandas as pd
from pf.constants import DAYS_IN_YEAR
from pf.util import get_age
################################################################################################################################
# Financial Statements
################################################################################################################################
def calc_balance(accounts=None, category_dict=None):
"""
Calculate daily balances of grouped assets/liabilities based on `category_dict`s from `accounts`, returns a DataFrame.
Balance sheet is split into these sections:
Assets
Current
Cash
...
Long Term
Investments
Property
...
Liabilities
Current
Credit Card
...
Long Term
Loans
...
categories = {
'Assets' : {
'Current': {
# User category keys and account DataFrame columns list for values
'Cash & Cash Equivalents': [
('Cash', 'BofA Checking'),
('Cash', 'BofA Savings'),
...
],
'User Category': [...]
...
},
'Long Term': {...}
},
'Liabilities' : {
'Current': {...},
'Long Term': {...}
}
}
"""
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
balance_dict = {
(k0, k1, k2): accounts[v2].sum(axis=1) if v2 else pd.Series(0, index=accounts.index)
for k0, v0 in category_dict.iteritems()
for k1, v1 in v0.iteritems()
for k2, v2 in v1.iteritems()
}
# Convert to DataFrame
balance = pd.DataFrame(balance_dict)
return balance.fillna(0.0)
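# A minimal usage sketch (hypothetical account names; the real `accounts` frame and its
# columns come from the caller and may differ):
#
#   categories = {
#       'Assets': {
#           'Current': {'Cash & Cash Equivalents': [('Cash', 'BofA Checking')]},
#           'Long Term': {'Investments': []}
#       },
#       'Liabilities': {
#           'Current': {'Credit Card': []},
#           'Long Term': {'Loans': []}
#       }
#   }
#   balance = calc_balance(accounts, category_dict=categories)
#   # an empty selector list simply yields a zero series for that line item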
def balance_sheet(balance=None, period=datetime.datetime.now().year):
"""
Calculate and return a balance sheet.
Balance will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
All levels may be user defined by the category dictionary. The value of the last level must contain valid pandas DataFrame
column selectors, e.g. `Account Type` for single index column / level 0 access or `('Cash', 'Account Name')` for
multilevel indexing.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
balance = calc_balance(accounts, category_dict=categories)
balancesheet = balance_sheet(balance, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
balance_sheets = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_balance = pd.DataFrame(balance[p].iloc[-1])
p_balance.columns = ['$']
p_balance.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_balance[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
balance_df = pd.concat([p_balance, net])
# Calculate percentages of level 0
balance_df['%'] = 100.0 * balance_df.div(balance_df.sum(level=0), level=0)
# Calculate hierarchical totals
l1_totals = balance_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = balance_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
balance_df = balance_df.combine_first(l1_totals)
balance_df = balance_df.combine_first(l0_totals)
# Update columns with period
balance_df.columns = pd.MultiIndex.from_product([[p], balance_df.columns])
# Add to main list
balance_sheets.append(balance_df)
# Concatenate all the periods together
balance_sheets_df = pd.concat(balance_sheets, 1)
return balance_sheets_df
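# A sketch of a multi-period call (assuming `balance` covers 2014-2015); each period's
# ['$', '%'] columns come back under a MultiIndex keyed by the period string:
#
#   balancesheet = balance_sheet(balance, period=[2014, 2015])
#   balancesheet[('2015', '$')]   # dollar column for 2015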
def calc_income(paychecks=None, transactions=None, category_dict=None, tax_type=None):
"""
Calculate daily income of grouped revenue/expenses/taxes based on `category_dict`s from `paychecks` and `transactions`,
returns a DataFrame.
Income Statement is split into these sections:
Revenue
Operating
Technical Services
...
Non-Operating
Interest Income
Dividend & Capital Gains
...
Expenses
Operating
Medical
...
Non-Operating
...
Taxes
Operating
Federal
State
...
All levels may be user defined by the category dictionary. However the last level must contain a dictionary
with at least a `category` key and set of categories for the value along with optional parameters.
```
'Revenue': {
'Operating': {
# Paychecks
'Technical Services': {
'source': 'paycheck', # Optional string to select data source, defaults to 'transactions'
'categories': {'Paycheck', ...}, # Required set of categories
'labels': set(), # Optional set of labels, defaults to set() if not passed in
'logic': '', # Optional 'not' string to set inverse of 'labels', defaults to ''
'tax_type': '' # Optional string for tax ('realized' or 'unrealized'), defaults to 'realized'
},
'User Category': {...}
},
'Non-Operating': {
'User Category': {
'categories': {...}
}
}
},
'Expenses': {
'Operating': {...},
'Non-Operating': {..}
},
'Taxes': {
'Operating': {...},
'Non-Operating': {..}
}
```
"""
# Clean category
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if not v2.has_key('source'):
category_dict[k0][k1][k2]['source'] = 'transactions'
if not v2.has_key('labels'):
category_dict[k0][k1][k2]['labels'] = set()
if not v2.has_key('logic'):
category_dict[k0][k1][k2]['logic'] = ''
if not v2.has_key('agg'):
category_dict[k0][k1][k2]['agg'] = np.ones(len(category_dict[k0][k1][k2]['categories']))
if not v2.has_key('tax_type'):
category_dict[k0][k1][k2]['tax_type'] = 'realized'
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
income_dict = {}
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if v2['source'] == 'transactions':
income_dict[(k0, k1, k2)] = transactions[
(
# If it is in the category
transactions['Category'].isin(v2['categories'])
& transactions['Account Name'].isin(tax_type[v2['tax_type']])
) & (
# And if is has the correct label
(transactions['Labels'].apply(
lambda x: x.isdisjoint(v2['labels']) if v2['logic'] else not x.isdisjoint(v2['labels'])
)) |
# Or it does not have any labels
(transactions['Labels'].apply(lambda x: v2['labels'] == set()))
)
]['Amount']
else:
income_dict[(k0, k1, k2)] = (v2['agg'] * paychecks[list(v2['categories'])]).sum(axis=1)
# Convert to DataFrame
cats = income_dict.keys()
cats.sort()
income = pd.DataFrame(
data=[],
columns= | pd.MultiIndex.from_tuples(cats) | pandas.MultiIndex.from_tuples |
# Three ways to return a web response; these imports are always needed
from django.shortcuts import render # render a template and send the result to the browser
from django.http import HttpResponse, request # send output from code straight to the browser
from django.shortcuts import redirect # redirect from one view to another
import pymysql
import re
import pandas as pd
from datetime import datetime
from sql_account import sql_account
'''Things to consider
1. Show data according to permission level - ok
2. Send a notification when deleting or updating - not added for now
3. Include the account in the delete/update JavaScript prompts - ok
4. list_all should be sorted by department - ok
5. Basic styling - ok
6. Export the list - ok
7. Import - not added for now, given the problems it raises in practice
'''
'''
level codes
adm, pre, dir, spe, sup, dir, nor
dep (department names as stored in the DB; English equivalents below)
總部, 財務部, 人力資源部, 業務部, 客戶服務部, 總務部, 企劃部
admin_Office, Finance_Department, Human_Resources_Department, Sales_Department, Customer_Service_Department, General_Affairs_Department, Planning_Department'''
# =========================== test response ==============================
# return HttpResponse('hi')
# ========================staff_Login_Data_Retrieve=================================
# Helper used by every page to show the logged-in user's info
def staff_Login_Data_Retrieve(request):
# Empty dict to hold the session values read from the db tuple
staff_Login_Data = {}
staff_Login_Data['login_account'] = request.session['login_account']
staff_Login_Data['login_name'] = request.session['login_name']
staff_Login_Data['login_subjection_depar'] = request.session['login_subjection_depar']
staff_Login_Data['level'] = request.session['level']
# Indicates a login exists in the db/session; returned to other views and templates
staff_Login_Data['login'] = 1
return staff_Login_Data
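# The dict returned above looks like this (illustrative values only):
# {'login_account': 'user01', 'login_name': '王小明', 'login_subjection_depar': '總部',
#  'level': 'adm', 'login': 1}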
# ========================= main page ============================
def index(request):
return render(request, 'index.html')
# ==================== staff pages =============================
def staff_index(request):
show = {}
# Get the login data from the session
if request.session.get("login_name") != None:
# Store the result of staff_Login_Data_Retrieve in a variable
staff_Login_Data=staff_Login_Data_Retrieve(request)
# Decide which version of the page to show: check department and job level separately
if staff_Login_Data['login_subjection_depar'] in ['總部'] and staff_Login_Data['level'] in ['adm', 'pre']:
show['data'] = 0
# Pass the login data to the template
return render(request, "staff\\staff_index.html", {'staff_Login_Data':staff_Login_Data, 'show':show})
elif staff_Login_Data['login_subjection_depar'] in ['人力資源部'] and staff_Login_Data['level'] in ['dir', 'spe']:
show['data'] = 1
return render(request, "staff\\staff_index.html", {'staff_Login_Data':staff_Login_Data, 'show':show})
elif staff_Login_Data['login_subjection_depar'] in ['財務部', '業務部', '客戶服務部', '總務部', '企劃部'] and staff_Login_Data['level'] in ['dir', 'spe', 'sup']:
show['data'] = 2
return render(request, "staff\\staff_index.html", {'staff_Login_Data':staff_Login_Data, 'show':show})
else:
return render(request, 'staff\\staff_index.html', {'staff_Login_Data':staff_Login_Data})
else:
return render(request, 'staff\\staff_index.html')
# ======================staff_login=======================
def staff_Login(request):
if request.session.get("login_name") == None:
# Store the result of staff_Login_Data_Retrieve in a variable
# staff_Login_Data=staff_Login_Data_Retrieve(request)
# Only handle a form submission (POST)
if request.method == "POST":
# Read the values submitted by the HTML form
account = request.POST['account']
name = request.POST['name']
password = request.POST['password']
# Connect to the database > call the connect helper in sql_account
db = sql_account.connect()
cursor = db.cursor()
# Check whether the account exists and fetch a single row; the select list must match the values read below, otherwise tuple index out of range
sql = "select account, password, name, subjection_depar, level from staff_contrl where account='{}'".format(account)
cursor.execute(sql)
db.commit()
# Hold the fetched row for the checks below
staff_Login_Data = cursor.fetchone()
# Step 1: confirm a row was found (fetchone() returns None when the account does not exist)
if staff_Login_Data != None :
# Step 2: confirm the password matches
if staff_Login_Data[1] == password:
# Step 3: confirm the name matches
if staff_Login_Data[2] == name:
# Save the login data into the session for other templates to use
# indexes follow the select order > account, password, name, subjection_depar, level
request.session['login_account'] = staff_Login_Data[0]
request.session['login_name'] = staff_Login_Data[2]
request.session['login_subjection_depar'] = staff_Login_Data[3]
request.session['level'] = staff_Login_Data[4]
# return HttpResponse(request.session['level'])
# return HttpResponse("檢查完成")
return redirect("/staff_index/")
else:
return HttpResponse("查無此姓名,請重新登錄 <a href='/staff_Login/'>回上一頁</a>")
else:
return HttpResponse("密碼錯誤,請重新登入 <a href='/staff_Login/'>回上一頁</a>")
else:
return HttpResponse("帳號錯誤,請聯繫管理員 <a href='/staff_index/'>回上一頁</a>")
else:
return render(request, 'staff\\staff_Login.html')
else:
# Store the result of staff_Login_Data_Retrieve in a variable
staff_Login_Data=staff_Login_Data_Retrieve(request)
return redirect("/staff_index/")
# return HttpResponse('staff_Login')
# Logout: go from logged-in to logged-out; an account may be passed in
def staff_Logout(request, account=""):
if request.session.get("login_name") != None:
del request.session['login_account']
del request.session['login_name']
del request.session['login_subjection_depar']
del request.session['level']
# redirect() expects a URL, not a template path
return redirect("/staff_index/")
else:
return HttpResponse("已登出職員管理系統 <a href='/index/'>返回主頁</a>")
# =======================staff_Create==========================
def staff_Create(request):
if request.session.get("login_name") != None:
# Store the result of staff_Login_Data_Retrieve in a variable
staff_Login_Data=staff_Login_Data_Retrieve(request)
# Permission check > department check
if request.session['login_subjection_depar'] in (['總部', '人力資源部'] or ['admin_Office','Human_Resources_Department']):
# Permission check > job level check
if request.session['level'] in ['adm', 'pre', 'dir', 'spe']:
# Pass the login data to the template
return render(request, 'staff\\staff_Create.html', {'staff_Login_Data':staff_Login_Data})
else:
# Level check failed
return HttpResponse('權限等級不足')
else:
# Department check failed
return HttpResponse('不隸屬於部門職權範圍')
else:
# Must be logged in to create a record
return HttpResponse("尚未登入 <a href='/staff_Login/'>進行登入</a>")
def staff_DubleCheck(request):
if request.session.get("login_name") != None:
# Store the result of staff_Login_Data_Retrieve in a variable
staff_Login_Data=staff_Login_Data_Retrieve(request)
# Pass the login data to the template
# Set up a dict to receive the html form data
data={}
# Pull the values filled in on the create form via request.POST; the raw data is a dict {'key': values}
# data['list'] = request.POST['dic']
# [''] > '' > string
data['account'] = request.POST['account']
data['name'] = request.POST['name']
data['password'] = request.POST['password']
data['privacy_mail'] = request.POST['privacy_mail']
data['mobile_phine'] = request.POST['mobile_phine']
data['addr'] = request.POST['addr']
data['emergency_contact_name'] = request.POST['emergency_contact_name']
data['emergency_contact_tel'] = request.POST['emergency_contact_tel']
data['subjection_depar'] = request.POST['subjection_depar']
data['job_title'] = request.POST['job_title']
data['depar_director'] = request.POST['depar_director']
# ---------- validation >> account, name, password, phone, email ----------------------
# Account validation: only letters, digits and underscore are accepted
if not re.search(r"[A-Za-z]+", request.POST['account']):
msg = "帳號輸入錯誤,帳號不能有空白與特殊字元"
return HttpResponse(msg)
elif len(request.POST['account']) < 4:
msg = "帳號過短"
return HttpResponse(msg)
else:
# 若帳號格式皆正確,存入原始變數並把所有空白都移除
data['account'] = request.POST['account'].strip()
# 姓名驗證,只接受中文
if not re.search(r"[\u4e00-\u9fa5]", request.POST['name']):
msg = "姓名輸入錯誤,只接受中文"
return HttpResponse(msg)
else:
# If the name format is valid, store it with whitespace stripped
data['name'] = request.POST['name'].strip()
# Password validation: at least one upper- and one lower-case letter, length between 6 and 15 characters, no whitespace
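# e.g. 'Passw0rd' would pass (upper + lower case, 6-15 chars, no whitespace);
# 'password' (no upper case) or 'pass word' (contains a space) would be rejected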
if re.search(r"\s", request.POST['password']):
msg = "密碼輸入錯誤,不包含空白,請返回上一頁"
return HttpResponse(msg)
elif not re.search(r"[A-Z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個大寫英文"
return HttpResponse(msg)
elif not re.search(r"[a-z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個小寫英文"
return HttpResponse(msg)
# Length check
elif len(request.POST['password']) < 6:
msg = "密碼輸入錯誤,長度需大於6個字元"
return HttpResponse(msg)
elif len(request.POST['password']) > 15:
msg = "密碼輸入錯誤,長度需小於15個字元"
return HttpResponse(msg)
else:
# If the password format is valid, store it with whitespace stripped
data['password'] = request.POST['password'].strip()
# Mobile phone validation: digits only, no special characters, length must be exactly 10
if not re.search(r"09\d+", request.POST['mobile_phine']):
msg = "手機號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) > 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) < 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
else:
# 若手機號碼格式皆正確,存入原始變數並把所有空白都移除
data['mobile_phine'] = request.POST['mobile_phine'].strip()
# 私人信箱驗證,格式 > [email protected],長度2-6字元
if not re.search(r"[a-z0-9_\.-]+\@[\da-z\.-]+\.[a-z\.]{2,6}", request.POST['privacy_mail']):
msg = "信箱格式錯誤"
return HttpResponse(msg)
else:
# 若信箱格式皆正確,存入原始變數並把所有空白都移除
data['privacy_mail'] = request.POST['privacy_mail'].strip()
# 緊急聯絡人電話驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['emergency_contact_tel']):
msg = "緊急聯絡人電話號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) > 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) < 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
else:
# 若緊急連絡人電話格式皆正確,存入原始變數並把所有空白都移除
data['emergency_contact_tel'] = request.POST['emergency_contact_tel'].strip()
return render(request, 'staff\\staff_DubleCheck.html', {'data': data,'staff_Login_Data':staff_Login_Data})
else:
# 未登入不得新增
return HttpResponse("尚未登入 <a href='/staff_Login/'>進行登入</a>")
def staff_CreateConfirm(request):
try:
# 接收staff_DubleCheck的輸入資料
account = request.POST['account']
name = request.POST['name']
password = request.POST['password']
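        # Note: the password is handled and stored as plain text below; hashing it first
        # (e.g. with django.contrib.auth.hashers.make_password) would be the safer choice.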
privacy_mail = request.POST['privacy_mail']
# 設定為'NULL' > 防止回傳時出現Nano資料庫會出錯(varchar)
mail = 'NULL'
mobile_phine = request.POST['mobile_phine']
addr = request.POST['addr']
emergency_contact_name = request.POST['emergency_contact_name']
emergency_contact_tel = request.POST['emergency_contact_tel']
# 設定為0 > 防止回傳時出現Nano資料庫會出錯(int)
status = 0
category = 0
subjection_depar = request.POST['subjection_depar']
job_title = request.POST['job_title']
depar_director = request.POST['depar_director']
level = 'NULL'
note = 'NULL'
nomal_hour_month = 0
total_hour_month = 0
official_leave = 0
annual_sick_leave = 0
overtime_hour = 0
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
# 先檢查account是否重複
sql = "select * from staff_contrl where account='{}'".format(account)
cursor.execute(sql)
db.commit()
# 將檢查結果放置變數中,temporary > 臨時
tmp = cursor.fetchone()
# 檢查變數中是否有值,若db檢查為空
if tmp == None:
# 存入db
sql = "insert into staff_contrl (account, name, password, privacy_mail, mail, mobile_phine, addr, emergency_contact_name, emergency_contact_tel, status, category, subjection_depar, job_title, depar_director, level, note, nomal_hour_month, total_hour_month, official_leave, annual_sick_leave, overtime_hour) values ('{}','{}','{}','{}','{}','{}','{}','{}','{}',{},{},'{}','{}','{}','{}','{}',{},{},{},{},{})".format(account, name, password, privacy_mail, mail, mobile_phine, addr, emergency_contact_name, emergency_contact_tel, status, category, subjection_depar, job_title, depar_director, level, note, nomal_hour_month, total_hour_month, official_leave, annual_sick_leave, overtime_hour)
cursor.execute(sql)
db.commit()
cursor.close()
db.close()
result = "儲存成功 <a href='/staff_index/'>回首頁</a>"
else:
return HttpResponse('帳號已存在,請另選帳號 <a href="/staff_Create/">回上一頁</a>')
except:
result = "儲存失敗"
return HttpResponse(result)
# ===================staff_ListAll=========================
# 登入後判斷部門別顯示全部資訊
def staff_ListAll(request):
# 檢查db session中是否有loginName > 獲取用get
if request.session.get("login_name") != None:
# 將已登入資料存至變數中
staff_Login_Data=staff_Login_Data_Retrieve(request)
        if request.session['login_subjection_depar'] in ['總部', 'admin_Office']:
# 進行權限檢查 > 職務等級檢查
if request.session['level'] in ['adm', 'pre']:
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
# 列表需要抓出所有資料
sql = "select * from staff_contrl"
cursor.execute(sql)
db.commit()
# 存入變數中
staff_ListAll = cursor.fetchall()
cursor.close()
db.close()
# 回傳db資料與登陸資料
return render(request, "staff\\staff_ListAll.html", {'staff_ListAll': staff_ListAll, 'staff_Login_Data':staff_Login_Data})
except:
return HttpResponse('讀取失敗,請重新嘗試 <a href="/staff_index/">回職員管理首頁</a>')
else:
# 權限等級檢查未通過
return HttpResponse('權限等級不足')
else:
# 部門檢查未通過
return HttpResponse('不隸屬於部門範圍')
else:
# 未登入不得觀看資料
return HttpResponse('<a href="/staff_Login/">未登入,請登入後繼續</a>')
# 登入後判斷部門別顯示該部門資訊
def dep_Staff_ListAll(request):
# 檢查db session中是否有loginName > 獲取用get
if request.session.get("login_name") != None:
# 將已登入資料存至變數中
staff_Login_Data=staff_Login_Data_Retrieve(request)
subjection_depar = request.session['login_subjection_depar']
# return HttpResponse([staff_Login_Data])
if request.session['login_subjection_depar'] in ['財務部', '人力資源部', '業務部', '客戶服務部', '總務部', '企劃部']:
# 進行權限檢查 > 職務等級檢查
if request.session['level'] in ['dir', 'spe', 'sup']:
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
# 列表需要抓出所有資料
sql = "select * from staff_contrl where subjection_depar='{}'".format(subjection_depar)
cursor.execute(sql)
db.commit()
# 存入變數中
dep_Staff_ListAll = cursor.fetchall()
cursor.close()
db.close()
# 回傳db資料與登陸資料
return render(request, "staff\\dep_Staff_ListAll.html", {'dep_Staff_ListAll': dep_Staff_ListAll, 'staff_Login_Data':staff_Login_Data})
except:
return HttpResponse('讀取失敗,請重新嘗試 <a href="/staff_index/">回職員管理首頁</a>')
else:
# 權限等級檢查未通過
return HttpResponse('權限等級不足')
else:
# 部門檢查未通過
return HttpResponse('不隸屬於部門範圍')
else:
# 未登入不得觀看資料
return HttpResponse('<a href="/staff_Login/">未登入,請登入後繼續</a>')
# ==================personal_staff_Revise===========================
# 修改一定會有帶值account
def staff_Revise(request, account=""):
# 判斷是否有登入
if request.session.get("login_name") != None:
# 以session內的login_account作為sql搜尋條件
account = request.session['login_account']
        if request.session['login_subjection_depar'] in ['總部', '財務部', '人力資源部', '業務部', '客戶服務部', '企劃部', '總務部', 'admin_Office', 'Finance_Department', 'Human_Resources_Department', 'Sales_Department', 'Customer_Service_Department', 'Planning_Department', 'General_Affairs_Department']:
            if request.session['level'] in ['adm', 'pre', 'dir', 'sup', 'spe', 'nor']:
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
sql = "select * from staff_contrl where account = '{}'".format(account)
cursor.execute(sql)
db.commit()
db.close()
cursor.close()
# 取出單個資料
staff_Revise_Data = cursor.fetchone()
# 判斷取出資訊是否為空值
if staff_Revise_Data != None:
# 呼叫def staff_Login_Data_Retrieve,basic.html顯示登入之資料
staff_Login_Data=staff_Login_Data_Retrieve(request)
# return HttpResponse(staff_Revise_Data) > 回傳測試
return render(request, 'staff\\staff_Revise.html', {'staff_Revise_Data': staff_Revise_Data, 'size':1,'staff_Login_Data':staff_Login_Data})
else:
return HttpResponse('資料庫無資料取出 <a href="/staff_index/" >回上一頁</a>')
except:
return HttpResponse('資料庫連線失敗,請重試 <a href="/staff_index/" >回上一頁</a>')
else:
return HttpResponse('權限等級不足')
else:
return HttpResponse('不隸屬於部門範圍')
else:
return HttpResponse("尚未登入 <a href='/staff_Login/'>進行登入</a>")
def staff_ReviseDB(request):
# 將回傳值包回字典{key:value}
data = {}
# ----------驗證區 >> 帳號,姓名,密碼,電話,信箱----------------------
# 姓名驗證,只接受中文
if not re.search(r"[\u4e00-\u9fa5]", request.POST['name']):
msg = "姓名輸入錯誤,只接受中文"
return HttpResponse(msg)
else:
# 若姓名格式皆正確,存入原始變數並把所有空白都移除
data['name'] = request.POST['name'].strip()
# 密碼驗證,密碼要包含一個大小寫英文,長度大於6,小於15字元
if re.search(r"\s", request.POST['password']):
msg = "密碼輸入錯誤,不包含空白,請返回上一頁"
return HttpResponse(msg)
elif not re.search(r"[A-Z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個大寫英文"
return HttpResponse(msg)
elif not re.search(r"[a-z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個小寫英文"
return HttpResponse(msg)
# 長度檢查
elif len(request.POST['password']) < 6:
msg = "密碼輸入錯誤,長度需大於6個字元"
return HttpResponse(msg)
elif len(request.POST['password']) > 15:
msg = "密碼輸入錯誤,長度需小於15個字元"
return HttpResponse(msg)
else:
# 若密碼格式皆正確,把所有空白都移除
data['password'] = request.POST['password'].strip()
# 手機驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['mobile_phine']):
msg = "手機號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) > 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) < 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
else:
# 若手機號碼格式皆正確,存入原始變數並把所有空白都移除
data['mobile_phine'] = request.POST['mobile_phine'].strip()
# 私人信箱驗證,格式 > [email protected],長度2-6字元
if not re.search(r"[a-z0-9_\.-]+\@[\da-z\.-]+\.[a-z\.]{2,6}", request.POST['privacy_mail']):
msg = "信箱格式錯誤"
return HttpResponse(msg)
else:
# 若信箱格式皆正確,存入原始變數並把所有空白都移除
data['privacy_mail'] = request.POST['privacy_mail'].strip()
# 緊急聯絡人電話驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['emergency_contact_tel']):
msg = "緊急聯絡人電話號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) > 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) < 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
else:
# 若緊急連絡人電話格式皆正確,存入原始變數並把所有空白都移除
data['emergency_contact_tel'] = request.POST['emergency_contact_tel'].strip()
# 接收從sraff_Revise的表單資料,轉換為要放回資料庫的list資料
# 此段接收但不執行db修改
account = request.POST['account']
# 將驗證完資料放回要存入db的變數中
name = data['name']
password = data['password']
privacy_mail = data['privacy_mail']
mail = request.POST['mail']
mobile_phine = data['mobile_phine']
addr = request.POST['addr']
emergency_contact_name = request.POST['emergency_contact_name']
emergency_contact_tel = data['emergency_contact_tel']
status = request.POST['status']
category = request.POST['category']
subjection_depar = request.POST['subjection_depar']
job_title = request.POST['job_title']
depar_director = request.POST['depar_director']
level = request.POST['level']
note = request.POST['note']
nomal_hour_month = request.POST['nomal_hour_month']
total_hour_month = request.POST['total_hour_month']
official_leave = request.POST['official_leave']
annual_sick_leave = request.POST['annual_sick_leave']
overtime_hour = request.POST['overtime_hour']
# 執行db修改
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
# 要加上where條件式 > 否則資料庫會全改,若要多條件 > and
sql = "update staff_contrl set name='{}', password='{}', privacy_mail='{}', mail='{}', mobile_phine='{}', addr='{}', emergency_contact_name='{}', emergency_contact_tel='{}', status={}, category={}, subjection_depar='{}', job_title='{}', depar_director='{}', level='{}', note='{}', nomal_hour_month={}, total_hour_month={}, official_leave={}, annual_sick_leave={}, overtime_hour={} where account='{}'".format(name, password, privacy_mail, mail, mobile_phine, addr, emergency_contact_name, emergency_contact_tel, status, category, subjection_depar, job_title, depar_director, level, note, nomal_hour_month, total_hour_month, official_leave, annual_sick_leave, overtime_hour, account)
cursor.execute(sql)
db.commit()
return HttpResponse("<a href='/staff_Revise/'>個人資料修改成功,回至修改頁面</a>")
# ==================allstaff_staff_Revise===========================
def all_staff_Revise(request, account=""):
# 判斷是否有登入
if request.session.get("login_name") != None:
# 進行權限檢查 > 部門檢查
        if request.session['login_subjection_depar'] in ['總部', '財務部', '人力資源部', '業務部', '客戶服務部', '企劃部', '總務部', 'admin_Office', 'Finance_Department', 'Human_Resources_Department', 'Sales_Department', 'Customer_Service_Department', 'Planning_Department', 'General_Affairs_Department']:
# 進行權限檢查 > 職務等級檢查
if request.session['level'] in ['adm', 'pre', 'spe', 'dir']:
# 以staff_listAll內的account作為sql搜尋條件
if account != "":
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
sql = "select * from staff_contrl where account = '{}'".format(account)
cursor.execute(sql)
db.commit()
db.close()
cursor.close()
# 取出單個資料
all_Staff_Revise_Data = cursor.fetchone()
# 判斷取出資訊是否為空值
if all_Staff_Revise_Data != None:
# 呼叫def staff_Login_Data_Retrieve,basic.html顯示登入之資料
staff_Login_Data=staff_Login_Data_Retrieve(request)
return render(request, 'staff\\all_staff_Revise.html', {'all_Staff_Revise_Data': all_Staff_Revise_Data, 'size':1,'staff_Login_Data':staff_Login_Data})
else:
return HttpResponse('資料庫無資料取出 <a href="/staff_index/" >回上一頁</a>')
except:
return HttpResponse('資料庫連線失敗,請重試 <a href="/staff_index/" >回上一頁</a>')
else:
return HttpResponse('資料庫未找到相關資料,請返回重新嘗試 <a href="/staff_index/" >回上一頁</a>')
else:
# 權限等級檢查未通過
return HttpResponse('權限等級不足')
else:
# 部門檢查未通過
return HttpResponse('不隸屬於部門範圍')
else:
return HttpResponse("尚未登入 <a href='/staff_Login/'>進行登入</a>")
def all_staff_ReviseDB(request, account=""):
# 將回傳值包回字典{key:value}
data = {}
# ----------驗證區 >> 帳號,姓名,密碼,電話,信箱----------------------
# 姓名驗證,只接受中文
if not re.search(r"[\u4e00-\u9fa5]", request.POST['name']):
msg = "姓名輸入錯誤,只接受中文"
return HttpResponse(msg)
else:
# 若姓名格式皆正確,存入原始變數並把所有空白都移除
data['name'] = request.POST['name'].strip()
# 密碼驗證,密碼要包含一個大小寫英文,長度大於6,小於15字元
if re.search(r"\s", request.POST['password']):
msg = "密碼輸入錯誤,不包含空白,請返回上一頁"
return HttpResponse(msg)
elif not re.search(r"[A-Z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個大寫英文"
return HttpResponse(msg)
elif not re.search(r"[a-z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個小寫英文"
return HttpResponse(msg)
# 長度檢查
elif len(request.POST['password']) < 6:
msg = "密碼輸入錯誤,長度需大於6個字元"
return HttpResponse(msg)
elif len(request.POST['password']) > 15:
msg = "密碼輸入錯誤,長度需小於15個字元"
return HttpResponse(msg)
else:
# 若密碼格式皆正確,把所有空白都移除
data['password'] = request.POST['password'].strip()
# 手機驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['mobile_phine']):
msg = "手機號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) > 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) < 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
else:
# 若手機號碼格式皆正確,存入原始變數並把所有空白都移除
data['mobile_phine'] = request.POST['mobile_phine'].strip()
# 私人信箱驗證,格式 > <EMAIL>,長度2-6字元
if not re.search(r"[a-z0-9_\.-]+\@[\da-z\.-]+\.[a-z\.]{2,6}", request.POST['privacy_mail']):
msg = "私人信箱格式錯誤"
return HttpResponse(msg)
else:
# 若信箱格式皆正確,存入原始變數並把所有空白都移除
data['privacy_mail'] = request.POST['privacy_mail'].strip()
# 公司信箱驗證,格式 > <EMAIL>,長度2-6字元
if not re.search(r"[a-z0-9_\.-]+\@[\da-z\.-]+\.[a-z\.]{2,6}", request.POST['mail']):
msg = "公司信箱格式錯誤"
return HttpResponse(msg)
else:
# 若信箱格式皆正確,存入原始變數並把所有空白都移除
data['mail'] = request.POST['mail'].strip()
# 緊急聯絡人電話驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['emergency_contact_tel']):
msg = "緊急聯絡人電話號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) > 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) < 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
else:
# 若緊急連絡人電話格式皆正確,存入原始變數並把所有空白都移除
data['emergency_contact_tel'] = request.POST['emergency_contact_tel'].strip()
# 接收從all_staff_Revise的表單資料,轉換為要放回資料庫的list資料
account = request.POST['account']
name = data['name']
password = data['password']
privacy_mail = data['privacy_mail']
mail = data['mail']
mobile_phine = data['mobile_phine']
addr = request.POST['addr']
emergency_contact_name = request.POST['emergency_contact_name']
emergency_contact_tel = data['emergency_contact_tel']
status = request.POST['status']
category = request.POST['category']
subjection_depar = request.POST['subjection_depar']
job_title = request.POST['job_title']
depar_director = request.POST['depar_director']
level = request.POST['level']
note = request.POST['note']
nomal_hour_month = request.POST['nomal_hour_month']
total_hour_month = request.POST['total_hour_month']
official_leave = request.POST['official_leave']
annual_sick_leave = request.POST['annual_sick_leave']
overtime_hour = request.POST['overtime_hour']
# 執行修改
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
# 要加上where條件式 > 否則資料庫全改,多條件 > and
sql = "update staff_contrl set name='{}', password='{}', privacy_mail='{}', mail='{}', mobile_phine='{}', addr='{}', emergency_contact_name='{}', emergency_contact_tel='{}', status={}, category={}, subjection_depar='{}', job_title='{}', depar_director='{}', level='{}', note='{}', nomal_hour_month={}, total_hour_month={}, official_leave={}, annual_sick_leave={}, overtime_hour={} where account='{}'".format(name, password, privacy_mail, mail, mobile_phine, addr, emergency_contact_name, emergency_contact_tel, status, category, subjection_depar, job_title, depar_director, level, note, nomal_hour_month, total_hour_month, official_leave, annual_sick_leave, overtime_hour, account)
cursor.execute(sql)
db.commit()
return HttpResponse("<a href='/staff_ListAll/'>職員資料修改成功,回至職員列表</a>")
# ==================dep_all_staff_Revise===========================
def dep_all_staff_Revise(request, account=""):
# 判斷是否有登入
if request.session.get("login_name") != None:
# 進行權限檢查 > 部門檢查
if request.session['login_subjection_depar'] in (['總部', '財務部', '人力資源部', '業務部', '客戶服務部', '企劃部', '總務部']):
# 進行權限檢查 > 職務等級檢查
if request.session['level'] in ['adm', 'pre', 'spe', 'dir']:
# 以staff_listAll內的account作為sql搜尋條件
if account != "":
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
sql = "select * from staff_contrl where account = '{}'".format(account)
cursor.execute(sql)
db.commit()
db.close()
cursor.close()
# 取出單個資料
dep_all_staff_Revise_Data = cursor.fetchone()
# return HttpResponse(dep_all_staff_Revisee_Data)
# 判斷取出資訊是否為空值
if dep_all_staff_Revise_Data != None:
# 呼叫def staff_Login_Data_Retrieve,basic.html顯示登入之資料
staff_Login_Data=staff_Login_Data_Retrieve(request)
return render(request, 'staff\\dep_all_staff_Revise.html', {'dep_all_staff_Revise_Data': dep_all_staff_Revise_Data, 'size':1,'staff_Login_Data':staff_Login_Data})
else:
return HttpResponse('資料庫無資料取出 <a href="/staff_index/" >回上一頁</a>')
except:
return HttpResponse('資料庫連線失敗,請重試 <a href="/staff_index/" >回上一頁</a>')
else:
return HttpResponse('資料庫未找到相關資料,請返回重新嘗試 <a href="/staff_index/" >回上一頁</a>')
else:
# 權限等級檢查未通過
return HttpResponse('權限等級不足')
else:
# 部門檢查未通過
return HttpResponse('不隸屬於部門範圍')
else:
return HttpResponse("尚未登入 <a href='/staff_Login/'>進行登入</a>")
def dep_all_staff_ReviseDB(request, account=""):
# 將回傳值包回字典{key:value}
data = {}
# ----------驗證區 >> 帳號,姓名,密碼,電話,信箱----------------------
# 姓名驗證,只接受中文
if not re.search(r"[\u4e00-\u9fa5]", request.POST['name']):
msg = "姓名輸入錯誤,只接受中文"
return HttpResponse(msg)
else:
# 若姓名格式皆正確,存入原始變數並把所有空白都移除
data['name'] = request.POST['name'].strip()
# 密碼驗證,密碼要包含一個大小寫英文,長度大於6,小於15字元
if re.search(r"\s", request.POST['password']):
msg = "密碼輸入錯誤,不包含空白,請返回上一頁"
return HttpResponse(msg)
elif not re.search(r"[A-Z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個大寫英文"
return HttpResponse(msg)
elif not re.search(r"[a-z]", request.POST['password']):
msg = "密碼輸入錯誤,需至少需一個小寫英文"
return HttpResponse(msg)
# 長度檢查
elif len(request.POST['password']) < 6:
msg = "密碼輸入錯誤,長度需大於6個字元"
return HttpResponse(msg)
elif len(request.POST['password']) > 15:
msg = "密碼輸入錯誤,長度需小於15個字元"
return HttpResponse(msg)
else:
# 若密碼格式皆正確,把所有空白都移除
data['password'] = request.POST['password'].strip()
# 手機驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['mobile_phine']):
msg = "手機號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) > 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['mobile_phine']) < 10:
msg = "手機號碼為10個數字"
return HttpResponse(msg)
else:
# 若手機號碼格式皆正確,存入原始變數並把所有空白都移除
data['mobile_phine'] = request.POST['mobile_phine'].strip()
# 私人信箱驗證,格式 > [email protected],長度2-6字元
if not re.search(r"[a-z0-9_\.-]+\@[\da-z\.-]+\.[a-z\.]{2,6}", request.POST['privacy_mail']):
msg = "私人信箱格式錯誤"
return HttpResponse(msg)
else:
# 若信箱格式皆正確,存入原始變數並把所有空白都移除
data['privacy_mail'] = request.POST['privacy_mail'].strip()
# 公司信箱驗證,格式 > <EMAIL>@xxx.<EMAIL>,長度2-6字元
if not re.search(r"[a-z0-9_\.-]+\@[\da-z\.-]+\.[a-z\.]{2,6}", request.POST['mail']):
msg = "公司信箱格式錯誤"
return HttpResponse(msg)
else:
# 若信箱格式皆正確,存入原始變數並把所有空白都移除
data['mail'] = request.POST['mail'].strip()
# 緊急聯絡人電話驗證,只接受數字,不接受特殊字元,長度需 == 10
if not re.search(r"09\d+", request.POST['emergency_contact_tel']):
msg = "緊急聯絡人電話號碼需為數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) > 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
elif len(request.POST['emergency_contact_tel']) < 10:
msg = "緊急聯絡人電話號碼為10個數字"
return HttpResponse(msg)
else:
# 若緊急連絡人電話格式皆正確,存入原始變數並把所有空白都移除
data['emergency_contact_tel'] = request.POST['emergency_contact_tel'].strip()
# 接收從all_staff_Revise的表單資料,轉換為要放回資料庫的list資料
account = request.POST['account']
name = data['name']
password = data['password']
privacy_mail = data['privacy_mail']
mail = data['mail']
mobile_phine = data['mobile_phine']
addr = request.POST['addr']
emergency_contact_name = request.POST['emergency_contact_name']
emergency_contact_tel = data['emergency_contact_tel']
status = request.POST['status']
category = request.POST['category']
subjection_depar = request.POST['subjection_depar']
job_title = request.POST['job_title']
depar_director = request.POST['depar_director']
level = request.POST['level']
note = request.POST['note']
nomal_hour_month = request.POST['nomal_hour_month']
total_hour_month = request.POST['total_hour_month']
official_leave = request.POST['official_leave']
annual_sick_leave = request.POST['annual_sick_leave']
overtime_hour = request.POST['overtime_hour']
# 執行修改
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
# 要加上where條件式 > 否則資料庫全改,多條件 > and
sql = "update staff_contrl set name='{}', password='{}', privacy_mail='{}', mail='{}', mobile_phine='{}', addr='{}', emergency_contact_name='{}', emergency_contact_tel='{}', status={}, category={}, subjection_depar='{}', job_title='{}', depar_director='{}', level='{}', note='{}', nomal_hour_month={}, total_hour_month={}, official_leave={}, annual_sick_leave={}, overtime_hour={} where account='{}'".format(name, password, privacy_mail, mail, mobile_phine, addr, emergency_contact_name, emergency_contact_tel, status, category, subjection_depar, job_title, depar_director, level, note, nomal_hour_month, total_hour_month, official_leave, annual_sick_leave, overtime_hour, account)
cursor.execute(sql)
db.commit()
return HttpResponse("<a href='/dep_Staff_ListAll/'>部門職員資料修改成功,回至職員列表</a>")
# ========================staff_Delete=================================
def staff_Delete(request, account=""):
if request.session.get("login_name") != None:
# 進行權限檢查 > 部門檢查
        if request.session['login_subjection_depar'] in ['總部', '財務部', '人力資源部', '業務部', '客戶服務部', '企劃部', 'admin_Office', 'Finance_Department', 'Human_Resources_Department', 'Sales_Department', 'Customer_Service_Department', 'Planning_Department']:
# 進行權限檢查 > 職務等級檢查
if request.session['level'] in ['adm', 'pre']:
# 以listall表單內的account作為sql搜尋條件
if account != "":
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
sql = "delete from staff_contrl where account = '{}'".format(account)
cursor.execute(sql)
db.commit()
db.close()
cursor.close()
return HttpResponse("<a href='/staff_ListAll/'>刪除成功,回至列表</a>")
except:
return HttpResponse("<a href='/staff_ListAll/'>刪除失敗,請重試</a>")
else:
return render(request, "staff\\staff_ListAll.html")
else:
# 權限等級檢查未通過
return HttpResponse('權限等級不足')
else:
# 部門檢查未通過
return HttpResponse('不隸屬於部門範圍')
else:
return HttpResponse("尚未登入 <a href='/staff_Login/'>進行登入</a>")
# ========================all_staff_data_Export=================================
# 所有檔案匯出
def all_staff_data_Export(request):
try:
# 連結資料庫 > 呼叫sql_account內的db_conect
db = sql_account.connect()
cursor = db.cursor()
sql = "select * from staff_contrl"
cursor.execute(sql)
all_staff_data = cursor.fetchall()
# 取得sql欄位
field = cursor.description
# columns > sql列
columns = []
# 以長度透過迴圈加入變數中
for i in range(len(field)):
columns.append(field[i][0])
# 取得本機時間並格式化(excel不接受特殊字元),需要引入datetime
localTime = datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%p")
# 設定存取路徑,名稱為現在時間
result_PATH = r'D:\\python課程\\自我練習與實作\\rex_web\\{}.xlsx'.format('職員管理總表備份'+localTime)
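        # The absolute Windows path above is machine-specific; building it from a configurable
        # base directory (for example settings.BASE_DIR) would be more portable.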
# 以pandas寫入路徑
writer = pd.ExcelWriter( result_PATH , engine='xlsxwriter')
# 添加sql欄位
        df = pd.DataFrame(columns=columns)
"""
E2E Tests for Generating Data. These tests make use of pre-created models that can
be downloaded from S3. We utilize a generation utility that will automatically determine
if we are using a simple model or a DF Batch model.
When adding a new model to test, the model filename should conform to:
description-MODE-TOK-major-minor.tar.gz
So for example:
safecast-batch-sp-0-14.tar.gz -- would be a model built on Safecast data in DF batch mode
using version 0.14.x of synthetics with a SentencePiece tokenizer
MODES:
- simple
- batch
TOK:
- char
- sp
"""
import pytest
import pandas as pd
from gretel_synthetics.generate_utils import DataFileGenerator
BATCH_MODELS = [
"https://gretel-public-website.s3-us-west-2.amazonaws.com/tests/synthetics/models/safecast-batch-sp-0-14.tar.gz"
]
@pytest.mark.parametrize(
"model_path", BATCH_MODELS
)
def test_generate_batch(model_path, tmp_path):
gen = DataFileGenerator(model_path)
out_file = str(tmp_path / "outdata")
fname = gen.generate(100, out_file)
count = 0
with open(fname) as fin:
for _ in fin:
count += 1
# account for the header
assert count-1 == 100
def scooter_val(line):
rec = line.split(", ")
if len(rec) == 6:
float(rec[5])
float(rec[4])
float(rec[3])
float(rec[2])
int(rec[0])
else:
raise Exception('record not 6 parts')
SIMPLE_MODELS = [
("https://gretel-public-website.s3-us-west-2.amazonaws.com/tests/synthetics/models/scooter-simple-sp-0-14.tar.gz", scooter_val), # noqa
("https://gretel-public-website.s3-us-west-2.amazonaws.com/tests/synthetics/models/scooter-simple-char-0-15.tar.gz", scooter_val) # noqa
]
@pytest.mark.parametrize(
"model_path,validator_fn", SIMPLE_MODELS
)
def test_generate_simple(model_path, validator_fn, tmp_path):
gen = DataFileGenerator(model_path)
out_file = str(tmp_path / "outdata")
fname = gen.generate(100, out_file, validator=validator_fn)
count = 0
with open(fname) as fin:
for _ in fin:
count += 1
assert count == 100
@pytest.mark.parametrize(
"model_path,seed", [
("https://gretel-public-website.s3-us-west-2.amazonaws.com/tests/synthetics/models/safecast-batch-sp-0-14.tar.gz", {"payload.service_handler": "i-051a2a353509414f0"}) # noqa
]
)
def test_generate_batch_smart_seed(model_path, seed, tmp_path):
gen = DataFileGenerator(model_path)
out_file = str(tmp_path / "outdata")
fname = gen.generate(100, out_file, seed=seed)
df = pd.read_csv(fname)
for _, row in df.iterrows():
row = dict(row)
for k, v in seed.items():
assert row[k] == v
@pytest.mark.parametrize(
"model_path,seed", [
("https://gretel-public-website.s3-us-west-2.amazonaws.com/tests/synthetics/models/safecast-batch-sp-0-14.tar.gz",
[{"payload.service_handler": "i-051a2a353509414f0"},
{"payload.service_handler": "i-051a2a353509414f1"},
{"payload.service_handler": "i-051a2a353509414f2"},
{"payload.service_handler": "i-051a2a353509414f3"}]) # noqa
]
)
def test_generate_batch_smart_seed_multi(model_path, seed, tmp_path):
gen = DataFileGenerator(model_path)
out_file = str(tmp_path / "outdata")
fname = gen.generate(100, out_file, seed=seed)
df = pd.read_csv(fname)
assert list(df["payload.service_handler"]) == list( | pd.DataFrame(seed) | pandas.DataFrame |
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import inspyred
import numpy
from IProgress import ProgressBar, Bar, Percentage
from cameo.core.strain_design import StrainDesignMethod, StrainDesign, StrainDesignMethodResult
from cameo.core.utils import get_reaction_for
from cameo.flux_analysis.analysis import phenotypic_phase_plane, flux_variability_analysis
from cameo.flux_analysis.simulation import fba
from cameo.strain_design.heuristic.evolutionary.archives import ProductionStrainArchive
from cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_min_yield, \
biomass_product_coupled_yield
from cameo.visualization.plotting import plotter
from cobra.core.model import Model
from cobra.exceptions import OptimizationError
from pandas import DataFrame
from marsi.cobra.flux_analysis.manipulation import knockout_metabolite
from marsi.cobra.strain_design.metaheuristic import MetaboliteKnockoutOptimization
from marsi.cobra.strain_design.target import MetaboliteKnockoutTarget
from marsi.utils import search_metabolites
logger = logging.getLogger(__name__)
__all__ = ["OptMet"]
class OptMet(StrainDesignMethod):
def __init__(self, model, evolutionary_algorithm=inspyred.ec.GA,
essential_metabolites=None, plot=True, *args, **kwargs):
super(OptMet, self).__init__(*args, **kwargs)
self._model = model
self._algorithm = evolutionary_algorithm
self._optimization_algorithm = None
self._essential_metabolites = essential_metabolites
self._plot = plot
self._manipulation_type = "metabolites"
@property
def manipulation_type(self):
return self._manipulation_type
@property
def plot(self):
return self._plot
@plot.setter
def plot(self, plot):
self._plot = plot
if self._optimization_algorithm is not None:
self._optimization_algorithm.plot = plot
def run(self, target=None, biomass=None, substrate=None, max_knockouts=5, variable_size=True,
simulation_method=fba, growth_coupled=False, max_evaluations=20000, population_size=200,
max_results=50, seed=None, **kwargs):
"""
Parameters
----------
target : str, Metabolite or Reaction
The design target
biomass : str, Metabolite or Reaction
The biomass definition in the model
substrate : str, Metabolite or Reaction
The main carbon source
max_knockouts : int
Max number of knockouts allowed
variable_size : bool
            If True, the candidate size can vary from 1 to max_knockouts; otherwise all candidates have exactly max_knockouts targets.
simulation_method: function
Any method from cameo.flux_analysis.simulation or equivalent
growth_coupled : bool
If true will use the minimum flux rate to compute the fitness
max_evaluations : int
Number of evaluations before stop
population_size : int
Number of individuals in each generation
max_results : int
Max number of different designs to return if found.
kwargs : dict
Arguments for the simulation method.
seed : int
A seed for random.
Returns
-------
OptMetResult
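
        Examples
        --------
        A minimal usage sketch; the reaction identifiers are placeholders rather than
        entries from a specific model::

            optimizer = OptMet(model)
            result = optimizer.run(target="EX_product_e", biomass="biomass_reaction",
                                   substrate="EX_glc__D_e", max_knockouts=3)
            result.data_frame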
"""
target = get_reaction_for(self._model, target)
biomass = get_reaction_for(self._model, biomass)
substrate = get_reaction_for(self._model, substrate)
if growth_coupled:
objective_function = biomass_product_coupled_min_yield(biomass, target, substrate)
else:
objective_function = biomass_product_coupled_yield(biomass, target, substrate)
optimization_algorithm = MetaboliteKnockoutOptimization(
model=self._model,
heuristic_method=self._algorithm,
essential_metabolites=self._essential_metabolites,
objective_function=objective_function,
plot=self.plot)
optimization_algorithm.simulation_kwargs = kwargs
optimization_algorithm.simulation_method = simulation_method
optimization_algorithm.archiver = ProductionStrainArchive()
result = optimization_algorithm.run(max_evaluations=max_evaluations,
pop_size=population_size,
max_size=max_knockouts,
variable_size=variable_size,
maximize=True,
max_archive_size=max_results,
seed=seed,
**kwargs)
kwargs.update(optimization_algorithm.simulation_kwargs)
return OptMetResult(self._model, result, objective_function, simulation_method, self._manipulation_type,
biomass, target, substrate, kwargs)
class OptMetResult(StrainDesignMethodResult):
__method_name__ = "OptMet"
def __init__(self, model, knockouts, objective_function, simulation_method, manipulation_type, biomass, target,
substrate, simulation_kwargs, *args, **kwargs):
assert isinstance(model, Model)
self._model = model
self._knockouts = knockouts
self._objective_function = objective_function
self._simulation_method = simulation_method
self._manipulation_type = manipulation_type
self._biomass = biomass
self._target = target
self._substrate = substrate
self._processed_solutions = None
self._simulation_kwargs = simulation_kwargs
super(OptMetResult, self).__init__(self.designs, *args, **kwargs)
@property
def designs(self):
if self._processed_solutions is None:
self._process_solutions()
return self._processed_solutions.designs.tolist()
def _repr_html_(self):
return """
<h3>OptMet Result</h3>
<ul>
<li>Simulation: %s<br/></li>
<li>Objective Function: %s<br/></li>
</ul>
%s
""" % (self._simulation_method.__name__,
self._objective_function._repr_latex_(),
self.data_frame._repr_html_())
@property
def data_frame(self):
if self._processed_solutions is None:
self._process_solutions()
return DataFrame(self._processed_solutions)
def _process_solutions(self):
processed_solutions = DataFrame(columns=["designs", "size", "fva_min", "fva_max",
"target_flux", "biomass_flux", "yield", "fitness"])
if len(self._knockouts) == 0:
logger.warning("No solutions found")
self._processed_solutions = processed_solutions
else:
progress = ProgressBar(maxval=len(self._knockouts), widgets=["Processing solutions: ", Bar(), Percentage()])
for i, solution in progress(enumerate(self._knockouts)):
try:
processed_solutions.loc[i] = process_metabolite_knockout_solution(
self._model, solution, self._simulation_method, self._simulation_kwargs,
self._biomass, self._target, self._substrate, self._objective_function)
except OptimizationError as e:
logger.error(e)
processed_solutions.loc[i] = [numpy.nan for _ in processed_solutions.columns]
self._processed_solutions = processed_solutions
def display_on_map(self, index=0, map_name=None, palette="YlGnBu"):
with self._model:
for ko in self.data_frame.loc[index, "metabolites"]:
knockout_metabolite(self._model, self._model.metabolites.get_by_id(ko))
fluxes = self._simulation_method(self._model, **self._simulation_kwargs)
fluxes.display_on_map(map_name=map_name, palette=palette)
def plot(self, index=0, grid=None, width=None, height=None, title=None, palette=None, **kwargs):
wt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass])
with self._model:
for ko in self.data_frame.loc[index, "metabolites"]:
knockout_metabolite(self._model, self._model.metabolites.get_by_id(ko))
mt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass])
if title is None:
title = "Production Envelope"
        data_frame = DataFrame(columns=["ub", "lb", "value", "strain"])
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
    expected = pd.DataFrame(False, index=df.index, columns=df.columns)
    tm.assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
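# Illustrative sketch (added here for clarity, not part of the original
# module): for a 2x2 DataFrame, _generate_indices yields every (row, column)
# pair, either as positional integers (values=True) or as axis labels.
def _example_generate_indices():
    frame = DataFrame(np.ones((2, 2)), index=['a', 'b'], columns=['x', 'y'])
    assert list(_generate_indices(frame, values=True)) == [
        (0, 0), (0, 1), (1, 0), (1, 1)]
    assert list(_generate_indices(frame, values=False)) == [
        ('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')]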
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
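# Illustrative sketch (added for clarity, not part of the original module):
# _axify places the key at position ``axis`` and fills the remaining axes
# with full slices, e.g. for a 2d object and axis=1 it builds
# ``(slice(None), key)`` so that ``obj.loc[_axify(obj, key, 1)]`` selects
# columns.
def _example_axify():
    frame = DataFrame(np.ones((2, 2)), index=['a', 'b'], columns=['x', 'y'])
    assert _axify(frame, 'x', 1) == (slice(None), 'x')
    assert _axify(frame, 'a', 0) == ('a', slice(None))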
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
                indices = _generate_indices(f, values)
                for i in indices:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
                indices = _generate_indices(f, values)
                for i in indices:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
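    # Illustrative sketch (hypothetical addition, not in the original suite):
    # with duplicate labels, positional access stays scalar while label
    # access returns every match.
    def test_dup_label_positional_vs_label_sketch(self):
        s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
        self.assertEqual(s.iat[0], 0)
        tm.assert_series_equal(s.loc[1], Series([0, 1], index=[1, 1],
                                                dtype='int64'))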
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
        # out-of-bounds slice indexers are allowed; out-of-bounds list and
        # scalar indexers raise IndexError
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
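    # Illustrative sketch (hypothetical addition, not in the original suite):
    # out-of-bounds .iloc slices are truncated the same way plain Python list
    # slicing truncates, while a scalar or list indexer that is out of bounds
    # raises IndexError as checked above.
    def test_iloc_slice_bounds_like_list_sketch(self):
        s = Series(np.arange(5))
        tm.assert_series_equal(s.iloc[3:100], s.iloc[3:])
        self.assertEqual(list(range(5))[3:100], [3, 4])
        self.assertRaises(IndexError, lambda: s.iloc[100])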
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
        # some edge cases where ix/loc should return the same
        # this is not an exhaustive set of cases
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
        # these now work rather than raising a KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
        # empty (essentially no-ops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # dup indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
        df['bar'] = np.zeros(10, dtype=np.complex128)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
        df['bar'] = np.zeros(10, dtype=np.complex128)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
    def test_loc_coercion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
        # this does not work, i.e. column 'test' is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
        # ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
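    # Illustrative sketch (hypothetical addition, not in the original suite):
    # the non-chained spelling reaches the same result without relying on the
    # chained_assignment option being disabled.
    def test_non_chained_assignment_sketch(self):
        df = pd.DataFrame({'a': lrange(4)})
        df['b'] = np.nan
        df.loc[[1, 3], 'b'] = [100, -100]
        expected = DataFrame({'a': [0, 1, 2, 3],
                              'b': [np.nan, 100, np.nan, -100]})
        tm.assert_frame_equal(df, expected)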
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
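# expected outcome for each (mask index, accessor) pair: either the binary
# string of the summed 'nums' selection or the expected error message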
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
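# assign `rhs` into the same 3x2 block of `df` via label-based (.loc),
# position-based (.iloc) and .ix indexers, comparing each result to `right`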
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer:
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ['A', 'C']]
result = _maybe_numeric_slice(df, [1])
expected = [1]
self.assertEqual(result, expected)
class TestSeriesNoneCoercion(tm.TestCase):
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion(tm.TestCase):
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_loc(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[0, ['foo']] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_coercion_with_setitem_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][
0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_loc_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
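# stAlpha computes a family of daily price/volume alpha factors (alpha1, alpha2, ...)
# from per-stock OHLCV, amount, vwap and return series loaded from SQL for [begin, end]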
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
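# load the daily per-stock fields once; every alphaN method below reuses them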
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
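# 6-day correlation between the cross-sectional rank of delta(log volume, 1)
# and the rank of the intraday return (close - open) / open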
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
#!/usr/bin/python
# coding: utf-8
import json
import pickle
import re
import jieba
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import QuantileTransformer
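# Feature engineering for Chinese legal-case texts: length statistics, regex-based
# party/guarantee/interest features, and TF-IDF features over jieba-tokenized text;
# case triplets (keys A/B/C) are read from JSON-lines files in get_tfidf below.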
def max_min_scaler(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def trans(text):
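# jieba-tokenize the text and drop purely numeric tokens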
text = list(jieba.cut(text))
text = " ".join([x for x in text if not x.isdigit()])
return text
def get_sub_col(col):
sub1_col, sub2_col = [sub for sub in col.split(" \n \n ")]
return pd.Series([sub1_col, sub2_col])
def get_sub_col_raw(col):
sub1_col, sub2_col = [sub for sub in col.split("\n\n")]
return pd.Series([sub1_col, sub2_col])
def get_length_features(df, stopwords, name):
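# word- and character-level lengths for the full text and its two sub-sections,
# plus their ratios; word counts exclude stopwords, lengths are min-max scaled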
def words_count(text, stopwords):
wordlist = [word for word in str(text).split() if word not in stopwords]
return len(wordlist)
# Word
df[f"col_len_{name}"] = df["col"].apply(lambda s: words_count(s, stopwords))
df[f"sub1_col_len_{name}"] = df["sub1_col"].apply(
lambda s: words_count(s, stopwords)
)
df[f"sub2_col_len_{name}"] = df["sub2_col"].apply(
lambda s: words_count(s, stopwords)
)
df[f"col_len_ratio_{name}"] = (
df[f"sub1_col_len_{name}"] / df[f"sub2_col_len_{name}"]
)
df[f"col_len_ratio2_{name}"] = (
df[f"sub2_col_len_{name}"] / df[f"col_len_{name}"]
)
df[f"col_len_c_{name}"] = df["col"].apply(len)
df[f"sub1_col_len_c_{name}"] = df["sub1_col"].apply(len)
df[f"sub2_col_len_c_{name}"] = df["sub2_col"].apply(len)
df[f"col_len_c_ratio_{name}"] = (
df[f"sub1_col_len_c_{name}"] / df[f"sub2_col_len_c_{name}"]
)
df[f"col_len_c_ratio2_{name}"] = (
df[f"sub2_col_len_c_{name}"] / df[f"col_len_c_{name}"]
)
df[f"sub1_col_len_{name}"] = df[[f"sub1_col_len_{name}"]].apply(
max_min_scaler
)
df[f"sub2_col_len_{name}"] = df[[f"sub2_col_len_{name}"]].apply(
max_min_scaler
)
df[f"sub1_col_len_c_{name}"] = df[[f"sub1_col_len_c_{name}"]].apply(
max_min_scaler
)
df[f"sub2_col_len_c_{name}"] = df[[f"sub2_col_len_c_{name}"]].apply(
max_min_scaler
)
useful_cols = [
f"sub1_col_len_{name}",
f"sub2_col_len_{name}",
f"col_len_ratio_{name}",
f"col_len_ratio2_{name}",
#
f"sub1_col_len_c_{name}",
f"sub2_col_len_c_{name}",
f"col_len_c_ratio_{name}",
f"col_len_c_ratio2_{name}",
]
return df[useful_cols]
def get_plantiff_features(df, name):
def f_plantiff_is_company(x):
r = re.search(r"原告(.*?)被告", x)
s = 0
if r:
plantiff = r.group(1)
if "法定代表人" in plantiff:
return 1
return s
reg = re.compile(r"原告")
df[f"sub1_col_num_plantiff_{name}"] = df["sub1_col_raw"].apply(
lambda s: len(reg.findall(s))
)
df[f"sub1_col_bool_plantiff_{name}"] = df["sub1_col_raw"].apply(
lambda s: f_plantiff_is_company(s)
)
useful_cols = [
f"sub1_col_num_plantiff_{name}",
f"sub1_col_bool_plantiff_{name}",
]
return df[useful_cols]
def get_defendant_features(df, name):
def f_defandent_noreply(text):
if any(
ss in text
for ss in ["未答辩", "拒不到庭", "未到庭", "未做答辩", "未应诉答辩", "未作出答辩", "未出庭"]
):
return 1
return 0
reg = re.compile(r"被告.*?法定代表人.*?。")
df[f"sub1_col_bool_defendant_{name}"] = df["sub1_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_bool_defendant_noreply_{name}"] = df["sub2_col_raw"].apply(
lambda s: f_defandent_noreply(s)
)
reg = re.compile(r"被告")
df[f"sub1_col_num_defendant_{name}"] = df["sub1_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub1_col_bool_defendant_{name}",
f"sub1_col_num_defendant_{name}",
]
return df[useful_cols]
def get_guarantor_features(df, name):
reg = re.compile(r"担保")
df[f"sub2_col_bool_guarantor_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_guarantor_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub2_col_bool_guarantor_{name}",
f"sub2_col_num_guarantor_{name}",
]
return df[useful_cols]
def get_guaranty_features(df, name):
reg = re.compile(r"抵押")
df[f"sub2_col_bool_guaranty_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_guaranty_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub2_col_bool_guarantor_{name}",
f"sub2_col_num_guarantor_{name}",
]
return df[useful_cols]
def get_interest_features(df, name):
def do_lixi(text):
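# parse the stated interest rate and annualize it: monthly rates (月利息/月息/月利率)
# are multiplied by 12, '毛' counts as tenths (x10); returns 0 if nothing matches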
m_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)(%|分)")
mm = m_reg.search(text)
m2_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)毛")
mm2 = m2_reg.search(text)
m3_reg = re.compile(r"月(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
mm3 = m3_reg.search(text)
y_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)(%|分)")
ym = y_reg.search(text)
y2_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)毛")
ym2 = y2_reg.search(text)
y3_reg = re.compile(r"年(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
ym3 = y3_reg.search(text)
if mm:
return round(float(mm.group(2)) * 12, 2)
elif mm2:
return round(float(mm2.group(2)) * 10 * 12, 2)
elif mm3:
return round(float(mm3.group(1)) * 12, 2)
elif ym:
return float(ym.group(2))
elif ym2:
return round(float(ym2.group(2)) * 10, 2)
elif ym3:
return float(ym3.group(1))
else:
return 0
def do_lixi_c(text):
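# same parsing as do_lixi, but bucketed: 0 = no rate found, 1 = < 24%,
# 2 = 24-36%, 3 = >= 36% annualized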
m_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)(%|分)")
mm = m_reg.search(text)
m2_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)毛")
mm2 = m2_reg.search(text)
m3_reg = re.compile(r"月(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
mm3 = m3_reg.search(text)
y_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)(%|分)")
ym = y_reg.search(text)
y2_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)毛")
ym2 = y2_reg.search(text)
y3_reg = re.compile(r"年(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
ym3 = y3_reg.search(text)
count = 0
if mm:
count = round(float(mm.group(2)) * 12, 2)
elif mm2:
count = round(float(mm2.group(2)) * 10 * 12, 2)
elif mm3:
count = round(float(mm3.group(1)) * 12, 2)
elif ym:
count = float(ym.group(2))
elif ym2:
count = round(float(ym2.group(2)) * 10, 2)
elif ym3:
count = float(ym3.group(1))
else:
count = 0
if count == 0:
return 0
elif count < 24:
return 1
elif count < 36:
return 2
else:
return 3
reg = re.compile(r"约定利息|约定月利息|年息|月息|利息|利率")
df[f"sub2_col_bool_interest_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_interest_{name}"] = df["sub2_col_raw"].apply(
lambda s: do_lixi(s)
)
df[f"sub2_col_num_interest_c_{name}"] = df["sub2_col_raw"].apply(
lambda s: do_lixi_c(s)
)
useful_cols = [
f"sub2_col_bool_interest_{name}",
f"sub2_col_num_interest_{name}",
f"sub2_col_num_interest_c_{name}",
]
return df[useful_cols]
def get_couple_features(df, name):
reg = re.compile(r"夫妻")
df[f"sub2_col_bool_couple_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_couple_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub2_col_bool_couple_{name}",
f"sub2_col_num_couple_{name}",
]
return df[useful_cols]
def get_death_features(df, name):
reg = re.compile(r"死亡")
df[f"sub2_col_bool_death_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_death_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [f"sub2_col_bool_death_{name}", f"sub2_col_num_death_{name}"]
return df[useful_cols]
def do_basic_feature(df, stopwords, name):
feature_list = []
feature = get_length_features(df, stopwords, name)
feature_list.append(feature)
feature = get_plantiff_features(df, name)
feature_list.append(feature)
feature = get_defendant_features(df, name)
feature_list.append(feature)
feature = get_guarantor_features(df, name)
feature_list.append(feature)
feature = get_guaranty_features(df, name)
feature_list.append(feature)
feature = get_interest_features(df, name)
feature_list.append(feature)
feature = get_couple_features(df, name)
feature_list.append(feature)
index = feature_list[0].index
for feature_dataset in feature_list[1:]:
pd.testing.assert_index_equal(index, feature_dataset.index)
df = pd.concat(feature_list, axis=1)
return df
def do_tfidf_feature(df, tfidf):
n_components = 30
svd = TruncatedSVD(
n_components=n_components, algorithm="arpack", random_state=2019
)
col_tfidf = tfidf.transform(df["col"])
feature_names = tfidf.get_feature_names()
ret_df = pd.DataFrame(col_tfidf.toarray(), columns=feature_names)
return ret_df
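# NOTE: the function returns above, so the TruncatedSVD reduction below is
# currently unreachable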
col_svd = svd.fit_transform(col_tfidf)
best_features = [
feature_names[i] + "i" for i in svd.components_[0].argsort()[::-1]
]
ret_df = pd.DataFrame(col_svd, columns=best_features[:n_components])
return ret_df
def get_length_related_features_col2(df):
df_copy = df.copy()
df_copy["col2_len"] = df_copy["col2"].apply(len)
df_copy["col2_len_relative"] = (
df_copy["col2_len"] / df_copy["col2_len"].max()
)
df_copy["col2_title_len"] = df_copy["col2"].apply(
lambda s: len(s.split("\n\n")[0])
)
df_copy["col2_title_relative"] = (
df_copy["col2_title_len"] / df_copy["col2_title_len"].max()
)
df_copy["col2_content_len"] = (
df_copy["col2_len"] - df_copy["col2_title_len"]
)
df_copy["col2_content_len_relative"] = (
df_copy["col2_content_len"] / df_copy["col2_content_len"].max()
)
df_copy["col2_title_ratio"] = (
df_copy["col2_title_len"] / df_copy["col2_len"]
)
useful_cols = [
"col2_len_relative",
"col2_title_relative",
"col2_content_len_relative",
"col2_title_ratio",
]
return df_copy[useful_cols]
def get_col2_re_features(df):
old_cols = set(df.columns)
# number of plaintiffs
reg = re.compile(r"原告")
df["col2_num_accuser"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
# number and ratio of male plaintiffs
reg = re.compile(r"原告.*?男.*?被告")
df["col2_num_male_accuser"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_male_accuser_rate"] = (
df["col2_num_male_accuser"] / df["col2_num_accuser"]
)
# number and ratio of plaintiffs with an entrusted litigation agent
reg = re.compile(r"原告.*?委托诉讼代理人.*?被告")
df["col2_num_company_accuser"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_company_accuser_rate"] = (
df["col2_num_company_accuser"] / df["col2_num_accuser"]
)
# Number of defendants ("被告"), and difference vs. the number of plaintiffs
reg = re.compile(r"被告")
df["col2_num_defendant"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
# Number and proportion of male defendants
reg = re.compile(r"被告.*?男.*?。")
df["col2_num_male_defendant"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_male_defendant_rate"] = (
df["col2_num_male_defendant"] / df["col2_num_defendant"]
)
df["col2_defendant_minus_num_accuser"] = (
df["col2_num_defendant"] - df["col2_num_accuser"]
).astype(int)
# Number and proportion of defendants with a legal representative ("法定代表人")
reg = re.compile(r"被告.*?法定代表人.*?。")
df["col2_num_company_defendant"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_company_accuser_rate"] = (
df["col2_num_company_defendant"] / df["col2_num_defendant"]
)
# Mentions of guarantee ("担保") in the body text
reg = re.compile(r"担保")
df["col2_num_danbao"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[1]))
)
# Number of punctuation marks
reg = re.compile(r"[。:,]")
df["col2_num_punctuation"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[1]))
)
# Number of words (jieba tokenization)
df["col2_num_word"] = df["col2"].apply(
lambda s: len(list(jieba.cut(s.split("\n\n")[1])))
)
df["col2_num_word_ratio"] = df["col2_num_word"] / df["col2_num_word"].max()
df["col2_num_word_divide_length"] = df["col2_num_word"] / df["col2"].apply(
len
)
useful_cols = list(set(df.columns).difference(old_cols))
return df[useful_cols]
def do_feature_engineering(list_text):
df = pd.DataFrame(list_text, columns=["col2"])
feature_list = []
feature = get_length_related_features_col2(df)
feature_list.append(feature)
feature = get_col2_re_features(df)
feature_list.append(feature)
index = feature_list[0].index
for feature_dataset in feature_list[1:]:
pd.testing.assert_index_equal(index, feature_dataset.index)
data = pd.concat(feature_list, axis=1)
qt = QuantileTransformer(random_state=2019)
for col in data.columns:
data[col] = qt.fit_transform(data[[col]])
return data
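# Hedged alternative sketch (same intent assumed as do_feature_engineering above): the
# QuantileTransformer can be fitted once on the full feature frame instead of being refitted
# column by column; column order may differ because get_col2_re_features uses a set difference.
def do_feature_engineering_single_fit_example(list_text):
    df = pd.DataFrame(list_text, columns=["col2"])
    features = pd.concat(
        [get_length_related_features_col2(df), get_col2_re_features(df)], axis=1
    )
    qt = QuantileTransformer(random_state=2019)
    return pd.DataFrame(
        qt.fit_transform(features), index=features.index, columns=features.columns
    )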
def do_feature_engineering_bak(list_text, stopwords, name):
df = pd.DataFrame(list_text, columns=["col_raw"])
df["col"] = df["col_raw"].apply(trans)
sub_col_raw_df = df["col_raw"].apply(lambda s: get_sub_col_raw(s))
sub_col_raw_df.columns = ["sub1_col_raw", "sub2_col_raw"]
sub_col_df = df["col"].apply(lambda s: get_sub_col(s))
sub_col_df.columns = ["sub1_col", "sub2_col"]
df = pd.concat([df, sub_col_raw_df, sub_col_df], axis=1)
basic_df = do_basic_feature(df, stopwords, name)
return basic_df
def get_tfidf(train_path, test_path, train_flag=False):
se = set()
list_text_a = []
list_text_b = []
list_text_c = []
num_of_train = 0
with open(train_path, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
x = json.loads(line)
se.add(x["A"])
se.add(x["B"])
se.add(x["C"])
if i % 2 == 0:
list_text_a.append(x["A"])
list_text_b.append(x["B"])
list_text_c.append(x["C"])
else:
list_text_a.append(x["A"])
list_text_b.append(x["C"])
list_text_c.append(x["B"])
num_of_train += 1
with open(test_path, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
x = json.loads(line)
se.add(x["A"])
se.add(x["B"])
se.add(x["C"])
if i % 2 == 0:
list_text_a.append(x["A"])
list_text_b.append(x["B"])
list_text_c.append(x["C"])
else:
list_text_a.append(x["A"])
list_text_b.append(x["C"])
list_text_c.append(x["B"])
df = pd.DataFrame(se, columns=["col_raw"])
text_a_df = pd.DataFrame(list_text_a, columns=["col_raw"])
text_b_df = | pd.DataFrame(list_text_b, columns=["col_raw"]) | pandas.DataFrame |
import pandas as pd
import sqlite3
def load_coded_as_dicts(link_codes_file, twitter_user_codes_file):
"""
Loads two dictionaries
link: code_str
twitter_screen_name: code_str
"""
try:
link_codes_df = pd.read_csv(link_codes_file)
link_codes = pd.Series(link_codes_df.code_str.values, index=link_codes_df.key).to_dict()
except FileNotFoundError:
print('Could not load {} creating empty dict'.format(link_codes_file))
link_codes = {}
try:
twitter_user_codes_df = pd.read_csv(twitter_user_codes_file)
twitter_user_codes = pd.Series(twitter_user_codes_df.code_str.values, index=twitter_user_codes_df.key).to_dict()
except FileNotFoundError:
print('Could not load {} creating empty dict'.format(twitter_user_codes_file))
twitter_user_codes = {}
return link_codes, twitter_user_codes
def get_dataframes(dbname):
"""
Get rows from the db and convert to dataframes
"""
print('dbname,', dbname)
conn = sqlite3.connect(dbname)
select_results = (
"""
SELECT serp.*, link.*, scraper_searches_serps.scraper_search_id from serp INNER JOIN link on serp.id = link.serp_id
INNER JOIN scraper_searches_serps on serp.id = scraper_searches_serps.serp_id;
"""
)
select_serps = (
"""
SELECT * from serp;
"""
)
data = pd.read_sql_query(select_results, conn)
serp_df = pd.read_sql_query(select_serps, conn)
conn.close()
return data, serp_df
def process_domain(x):
if x.raw_domain == 'TweetCarousel':
if 'search' in x.link:
return 'SearchTweetCarousel'
else:
return 'UserTweetCarousel'
try:
# TODO: mention that this throws an error if there are 5 periods...
if x.raw_domain.count('.') > 1:
first_period = x.raw_domain.find('.')
stripped = x.raw_domain[first_period+1:]
return stripped
except TypeError:
pass
return x.raw_domain
def prep_data(data):
"""
Prep operation on the dataframe:
change nulls to false for Boolean variables
fill null links w/ empty string
make domain categorical variable
args:
data - dataframe with results
returns:
prepped dataframe
"""
data.fillna({
'isTweetCarousel': 0,
'isMapsPlaces': 0,
'isMapsLocations': 0,
'isNewsCarousel': 0,
}, inplace=True)
data.loc[data.link.isnull(), 'link'] = ''
tweet_mask = data.isTweetCarousel == True
news_mask = data.isNewsCarousel == True
maps_location_mask = data.isMapsLocations == True
maps_places_mask = data.isMapsPlaces == True
people_also_ask_mask = data.misc.astype(str).str.contains(';People also ask')
data.loc[people_also_ask_mask, 'domain'] = 'people also ask'
data.loc[tweet_mask, 'domain'] = 'TweetCarousel'
data.loc[news_mask, 'link'] = 'NewsCarousel'
data.loc[news_mask, 'domain'] = 'NewsCarousel'
#kp_mask = data.link_type == 'knowledge_panel'
# data.loc[kp_mask, 'link'] = 'KnowledgePanel'
# data.loc[kp_mask, 'domain'] = 'KnowledgePanel'
data.loc[maps_location_mask, 'link'] = 'MapsLocations'
data.loc[maps_location_mask, 'domain'] = 'MapsLocations'
data.loc[maps_places_mask, 'link'] = 'MapsPlaces'
data.loc[maps_places_mask, 'domain'] = 'MapsPlaces'
def process_each_domain(df):
return df.apply(process_domain, axis=1)
data = data.rename(index=str, columns={"domain": "raw_domain"})
data = data.assign(domain = process_each_domain)
data.raw_domain = data.raw_domain.astype('category')
data.domain = data.domain.astype('str')
data.domain = data.domain.astype('category')
return data
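# Usage sketch (hedged): "serps.db" and the two coding CSV file names below are placeholders for
# illustration only; they are not referenced elsewhere in this module.
def load_and_prep_example(dbname="serps.db"):
    data, serp_df = get_dataframes(dbname)
    data = prep_data(data)
    link_codes, twitter_user_codes = load_coded_as_dicts(
        "link_codes.csv", "twitter_user_codes.csv"
    )
    return data, serp_df, link_codes, twitter_user_codes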
def set_or_concat(df, newdf):
if df is None:
ret = newdf
else:
ret = | pd.concat([df, newdf], sort=True) | pandas.concat |
from pathlib import Path
from typing import Callable, List, Optional, Dict
import cv2
import torch
import pandas as pd
from torch.utils.data import Dataset
from transforms import tensor_transform
N_CLASSES = 1103
DATA_ROOT = Path('./data')
def build_dataframe_from_folder(root: Path, class_map: Optional[Dict] = None):
if class_map is None:
new_class_map = {}
tmp = []
for subfolder in root.iterdir():
if class_map is None:
new_class_map[subfolder.name] = len(new_class_map)
class_id = new_class_map[subfolder.name]
else:
class_id = class_map[subfolder.name]
for image in subfolder.iterdir():
tmp.append((image, class_id))
df = | pd.DataFrame(tmp, columns=["image_path", "label"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[6]:
# import of standard python libraries
import numpy as np
import os
import time
import corner
import astropy.io.fits as pyfits
import sys
import argparse
#from tqdm import tqdm
import pandas as pd
import gc
#sys.path.insert(0, '../lenstronomy/lenstronomy/')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.ImSim.image_model import ImageModel
import lenstronomy.Util.param_util as param_util
import lenstronomy.Util.simulation_util as sim_util
import lenstronomy.Util.image_util as image_util
from lenstronomy.Util import kernel_util
from lenstronomy.Data.imaging_data import ImageData
from lenstronomy.Data.psf import PSF
# lenstronomy module import
import lenstronomy.Util.data_util as data_util
import lenstronomy.Util.util as util
import lenstronomy.Plots.plot_util as plot_util
from lenstronomy.SimulationAPI.sim_api import SimAPI
from lenstronomy.LightModel.Profiles.gaussian import GaussianEllipse
from lenstronomy.LensModel.lens_model import LensModel
# In[7]:
# simulation 2019-7-29 12:26 CT by <NAME>-<NAME>
from lenstronomy.SimulationAPI.sim_api import SimAPI
data = pd.read_csv('merged_agn_lc.csv')
data
def mag_to_flux(m, mz):
return 10**((mz - m)/2.5)
mag_g = mag_to_flux(data['mag_g'],27.5)
mag_r = mag_to_flux(data['mag_r'],27.5)
mag_i = mag_to_flux(data['mag_i'],27.5)
mag_z = mag_to_flux(data['mag_z'],27.5)
print(mag_g, mag_r, mag_i, mag_z)
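# Worked sanity check of the conversion above (hedged, arbitrary flux units): with zero point
# mz = 27.5, a magnitude of m = 25.0 gives 10**((27.5 - 25.0) / 2.5) = 10.0.
assert mag_to_flux(25.0, 27.5) == 10.0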
# define lens configuration and cosmology (not for lens modelling)
z_lens = 0.6
z_source = 1.5
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.)
# data specifics
sigma_bkg = 10.0 # background noise per pixel (Gaussian)
exp_time = 100. # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
numPix = 100 # cutout pixel size
deltaPix = 0.263 # pixel size in arcsec (area per pixel = deltaPix**2)
fwhm = 1.0 # full width half max of PSF (only valid when psf_type='gaussian')
psf_type = 'GAUSSIAN' # 'gaussian', 'pixel', 'NONE'
kernel_size = 91
# initial input simulation
# generate the coordinate grid and image properties
kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg)
data_class = ImageData(**kwargs_data)
kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix, 'truncation': 3}
psf_class = PSF(**kwargs_psf)
# In[8]:
if __name__ == "__main__":
print("simulation started")
show_img = True
IsTrain = True #False
num_samples = 10
root_folder = "./test_sims_full_band/"
if not os.path.exists(root_folder):
os.mkdir(root_folder)
if IsTrain:
np.random.seed(12345)
file_path = root_folder+"train/"
else:
np.random.seed(54321)
file_path = root_folder+"test/"
if not os.path.exists(file_path):
os.mkdir(file_path)
sigma_bkg = 8.0 # background noise per pixel (Gaussian)
exp_time = 90. # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
numPix = 100 # cutout pixel size
deltaPix = 0.263 # pixel size in arcsec (area per pixel = deltaPix**2)
#fwhm = 1.0 # full width half max of PSF (only valid when psf_type='gaussian')
psf_type = 'GAUSSIAN' # 'gaussian', 'pixel', 'NONE'
kernel_size = 91
bands = ['g', 'r', 'i', 'z']
for i in range(num_samples):
if i % 200 ==0:
print("making progress")
full_band_images = np.zeros((numPix, numPix, 4))
mag_agn_seed = np.random.randint(0, 9)
# generate the coordinate grid and image properties
# lensing quantities
### mean of the lens parameters
gamma_ext_mu, theta_E_mu, gamma_mu, lens_center_mu, lens_e_mu = 0.015, 1.4, 2.0, 0.0, 0.0
gamma_ext_sigma, theta_E_sigma, gamma_sigma, lens_center_sigma, lens_e_sigma= 0.005, 0.4, 0.05, 0.2, 0.2
gamma_ext = np.maximum(np.random.normal(gamma_ext_mu, gamma_ext_sigma), 0)
psi_ext = np.random.uniform(0.0, 2* np.pi)
theta_E = np.maximum(np.random.normal(loc=theta_E_mu, scale=theta_E_sigma), 0.1)
gamma = np.maximum(np.random.normal(gamma_mu, gamma_sigma), 1.85)
lens_center_x = np.random.normal(lens_center_mu, lens_center_sigma)
lens_center_y = np.random.normal(lens_center_mu, lens_center_sigma)
lens_e1 = np.minimum(np.random.normal(lens_e_mu, lens_e_sigma), 0.9)
lens_e2 = np.minimum(np.random.normal(lens_e_mu, lens_e_sigma), 0.9)
kwargs_shear = {'gamma_ext': gamma_ext, 'psi_ext': psi_ext} # shear values to the source plane
kwargs_spemd = {'theta_E': theta_E, 'gamma': gamma, 'center_x': lens_center_x, 'center_y': lens_center_y, 'e1': lens_e1, 'e2': lens_e2} # parameters of the deflector lens model
# the lens model is a supperposition of an elliptical lens model with external shear
lens_model_list = ['SPEP', 'SHEAR_GAMMA_PSI']
kwargs_lens = [kwargs_spemd, kwargs_shear]
lens_model_class = LensModel(lens_model_list=lens_model_list)
# choice of source type
source_type = 'SERSIC' # 'SERSIC' or 'SHAPELETS'
source_position_mu = 0.0
source_position_sigma = 0.1
#sigma_source_position = 0.1
source_x = np.random.normal(source_position_mu, source_position_sigma)
source_y = np.random.normal(source_position_mu, source_position_sigma)
# Sersic parameters in the initial simulation
phi_G, q = 0.5, 0.8
sersic_source_e1, sersic_source_e2 = param_util.phi_q2_ellipticity(phi_G, q)
source_R_sersic_mu, source_R_sersic_sigma = 0.2, 0.1
source_n_sersic_mu, source_n_sersic_sigma = 1.0, 0.1
source_R_sersic = np.random.normal(source_R_sersic_mu, source_R_sersic_sigma)
source_n_sersic = np.random.normal(source_n_sersic_mu, source_n_sersic_sigma)
kwargs_sersic_source = {'amp': 1000, 'R_sersic': source_R_sersic, 'n_sersic': source_n_sersic, 'e1': sersic_source_e1, 'e2': sersic_source_e2, 'center_x': source_x, 'center_y': source_y}
#kwargs_else = {'sourcePos_x': source_x, 'sourcePos_y': source_y, 'quasar_amp': 400., 'gamma1_foreground': 0.0, 'gamma2_foreground':-0.0}
source_model_list = ['SERSIC_ELLIPSE']
kwargs_source = [kwargs_sersic_source]
source_model_class = LightModel(light_model_list=source_model_list)
# lens light model
phi_G, q = 0.9, 0.9
lens_light_e1, lens_light_e2 = param_util.phi_q2_ellipticity(phi_G, q)
lens_light_R_sersic_mu, lens_light_R_sersic_sigma = 0.3, 0.1
lens_light_n_sersic_mu, lens_light_n_sersic_sigma = 1.0, 0.1
lens_light_R_sersic = np.random.normal(lens_light_R_sersic_mu, lens_light_R_sersic_sigma)
lens_light_n_sersic = np.random.normal(lens_light_n_sersic_mu, lens_light_n_sersic_sigma)
kwargs_sersic_lens = {'amp': 1000, 'R_sersic': lens_light_R_sersic, 'n_sersic': lens_light_n_sersic , 'e1': lens_light_e1, 'e2': lens_light_e2, 'center_x': lens_center_x, 'center_y': lens_center_y}
lens_light_model_list = ['SERSIC_ELLIPSE']
kwargs_lens_light = [kwargs_sersic_lens]
lens_light_model_class = LightModel(light_model_list=lens_light_model_list)
for color_idx, band in enumerate(bands):
# lensing quantities
fwhm_list = [1.12, 0.96, 0.88, 0.84] # PSF median from arXiv:1708.01533
if color_idx ==0:
mag_agn = mag_g
elif color_idx ==1:
mag_agn = mag_r
elif color_idx ==2:
mag_agn = mag_i
elif color_idx ==3:
mag_agn = mag_z
kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg)
data_class = ImageData(**kwargs_data)
# generate the psf variables
#kernel_cut = kernel_util.cut_psf(kernel, kernel_size)
kwargs_psf = {'psf_type': psf_type, 'fwhm':fwhm_list[color_idx], 'pixel_size': deltaPix, 'truncation': 3}
#kwargs_psf = sim_util.psf_configure_simple(psf_type=psf_type, fwhm=fwhm, kernelsize=kernel_size, deltaPix=deltaPix, kernel=kernel)
psf_class = PSF(**kwargs_psf)
lensEquationSolver = LensEquationSolver(lens_model_class)
x_image, y_image = lensEquationSolver.findBrightImage(source_x, source_y, kwargs_lens, numImages=4,
min_distance=deltaPix, search_window=numPix * deltaPix)
mag = lens_model_class.magnification(x_image, y_image, kwargs=kwargs_lens)
kwargs_ps = [{'ra_image': x_image, 'dec_image': y_image,
'point_amp': np.abs(mag)*mag_agn[mag_agn_seed]}] # quasar point source position in the source plane and intrinsic brightness
point_source_list = ['LENSED_POSITION']
point_source_class = PointSource(point_source_type_list=point_source_list, fixed_magnification_list=[False])
kwargs_numerics = {'supersampling_factor': 1}
imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class,
lens_light_model_class, point_source_class, kwargs_numerics=kwargs_numerics)
# generate image
image_sim = imageModel.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
poisson = image_util.add_poisson(image_sim, exp_time=exp_time)
bkg = image_util.add_background(image_sim, sigma_bkd=sigma_bkg)
image_sim = image_sim + bkg + poisson
#image_sim = add_noise(image_sim, kwargs_band =DES_survey_noise)#image_sim# + bkg + poisson
data_class.update_data(image_sim)
kwargs_data['image_data'] = image_sim
kwargs_model = {'lens_model_list': lens_model_list,
'lens_light_model_list': lens_light_model_list,
'source_light_model_list': source_model_list,
'point_source_model_list': point_source_list
}
full_band_images[:, :, color_idx] += image_sim
##### saving files
#np.save(file_path + "lens" + "_" + "%07d" % (i+1) + ".npy", image_sim)
np.save(file_path + "full_band_lens" + "_" + "%07d" % (i+1) + ".npy", full_band_images)
##### saving parameters
lens_dict = kwargs_lens[0]
shear_dict = kwargs_shear
df_lens = pd.DataFrame([lens_dict], columns=lens_dict.keys())
df_lens['gamma_ext'] = gamma_ext
df_lens['psi_ext'] = psi_ext
#df_shear = pd.DataFrame([shear_dict], columns=shear_dict.keys())
df_lens['name'] = "lens" + "_" + "%07d" % (i+1)
df_lens['source_R_sersic'] = source_R_sersic
df_lens['source_n_sersic'] = source_n_sersic
df_lens['sersic_source_e1'] = sersic_source_e1
df_lens['sersic_source_e2'] = sersic_source_e2
df_lens['source_x'] = source_x
df_lens['source_y'] = source_y
df_lens['lens_light_e1'] = lens_light_e1
df_lens['lens_light_e2'] = lens_light_e2
df_lens['lens_light_R_sersic'] = lens_light_R_sersic
df_lens['lens_light_n_sersic'] = lens_light_n_sersic
df_lens['ps_counts'] = x_image.size
# lens_light_R_sersic = np.random.normal(lens_light_R_sersic_mu, lens_light_R_sersic_sigma)
# lens_light_n_sersic
if i > 0:
df_lens_podcast = pd.concat([df_lens_podcast, df_lens], axis =0).reset_index(drop=True)
else:
df_lens_podcast = df_lens
if show_img == True:
plt.figure(figsize=(20,10))
plt.subplot(1, 4, 1)
plt.imshow(full_band_images[:, :, 0])
#plt.colorbar()
plt.title("g")
plt.subplot(1, 4, 2)
plt.imshow(full_band_images[:, :, 1])
plt.title("r")
plt.subplot(1, 4, 3)
plt.imshow(full_band_images[:, :, 2])
plt.title("i")
plt.subplot(1, 4, 4)
plt.imshow(full_band_images[:, :, 3])
plt.title("z")
plt.show()
print("x_image", x_image.size)
df_lens_podcast = df_lens_podcast[['name', 'theta_E', 'gamma', 'center_x', 'center_y', 'e1', 'e2', 'gamma_ext', 'psi_ext', 'source_x', 'source_y', 'source_n_sersic', 'source_R_sersic', 'sersic_source_e1', 'sersic_source_e2', 'lens_light_e1', 'lens_light_e2', 'lens_light_n_sersic', 'lens_light_R_sersic', 'ps_counts']]
df_lens_podcast.to_csv(file_path + "lens_info.csv")
# In[9]:
from lenstronomy.SimulationAPI.sim_api import SimAPI
data = | pd.read_csv('merged_agn_lc.csv') | pandas.read_csv |
from django.test import TestCase
from transform_layer.services.data_service import DataService, KEY_SERVICE, KEY_MEMBER, KEY_FAMILY
from transform_layer.calculations import CalculationDispatcher
from django.db import connections
import pandas
from pandas.testing import assert_frame_equal, assert_series_equal
import unittest
class HasDataTestCase(unittest.TestCase):
def test_has_data_empty_dataframe(self):
data = pandas.DataFrame()
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_nonempty_dataframe(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = pandas.DataFrame(d1)
self.assertTrue(CalculationDispatcher.has_data(data))
def test_has_data_no_services(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(),
KEY_MEMBER: pandas.DataFrame(d1),
KEY_FAMILY: pandas.DataFrame(d1)
}
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_no_members(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(d1),
KEY_MEMBER: pandas.DataFrame(),
KEY_FAMILY: pandas.DataFrame(d1)
}
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_full_dict(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: | pandas.DataFrame(d1) | pandas.DataFrame |
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
from qutip import *
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import scipy
from .loading import load_settings
from .fitting import decay_gen
from ..tools.tools import metastable_calc_optimization, prob_objective_calc
class TransientResults:
def __init__(self, directory):
self.defect_list = []
self.exp_results = None
self.fit_params = None
self.ss_results = None
self.axes = None
self.fit = None
self.list = []
walk = os.walk(directory)
for path_info in walk:
path = path_info[0]
if os.path.exists(path + '/ss_results.csv'):
settings = load_settings(path + '/settings.csv')
ss_results = pd.read_csv(path + '/ss_results.csv', index_col=0)
ss_results['eps'] = settings.eps
ss_results['fd'] = settings.fd
ss_results.set_index(['eps', 'fd'], append=True, inplace=True)
self.list.append(ss_results)
if self.ss_results is None:
self.ss_results = ss_results
else:
self.ss_results = pd.concat([self.ss_results, ss_results], sort=True)
self.ss_results['a_exp'] = self.ss_results['a_op_re'] + 1j * self.ss_results['a_op_im']
self.ss_results['b_exp'] = self.ss_results['sm_op_re'] + 1j * self.ss_results['sm_op_im']
self.ss_results.sort_index(inplace=True)
def plot_transmission(self, axes=None, label=True):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_xlabel(r'$f_d$ (GHz)')
axes.set_ylabel(r'$|\langle a \rangle|$')
eps_level_idx = self.ss_results.index.names.index('eps')
eps_array = self.ss_results.index.levels[eps_level_idx]
for eps in eps_array:
cut = self.ss_results.xs(eps, level='eps')
fd_array = cut.index.get_level_values('fd')
transmission_array = np.abs(cut['a_exp'])
index_array = cut.index.get_level_values('job_index')
axes.plot(fd_array, transmission_array, marker='o')
if label:
for index, fd, trans in zip(index_array, fd_array, transmission_array):
axes.annotate(index, (fd, trans))
def plot_slowdown(self, axes=None, eps_indices=None, label=True, legend=False, interpolate=True, kwargs=dict()):
if self.fit_params is None:
self.slowdown_calc()
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
self.fit_params_pruned = self.fit_params.drop(labels=self.defect_list, level='job_index')
self.fit_params_pruned.sort_index(level='fd', inplace=True)
axes.set_xlabel(r'$f_d$ (GHz)')
axes.set_ylabel(r'$T_s$ ($\mu$s)')
eps_level_idx = self.ss_results.index.names.index('eps')
eps_array = self.ss_results.index.levels[eps_level_idx]
if eps_indices is not None:
eps_array = eps_array[eps_indices]
for eps in eps_array:
cut = self.fit_params_pruned.xs(eps, level='eps')
cut = cut.reset_index()
cut = cut.drop_duplicates(subset='fd')
fd_array = cut['fd'].values
slowdown_array = np.abs(cut['Ts'])
if legend:
legend_label = eps
else:
legend_label = ''
if interpolate:
new_fd_array = np.linspace(fd_array[0], fd_array[-1], 201)
interp_func = interp1d(fd_array, slowdown_array, kind='cubic')
axes.plot(new_fd_array, interp_func(new_fd_array), label=legend_label, **kwargs)
else:
axes.plot(fd_array, slowdown_array, label=legend_label, **kwargs)
if label:
index_array = cut['job_index'].values
for index, fd, Ts in zip(index_array, fd_array, slowdown_array):
axes.annotate(index, (fd, Ts))
def plot_slowdown_exp(self, axes=None, threshold=10.0, powers=None, errors=True, legend_label=True, kwargs=dict()):
if axes is None:
if self.axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
self.axes = axes
else:
axes = self.axes
mask = self.exp_results['error'] < threshold
self.exp_results_pruned = self.exp_results[mask]
if powers is None:
powers = self.exp_results_pruned.index.levels[0]
for power in powers:
cut = self.exp_results_pruned.xs(power, level=0, drop_level=False)
if legend_label:
label = power
else:
label = ''
if errors:
axes.errorbar(cut.index.get_level_values('fd'), cut['Ts'].values, yerr=cut['error'], label=label,
**kwargs)
else:
axes.plot(cut.index.get_level_values('fd'), cut['Ts'].values, label=label, **kwargs)
def load_transients(self, directory):
self.transients = None
self.slowdown = None
walk = os.walk(directory)
for path_info in walk:
path = path_info[0]
if os.path.exists(path + '/results.csv'):
settings = load_settings(path + '/settings.csv')
results = pd.read_csv(path + '/results.csv', index_col=0)
results.index /= (2 * np.pi * 1000)
results['job_index'] = settings.job_index
results['eps'] = settings.eps
results['fd'] = settings.fd
results.set_index(['job_index', 'eps', 'fd'], append=True, inplace=True)
if self.transients is None:
self.transients = results
else:
self.transients = pd.concat([self.transients, results], sort=True)
self.transients = self.transients.reorder_levels(['job_index', 'eps', 'fd', 'times'])
self.transients.sort_index(inplace=True)
def plot_transient(self, job_index, y_quantity='a_op_re', axes=None):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_xlabel(r'Time ($\mu$s)')
transient = self.transients.xs(job_index, level='job_index')
axes.plot(transient.index.get_level_values('times'), transient[y_quantity])
def plot_fit(self, job_index, axes=None):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_xlabel(r'Time ($\mu$s)')
transient = self.transients.xs(job_index, level='job_index')
times = transient.index.get_level_values('times')
axes.plot(times, transient['a_op_re'])
if job_index not in self.fit_params.index.get_level_values('job_index'):
self.fit_transient(job_index)
a_op_re = self.ss_results['a_op_re'].xs(job_index, level='job_index').values[0]
decay_func = decay_gen(a_op_re)
popt = self.fit_params.xs(job_index, level='job_index').values[0, :]
axes.plot(times, decay_func(times, *popt))
def fit_transient(self, transient_index, t0=3.0):
package_index = self.ss_results.xs(transient_index, level=self.ss_results.index.names).index
i_signal = self.transients['a_op_re'].loc[transient_index]
t_end = i_signal.index.get_level_values('times')[-1]
i_sample = i_signal.loc[t0:t_end]
if i_sample.shape[0] > 1:
times = i_sample.index.get_level_values('times')
a_ss_re = self.ss_results.xs(transient_index, level=self.ss_results.index.names)['a_op_re'].values[0]
decay_fixed = decay_gen(a_ss_re)
d_est = (i_sample.iloc[-1] - i_sample.iloc[0]) / (times[-1] - times[0])
T_est = (a_ss_re - i_sample.iloc[0]) / d_est
A_est = -d_est * T_est * np.exp(times[0] / T_est)
popt, pcov = curve_fit(f=decay_fixed, xdata=times, ydata=i_sample.values, p0=[A_est, T_est])
fit_params = pd.DataFrame(np.array([popt]), index=package_index, columns=['A', 'Ts'])
if self.fit_params is None:
self.fit_params = fit_params
else:
self.fit_params = pd.concat([self.fit_params, fit_params], sort=True)
self.fit_params.sort_index(inplace=True, level=['eps', 'fd'])
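# Hedged note: decay_gen(a_ss_re) (imported from .fitting) is assumed to return an exponential
# relaxation toward the steady-state value, roughly decay(t, A, Ts) = a_ss_re + A * exp(-t / Ts);
# the initial guesses d_est, T_est and A_est above are consistent with that form.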
def slowdown_calc(self):
transient_indices = self.transients.index.droplevel('times')
steadystate_indices = self.ss_results.index
for index in set(transient_indices).intersection(steadystate_indices):
try:
self.fit_transient(index)
except Exception as e:
print(e, index)
self.fit_params.sort_index(level=['eps', 'fd'], inplace=True)
def load_exp(self, path):
self.exp_results = pd.read_hdf(path)
def load_calibration(self, path):
self.calibration = pd.read_hdf(path)
def load_exp_spectra(self, path):
self.exp_spectra = pd.read_hdf(path)
def load_states(self, directory):
self.states = None
walk = os.walk(directory)
for path_info in walk:
path = path_info[0]
if os.path.exists(path + '/results.csv'):
try:
settings = load_settings(path + '/settings.csv')
steady_state = qload(path + '/steady_state')
checkpoint_state = qload(path + '/state_checkpoint')
checkpoint_state += checkpoint_state.dag()
checkpoint_state /= checkpoint_state.tr()
packaged_states = pd.DataFrame(np.array([[steady_state, checkpoint_state]]),
columns=['steady', 'checkpoint'])
packaged_states['job_index'] = settings.job_index
packaged_states['eps'] = settings.eps
packaged_states['fd'] = settings.fd
packaged_states.set_index(['job_index', 'eps', 'fd'], append=False, inplace=True)
if self.states is None:
self.states = packaged_states
else:
self.states = pd.concat([self.states, packaged_states], sort=True)
except Exception as e:
print(e, path)
self.states = self.states.reorder_levels(['job_index', 'eps', 'fd'])
self.states.sort_index(inplace=True, level=['eps', 'fd'])
def metastable_calc(self):
self.metastable_states = None
states = parallel_map(metastable_calc_task, self.states.index, task_args=(self.states,), num_cpus=10)
self.metastable_states = pd.concat(states)
self.metastable_states = self.metastable_states.reorder_levels(['job_index', 'eps', 'fd'])
self.metastable_states.sort_index(inplace=True, level=['eps', 'fd'])
def occupations_calc(self):
self.occupations = None
for idx in self.states.index:
print(idx)
try:
states = self.states.loc[idx]
rho_steady = states.steady
metastable_states = self.metastable_states.loc[idx]
rho_d = metastable_states.dim
rho_b = metastable_states.bright
res = scipy.optimize.minimize(prob_objective_calc, 0.0, method='Nelder-Mead',
args=(rho_d.ptrace(0), rho_b.ptrace(0), rho_steady.ptrace(0)))
p_d = res.x[0]
p_b = 1 - p_d
rho = p_d * rho_d + p_b * rho_b
distance = tracedist(rho_steady, rho)
packaged_occupations = pd.DataFrame(np.array([[p_d, p_b, distance]]), columns=['d', 'b', 'distance'])
packaged_occupations['job_index'] = idx[0]
packaged_occupations['eps'] = idx[1]
packaged_occupations['fd'] = idx[2]
packaged_occupations.set_index(['job_index', 'eps', 'fd'], append=False, inplace=True)
if self.occupations is None:
self.occupations = packaged_occupations
else:
self.occupations = pd.concat([self.occupations, packaged_occupations], sort=True)
except Exception as e:
print(e, idx)
self.occupations = self.occupations.reorder_levels(['job_index', 'eps', 'fd'])
self.occupations.sort_index(inplace=True, level=['eps', 'fd'])
def amplitudes_calc(self):
self.amplitudes = None
for idx in self.metastable_states.index:
print(idx)
try:
metastable_states = self.metastable_states.loc[idx]
rho_d = metastable_states.dim
rho_b = metastable_states.bright
c_levels = rho_b.dims[0][0]
t_levels = rho_b.dims[0][1]
a = tensor(destroy(c_levels), qeye(t_levels))
b = tensor(qeye(c_levels), destroy(t_levels))
amplitudes = np.array([[expect(rho_b, a), expect(rho_d, a), expect(rho_b, b), expect(rho_d, b)]])
amplitudes = pd.DataFrame(amplitudes, columns=['a_b', 'a_d', 'b_b', 'b_d'])
amplitudes['job_index'] = idx[0]
amplitudes['eps'] = idx[1]
amplitudes['fd'] = idx[2]
amplitudes.set_index(['job_index', 'eps', 'fd'], append=False, inplace=True)
if self.amplitudes is None:
self.amplitudes = amplitudes
else:
self.amplitudes = pd.concat([self.amplitudes, amplitudes], sort=True)
except Exception as e:
print(e, idx)
self.amplitudes = self.amplitudes.reorder_levels(['job_index', 'eps', 'fd'])
self.amplitudes.sort_index(inplace=True, level=['eps', 'fd'])
def rates_calc(self):
self.rates = None
mi1 = self.fit_params.index
mi2 = self.occupations.index
mi_common = mi1.intersection(mi2)
mi_common = mi_common.sort_values()
for idx in mi_common:
T_s = self.fit_params.loc[idx]['Ts']
p_b = self.occupations.loc[idx].b
p_d = self.occupations.loc[idx].d
rate_bd = p_d / T_s
rate_db = p_b / T_s
rates = np.array([[rate_bd, rate_db]])
rates = pd.DataFrame(rates, columns=['bd', 'db'])
rates['job_index'] = idx[0]
rates['eps'] = idx[1]
rates['fd'] = idx[2]
rates.set_index(['job_index', 'eps', 'fd'], append=False, inplace=True)
if self.rates is None:
self.rates = rates
else:
self.rates = | pd.concat([self.rates, rates], sort=True) | pandas.concat |
import pandas as pd
import numpy as np
import json
from bs4 import BeautifulSoup
import requests
import matplotlib.pyplot as plt
# save data
import pickle
def save(data,fileName):
with open(fileName+'.dat', 'wb') as f:
pickle.dump(data, f)
def load(fileName):
with open(fileName+'.dat', 'rb') as f:
new_data = pickle.load(f)
return new_data
pivoted_portfolio = pd.read_csv("crypto_prices_sub.csv")
# get covariance & returns of the coin - daily & for the period
daily_returns = pivoted_portfolio.pct_change()
# daily_returns_sum = daily_returns.sum()
# daily_returns_cumsum = np.exp(np.log1p(daily_returns).cumsum())-1
# daily_cor = daily_returns.corr()
# daily_cor_sub = daily_cor[daily_cor < 0.5]
# print(daily_cor_sub)
# figure = ff.create_annotated_heatmap(
# z=corrs.values,
# x=list(corrs.columns),
# y=list(corrs.index),
# annotation_text=corrs.round(2).values,
# showscale=True)
# plt.matshow(daily_cor)
# plt.show()
# print(daily_cor.info())
# print(daily_cor.loc['priceUsd','bitcoin'].sort_values(ascending=True))
# daily_returns_cumsum.to_csv('file_name.csv', index=False)
# daily_cor.to_csv('bitcoincorrelations.csv', index=False)
# pivoted_portfolio.to_csv('crypto_prices.csv', index=False)
# print(daily_returns_cumsum.iloc[-1].sort_values(ascending=False))
# period_returns = daily_returns.mean()*729
# daily_covariance = daily_returns.cov()
# period_covariance = daily_covariance*729
p_returns, p_volatility, p_sharpe_ratio, p_coin_weights=([] for i in range(4))
# portfolio combinations to probe
number_of_cryptoassets = len(daily_returns.columns)
number_crypto_portfolios = 10000
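# Hedged alternative to the explicit loop below: the value path of one weighted portfolio can be
# computed directly from the daily returns; start_value mirrors the 100.0 used in the loop.
def portfolio_value_path_example(returns_df, weights, start_value=100.0):
    port_daily = returns_df.fillna(0).dot(weights)  # weighted daily portfolio return
    return start_value * (1.0 + port_daily).cumprod()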
# for each portfolio, get returns, risk and weights
for a_crypto_portfolio in range(number_crypto_portfolios):
weights = np.random.random(number_of_cryptoassets)
weights /= np.sum(weights)
# print(weights)
cumsum = []
cumsum.append( 100.0)
for index, row in daily_returns.iterrows():
# print(index)
if index > 0:
returnsPort = np.dot(weights, row)
# print(returnsPort)
portValue = cumsum[index-1] + (cumsum[index-1] * returnsPort)
# print(cumsum[index-1],portValue)
cumsum.append(portValue)
# returns = np.dot(weights, period_returns)*100
returns = 100 * (cumsum[-1] - cumsum[0])/cumsum[0]
# print(returns)
# print(cumsum)
cumsum = | pd.Series(cumsum) | pandas.Series |
# Este script toma todas las cuentas de POS y crea un nuevo dataset de ellas.
import pandas as pd
import os
datasets = [file for file in os.listdir(os.path.join("..","data","processed")) if "POS" in file]
filepath_in = os.path.join("..","data","processed", datasets[0])
data = pd.DataFrame()
for data_file in datasets:
filepath_in = os.path.join("..","data","processed", data_file)
df = | pd.read_csv(filepath_in) | pandas.read_csv |
#!/usr/bin/env python
# coding=utf-8
"""
@version: 0.1
@author: li
@file: factor_revenue_quality.py
@time: 2019-01-28 11:33
"""
import gc, six
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
import numpy as np
import pandas as pd
import json
from pandas.io.json import json_normalize
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorRevenueQuality(object):
"""
Revenue quality (收益质量)
"""
def __init__(self):
__str__ = 'factor_revenue_quality'
self.name = '财务指标'  # "Financial indicators"
self.factor_type1 = '财务指标'  # "Financial indicators"
self.factor_type2 = '收益质量'  # "Revenue quality"
self.description = '财务指标的二级指标, 收益质量'  # "Secondary financial indicator: revenue quality"
@staticmethod
def NetNonOIToTP(tp_revenue_quanlity, revenue_quality, dependencies=['total_profit', 'non_operating_revenue', 'non_operating_expense']):
"""
:name: Net non-operating income and expenses / total profit (营业外收支净额/利润总额)
:desc: Net non-operating income and expenses / total profit
:unit:
:view_dimension: 0.01
"""
earning = tp_revenue_quanlity.loc[:, dependencies]
earning['NetNonOIToTP'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.non_operating_revenue.values +
earning.non_operating_expense.values)
/ earning.total_profit.values
)
earning = earning.drop(dependencies, axis=1)
revenue_quality = pd.merge(revenue_quality, earning, how='outer', on="security_code")
return revenue_quality
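# Worked check of the ratio above (synthetic numbers, hedged): with non_operating_revenue = 5,
# non_operating_expense = -2 and total_profit = 30, NetNonOIToTP = (5 + (-2)) / 30 = 0.1.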
@staticmethod
def NetNonOIToTPTTM(ttm_revenue_quanlity, revenue_quality, dependencies=['total_profit', 'non_operating_revenue', 'non_operating_expense']):
"""
:name: Net non-operating income and expenses (TTM) / total profit (TTM)
:desc: Net non-operating income and expenses (TTM) / total profit (TTM)
:unit:
:view_dimension: 0.01
"""
earning = ttm_revenue_quanlity.loc[:, dependencies]
earning['NetNonOIToTPTTM'] = np.where(
CalcTools.is_zero(earning.total_profit.values), 0,
(earning.non_operating_revenue.values +
earning.non_operating_expense.values)
/ earning.total_profit.values
)
earning = earning.drop(dependencies, axis=1)
revenue_quality = | pd.merge(revenue_quality, earning, how='outer', on="security_code") | pandas.merge |
# import packages
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import time
import datetime as dt
import json
from pathlib import Path
import pickle
import os, glob
import warnings
warnings.filterwarnings('ignore')
import feature_engineering_functions as fef
### Merge and Read files
path = '/Users/petrapinger/neuefische/MPO-Kickstarter/data'
def read_files():
""" Only create new csv if not already done"""
if not Path("./data/Kickstarter_merged.csv").exists():
# Read and merge .csv-files
# Read all .csv-files
all_files = glob.glob(os.path.join(path, "Kickstarter*.csv"))
df_from_each_file = (pd.read_csv(f, sep=',') for f in all_files)
# Merge .csv-files
df_merged = pd.concat(df_from_each_file, ignore_index=True)
df_merged.to_csv('./data/Kickstarter_merged.csv')
"""Otherwise just read in dataframe from merged .csv file"""
return pd.read_csv('./data/Kickstarter_merged.csv', index_col=0)
df = read_files()
# drop rows with suspended, live and canceled status
df = fef.drop_rows_value(df, 'state', 'suspended')
df = fef.drop_rows_value(df, 'state', 'live')
df = fef.drop_rows_value(df, 'state', 'canceled')
## splitting into X and y
y = df.state
X = df.drop('state', axis=1)
# splittin into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, stratify = y)
## To exemplify how prediction will work later, save the held-out test split to disk.
print("Saving test data in the data folder")
X_test.to_csv("data/X_test.csv", index=False)
y_test.to_csv("data/y_test.csv", index=False)
X_train = pd.concat([X_train, y_train], axis=1)
X_train.reset_index(inplace = True)
X_test.reset_index(inplace= True)
#y_train.reset_index(inplace = True)
#y_test.reset_index(inplace = True)
## Create new columns
# Blurb Length
X_train = fef.add_blurb_len_w(X_train)
# Slug Length
X_train = fef.add_slug_len_w(X_train)
# Category name aka parent_name
X_train = fef.add_parent_id(X_train)
X_train = fef.add_category_id(X_train)
X_train = fef.add_category_name(X_train)
X_train = fef.fill_na(X_train, 'category_parent_id')
empty = []
for i in range(X_train.shape[0]):
if X_train["category_parent_id"][i] != 0:
empty.append(X_train["category_parent_id"][i])
else:
empty.append(X_train["category_id"][i])
X_train = fef.add_list_as_column(X_train, "filled_parent", empty)
X_train = fef.add_parent_name(X_train, "parent_name", "filled_parent", {1: "Art", 3: "Comics", 6: "Dance", 7: "Design", 9: "Fashion", 10: "Food",
11: "Film & Video", 12: "Games", 13: "Journalism", 14: "Music", 15: "Photography", 16: "Technology",
17: "Theater", 18: "Publishing", 26: "Crafts"})
#Month launched
X_train = fef.adding_month_launched(X_train)
# Duration
X_train = fef.adding_duration(X_train)
# Preparation
X_train = fef.adding_preparation(X_train)
# Reward Size = pledged/backer
X_train = fef.adding_pledged_per_backer(X_train)
# Coverting Goal to USD
X_train = fef.usd_convert_goal(X_train, 'goal', 'static_usd_rate')
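# Hedged assumption: fef.usd_convert_goal is presumed to multiply `goal` by `static_usd_rate`,
# putting goals set in different currencies on a common USD scale (e.g. 1000 * 1.3 -> 1300).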
X_train.pledged_per_backer = X_train.pledged_per_backer.fillna(0).astype("int")
X_train['state'] = np.where(X_train['state'] == 'successful', 1, 0)
## Drop rows
#Missings in 'blurb'
X_train = fef.drop_rows_missings(X_train, 'blurb')
#Duplicates
X_train = fef.drop_duplicates(X_train, 'id')
# drop rows with goals = 0
X_train = fef.drop_rows_value(X_train, 'goal', 0)
## drop columns
X_train = fef.drop_columns(X_train, ['backers_count', 'blurb', 'category', 'converted_pledged_amount',
'country', 'created_at', 'creator', 'currency', 'currency_symbol',
'currency_trailing_code', 'current_currency', 'deadline',
'disable_communication', 'friends', 'fx_rate', 'id',
'is_backing', 'is_starrable', 'is_starred', 'launched_at', 'location',
'name', 'permissions', 'photo', 'pledged', 'profile', 'slug',
'source_url', 'spotlight', 'state_changed_at',
'static_usd_rate', 'urls', 'usd_type', 'category_parent_id', 'category_id', 'category_name',
'filled_parent', 'staff_pick'])
## Drop Rows and only keep relevant categories
categories = ["Games", "Art", "Photography", "Film & Video", "Design", "Technology"]
X_train = X_train[X_train.parent_name.isin(categories)]
## get Dummies
# convert the categorical variable parent_name into dummy/indicator variables
X_train_dum2 = | pd.get_dummies(X_train.parent_name, prefix='parent_name') | pandas.get_dummies |
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from scipy import signal
def cont2discrete(sys, dt, method='bilinear'):
discrete_sys = signal.cont2discrete(sys, dt, method=method)[:-1]
if len(discrete_sys) == 2:
discrete_sys = tuple(np.squeeze(b_or_a) for b_or_a in discrete_sys)
return discrete_sys
def log_bins(x, y, r=1.5):
"""Average data over logrithmically spaced intervals of fractional size r.
Nice for plotting data on log-log plots."""
if r <= 1:
raise ValueError('r ({}) must be greater than 1.'.format(r))
df = | pd.DataFrame({'x': x, 'y': y}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import shutil
import ipdb
import numpy as np
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
import time
from functools import wraps
import collections
import sklearn
import utils
# from utils import log_time_delta
from tqdm import tqdm
from dataloader import Dataset
import torch
from torch.autograd import Variable
from codecs import open
try:
import cPickle as pickle
except ImportError:
import pickle
data_path = '.data/imdb/'
class IMDBDataset(Dataset):
"""IMDB dataset."""
def __init__(self, data, labels, transform=None):
self.data = data
self.labels = labels
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x = self.data[idx]
label = self.labels[idx]
sample = {'text': x, 'labels': label}
if self.transform:
sample = self.transform(sample)
return sample
def dev_train_split(data_path=data_path, perc=0.20):
'''
Randomly select `perc` of training set, and save as validation set.
'''
moved = 0
train_dir = os.path.join(data_path,'aclImdb', 'train')
dev_dir = os.path.join(data_path,'aclImdb', 'dev')
# Lists of neg and pos file paths
for label in ['pos', 'neg']:
dir_from = os.path.join(data_path,'aclImdb', 'train', label)
files = os.listdir(dir_from)
random.shuffle(files)
num = int(len(files)*perc)
data = files[:num]
dir_to = os.path.join(data_path,'aclImdb', 'dev', label)
if not os.path.exists(dir_to):
os.makedirs(dir_to)
# Move
for f in data:
file_from = os.path.join(dir_from, f)
file_to = os.path.join(dir_to, f)
shutil.move(file_from, file_to)
moved += 1
print("Moved {} files from {} to {}".format(moved, train_dir, dev_dir))
def read_imdb(data_path=data_path, mode='train'):
'''
return: list of [review, label]
'''
data = []
for label in ['pos', 'neg']:
folder = os.path.join(data_path,'aclImdb', mode, label)
for file_name in os.listdir(folder):
with open(os.path.join(folder, file_name), 'rb') as f:
review = f.read().decode('utf-8').replace('\n', '').strip().lower()
data.append([review, 1 if label=='pos' else 0])
random.shuffle(data)
return data
def tokenize_imdb(data):
'''
return: list of [w1, w2,...,]
'''
def tokenizer(text):
return [tok.lower() for tok in text.split(' ')]
return [tokenizer(review) for review, _ in data]
def get_vocab_imdb(data):
'''
return: text.vocab.Vocabulary, each word appears at least 5 times.
'''
tokenized = tokenize_imdb(data)
counter = collections.Counter([tk for st in tokenized for tk in st])
return utils.Vocabulary(counter, min_freq=5)
def preprocess_imdb(data, vocab, max_len):
'''
truncate or pad sentence to max_len
return: X: list of [list of word index]
y: list of label
'''
def pad(x):
return x[:max_len] if len(x)>max_len else x+[0]*(max_len-len(x))
tokenize = tokenize_imdb(data)
X = np.array([pad(vocab.to_indices(x)) for x in tokenize])
y = np.array([tag for _, tag in data])
return X, y
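# End-to-end sketch (hedged): assumes the aclImdb folder layout under `data_path` that the
# helpers above expect; max_len=500 is an arbitrary illustrative truncation length.
def build_imdb_arrays_example(max_len=500):
    train_data = read_imdb(mode='train')
    vocab = get_vocab_imdb(train_data)
    X, y = preprocess_imdb(train_data, vocab, max_len)
    return X, y, vocab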
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
class Alphabet(dict):
def __init__(self, start_feature_id = 1, alphabet_type="text"):
self.fid = start_feature_id
if alphabet_type=="text":
self.add('[PADDING]')
self.add('[UNK]')
self.add('[END]')
self.unknow_token = self.get('[UNK]')
self.end_token = self.get('[END]')
self.padding_token = self.get('[PADDING]')
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def addAll(self,words):
for word in words:
self.add(word)
def dump(self, fname,path="temp"):
if not os.path.exists(path):
os.mkdir(path)
with open(os.path.join(path,fname), "w",encoding="utf-8") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
class DottableDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
self.allowDotting()
def allowDotting(self, state=True):
if state:
self.__dict__ = self
else:
self.__dict__ = dict()
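# Usage sketch: DottableDict allows attribute-style access to its keys, e.g.
#   opt = DottableDict({"batch_size": 32}); opt.batch_size  # -> 32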
class BucketIterator(object):
def __init__(self,data,opt=None,batch_size=2,shuffle=True,test=False,position=False):
self.shuffle=shuffle
self.data=data
self.batch_size=batch_size
self.test=test
if opt is not None:
self.setup(opt)
def setup(self,opt):
self.batch_size=opt.batch_size
self.shuffle=opt.__dict__.get("shuffle",self.shuffle)
self.position=opt.__dict__.get("position",False)
if self.position:
self.padding_token = opt.alphabet.padding_token
def transform(self,data):
if torch.cuda.is_available():
data=data.reset_index()
text= Variable(torch.LongTensor(data.text).cuda())
label= Variable(torch.LongTensor([int(i) for i in data.label.tolist()]).cuda())
else:
data=data.reset_index()
text= Variable(torch.LongTensor(data.text))
label= Variable(torch.LongTensor(data.label.tolist()))
if self.position:
position_tensor = self.get_position(data.text)
return DottableDict({"text":(text,position_tensor),"label":label})
return DottableDict({"text":text,"label":label})
def get_position(self,inst_data):
inst_position = np.array([[pos_i+1 if w_i != self.padding_token else 0 for pos_i, w_i in enumerate(inst)] for inst in inst_data])
inst_position_tensor = Variable( torch.LongTensor(inst_position), volatile=self.test)
if torch.cuda.is_available():
inst_position_tensor=inst_position_tensor.cuda()
return inst_position_tensor
def __iter__(self):
if self.shuffle:
self.data = self.data.sample(frac=1).reset_index(drop=True)
batch_nums = int(len(self.data)/self.batch_size)
for i in range(batch_nums):
yield self.transform(self.data[i*self.batch_size:(i+1)*self.batch_size])
yield self.transform(self.data[-1*self.batch_size:])
@log_time_delta
def vectors_lookup(vectors,vocab,dim):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size=-1):
vectors = {}
# ipdb.set_trace()
with open(filename,encoding='utf-8') as f:
for line in tqdm(f):
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( 'embedding_size',embedding_size)
print( 'vocab_size in pretrained embedding',vocab_size)
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'words need to be found ',len(alphabet))
print( 'words found in wor2vec embedding ',len(vectors.keys()))
if embedding_size==-1:
embedding_size = len(vectors[list(vectors.keys())[0]])
return vectors,embedding_size
def getEmbeddingFile(opt):
#"glove" "w2v"
embedding_name = opt.__dict__.get("embedding","glove_6b_300")
if embedding_name.startswith("glove"):
return os.path.join( ".vector_cache","glove.6B.300d.txt")
else:
return opt.embedding_dir
# please refer to https://pypi.python.org/pypi/torchwordemb/0.0.7
return
@log_time_delta
def getSubVectors(opt,alphabet):
pickle_filename = "temp/"+opt.dataset+".vec"
if not os.path.exists(pickle_filename) or opt.use_glove:
glove_file = getEmbeddingFile(opt)
ipdb.set_trace()
wordset = list(sorted(set(alphabet.keys()))) # python 2.7
# ipdb.set_trace()
loaded_vectors,embedding_size = load_text_vec(wordset,glove_file)
vectors = vectors_lookup(loaded_vectors,alphabet,embedding_size)
if opt.debug:
if not os.path.exists("temp"):
os.mkdir("temp")
with open("temp/oov.txt","w","utf-8") as f:
unknown_set = set(alphabet.keys()) - set(loaded_vectors.keys())
f.write("\n".join( unknown_set))
if opt.debug:
pickle.dump(vectors,open(pickle_filename,"wb"))
return vectors
else:
print("load cache for SubVector")
return pickle.load(open(pickle_filename,"rb"))
def getDataSet(opt):
import dataloader
dataset= dataloader.getDataset(opt)
# files=[os.path.join(data_dir,data_name) for data_name in ['train.txt','test.txt','dev.txt']]
return dataset.getFormatedData()
#data_dir = os.path.join(".data/clean",opt.dataset)
#if not os.path.exists(data_dir):
# import dataloader
# dataset= dataloader.getDataset(opt)
# return dataset.getFormatedData()
#else:
# for root, dirs, files in os.walk(data_dir):
# for file in files:
# yield os.path.join(root,file)
# files=[os.path.join(data_dir,data_name) for data_name in ['train.txt','test.txt','dev.txt']]
import re
def clean(text):
# text="'tycoon.<br'"
for token in ["<br/>","<br>","<br"]:
text = re.sub(token," ",text)
text = re.sub("[\s+\.\!\/_,$%^*()\(\)<>+\"\[\]\-\?;:\'{}`]+|[+——!,。?、~@#¥%……&*()]+", " ",text)
# print("%s $$$$$ %s" %(pre,text))
return text.lower().split()
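# Rough example of clean() (hedged): clean("Great movie!<br/>Loved it.") yields approximately
# ['great', 'movie', 'loved', 'it'] after tag removal, punctuation stripping and splitting.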
@log_time_delta
def get_clean_datas(opt):
pickle_filename = "temp/"+opt.dataset+".data"
if not os.path.exists(pickle_filename) or opt.debug:
datas = []
for filename in getDataSet(opt):
df = pd.read_csv(filename,header = None,sep="\t",names=["text","label"]).fillna('0')
# df["text"]= df["text"].apply(clean).str.lower().str.split() #replace("[\",:#]"," ")
df["text"]= df["text"].apply(clean)
datas.append(df)
if opt.debug:
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump(datas,open(pickle_filename,"wb"))
return datas
else:
print("load cache for data")
return pickle.load(open(pickle_filename,"rb"))
def load_vocab_from_bert(bert_base):
bert_vocab_dir = os.path.join(bert_base,"vocab.txt")
alphabet = Alphabet(start_feature_id = 0,alphabet_type="bert")
from pytorch_pretrained_bert import BertTokenizer
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained(bert_vocab_dir)
for index,word in tokenizer.ids_to_tokens.items():
alphabet.add(word)
return alphabet,tokenizer
def process_with_bert(text,tokenizer,max_seq_len) :
tokens =tokenizer.convert_tokens_to_ids( tokenizer.tokenize(" ".join(text[:max_seq_len])))
return tokens[:max_seq_len] + [0] *int(max_seq_len-len(tokens))
def loadData(opt,embedding=True):
if opt.no_load_embedding==False:
return loadDataWithoutEmbedding(opt)
datas =get_clean_datas(opt)
alphabet = Alphabet(start_feature_id = 0)
label_alphabet= Alphabet(start_feature_id = 0,alphabet_type="label")
df=pd.concat(datas)
df.to_csv("demo.text",sep="\t",index=False)
label_set = set(df["label"])
label_alphabet.addAll(label_set)
opt.label_size= len(label_alphabet)
if opt.max_seq_len==-1:
opt.max_seq_len = df.apply(lambda row: row["text"].__len__(),axis=1).max()
if "bert" not in opt.model.lower():
word_set=set()
[word_set.add(word) for l in df["text"] if l is not None for word in l ]
word_set = sorted(list(word_set))
# ipdb.set_trace()
# from functools import reduce
# word_set=set(reduce(lambda x,y :x+y,df["text"]))
alphabet.addAll(word_set)
vectors = getSubVectors(opt,alphabet)
opt.vocab_size= len(alphabet)
# opt.label_size= len(label_alphabet)
opt.embedding_dim= vectors.shape[-1]
opt.embeddings = torch.FloatTensor(vectors)
else:
alphabet,tokenizer = load_vocab_from_bert(opt.bert_dir)
opt.alphabet=alphabet
# alphabet.dump(opt.dataset+".alphabet")
for data in datas:
if "bert" not in opt.model.lower():
data["text"]= data["text"].apply(lambda text: [alphabet.get(word,alphabet.unknow_token) for word in text[:opt.max_seq_len]] + [alphabet.padding_token] *int(opt.max_seq_len-len(text)) )
else :
data["text"]= data["text"].apply(process_with_bert,tokenizer=tokenizer,max_seq_len = opt.max_seq_len)
data["label"]=data["label"].apply(lambda text: label_alphabet.get(text))
return map(lambda x:BucketIterator(x,opt),datas)#map(BucketIterator,datas) #
def loadDataWithoutEmbedding(opt):
datas=[]
for filename in getDataSet(opt):
df = | pd.read_csv(filename,header = None,sep="\t",names=["text","label"]) | pandas.read_csv |
import pandas as pd
import numpy as np
import copy
import re
import string
# Note: this requires nltk.download() first as described in the README.
# from nltk.book import *
from nltk.corpus import stopwords
from nltk.tokenize import TreebankWordTokenizer
from collections import Counter, OrderedDict
from sklearn.model_selection import train_test_split
from app.lib.utils.jsonl import jsonl_to_df
"""
Sources:
Loading JSONL: https://medium.com/@galea/how-to-love-jsonl-using-json-line-format-in-your-workflow-b6884f65175b
NLTK Reference: http://www.nltk.org/book/ch01.html
NLTK word counter reference: https://www.strehle.de/tim/weblog/archives/2015/09/03/1569
"""
class WordTokenizer(object):
def __init__(self):
pass
def _user_grouper(self, filename):
# For each unique user, join all tweets into one tweet row in the new df.
db_cols = ['search_query', 'id_str', 'full_text', 'created_at', 'favorite_count', 'username', 'user_description']
tweets_df = jsonl_to_df(filename, db_cols)
users = list(tweets_df['username'].unique())
tweets_by_user_df = pd.DataFrame(columns=['username', 'user_description', 'tweets'])
# Iterate through all users.
for i, user in enumerate(users):
trunc_df = tweets_df[tweets_df['username'] == user]
user_description = trunc_df['user_description'].tolist()[0]
string = ' '.join(trunc_df["full_text"])
tweets_by_user_df = tweets_by_user_df.append({'username': user, 'user_description': user_description, 'tweets': string}, ignore_index=True)
# Return the data frame with one row per user, tweets concatenated into one string.
return tweets_by_user_df
def _parse_doc(self, text):
text = text.lower()
text = re.sub(r'&(.)+', "", text) # no & references
text = re.sub(r'pct', 'percent', text) # replace pct abreviation
text = re.sub(r"[^\w\d'\s]+", '', text) # no punct except single quote
text = re.sub(r'[^\x00-\x7f]', r'', text) # no non-ASCII strings
# Omit words that are all digits
if text.isdigit():
text = ""
# # Get rid of escape codes
# for code in codelist:
# text = re.sub(code, ' ', text)
# Replace multiple spacess with one space
text = re.sub('\s+', ' ', text)
return text
def _parse_words(self, text):
# split document into individual words
tokens = text.split()
re_punc = re.compile('[%s]' % re.escape(string.punctuation))
# remove punctuation from each word
tokens = [re_punc.sub('', w) for w in tokens]
# remove remaining tokens that are not alphabetic
tokens = [word for word in tokens if word.isalpha()]
# filter out tokens that are one or two characters long
tokens = [word for word in tokens if len(word) > 2]
# filter out tokens that are more than twenty characters long
tokens = [word for word in tokens if len(word) < 21]
# recreate the document string from parsed words
text = ''
for token in tokens:
text = text + ' ' + token
return tokens, text
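# Usage sketch (hedged):
#   wt = WordTokenizer()
#   tokens, text = wt._parse_words(wt._parse_doc("Markets RALLIED 5% today!!!"))
#   # tokens -> roughly ['markets', 'rallied', 'today']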
def _get_train_test_data(self, filename, only_known=True):
# Get df, and list of all users' tweets.
tweets_by_user_df = self._user_grouper(filename)
# Get user classes
db_cols = ['class', 'user_description', 'username']
user_class_df = jsonl_to_df('users', db_cols)
user_class_df = user_class_df[['username', 'class']]
tagged_df = pd.merge(tweets_by_user_df, user_class_df, left_on='username', right_on='username')
if only_known:
tagged_df = tagged_df[tagged_df['class'] != 'U']
train, test = train_test_split(tagged_df, test_size=0.2, random_state=60)
train_target = train['class']
test_target = test['class']
return train, test, train_target, test_target
def _get_all_classes(self, filename, sample_ratio=1):
# Get df, and list of all users' tweets.
tweets_by_user_df = self._user_grouper(filename)
# Get user classes
db_cols = ['class', 'user_description', 'username']
user_class_df = jsonl_to_df('users', db_cols)
user_class_df = user_class_df[['username', 'class']]
tagged_df = | pd.merge(tweets_by_user_df, user_class_df, left_on='username', right_on='username') | pandas.merge |
import pandas as pd
pd.options.display.max_rows=9999
pd.options.display.max_columns=15
| pd.set_option("display.max_columns", 100) | pandas.set_option |