repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
Wonjuseo/Project101 | 2/2-3. FrozenLake3.py | 1 | 2023 | import gym
import numpy as np
import matplotlib.pyplot as plt
from gym.envs.registration import register
import random as pr
import tensorflow as tf
def rargmax(vector):
# random argmax
m = np.max(vector)
indices = np.nonzero(vector == m)[0]
return pr.choice(indices)
# Reward Update Q
# Algorithm
# For each s,a initialize table entry Q(s,a)<-0
# Observe current state s
# Do forever:
# select an action a and execute it
# receive immediate reward
# observe the new state
# update the table entry for Q(s,a)
# update the state
# Non-deterministic environment
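# Update rule used below (learning-rate form for a non-deterministic environment):
#   Q(s,a) <- (1-learning_rate)*Q(s,a) + learning_rate*(reward + dis*max_a' Q(s',a'))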
env = gym.make('FrozenLake-v0')
# Initialize table with all zeros
Q = np.zeros([env.observation_space.n,env.action_space.n])
# Discount factor
dis = .99
# Learning_rate
learning_rate = 0.85
# Set learning parameters
num_episodes = 2000
# Create lists to contain total rewards and steps per episode
rList = []
for i in range(num_episodes):
# Reset environment and get first new observation
    # Initialize state
state = env.reset()
rAll = 0
done = False
# Decaying E-greedy
e = 1. /((i/100)+1)
while not done:
# Determine actions
        # Exploration vs. exploitation: decaying E-greedy
if pr.random() < e:
action = env.action_space.sample()
else:
action = rargmax(Q[state,:])
# Add Random noise
# action = np.argmax(Q[state,:]+np.random.randn(1,env.action_space.n)/(i+1))
# Get new state and reward from environment
new_state, reward, done, _ = env.step(action)
# Update Q-table with new knowledge using decay rate and learning rate
Q[state,action] = (1-learning_rate)*Q[state,action] +learning_rate*(reward + dis * np.max(Q[new_state,:]))
# Update the state
state = new_state
# reward every episode
rAll += reward
rList.append(rAll)
# Show the result
print("Success rate:"+str(sum(rList)/num_episodes))
# Show the table
print(Q)
plt.bar(range(len(rList)),rList,color="blue")
plt.show()
| apache-2.0 |
Vimos/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
clancia/TASEP | SSSU coalescence times/SSSUforLinReg/LinRegCTvsSize.py | 1 | 2287 | import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from scipy import stats
p = 0.5
e = 0.1
qth = [25,50,75,90]
nomefile = './N*' + '_B*' + '_p' + str(p) + '_e' + str(e) +'.npy'
nomefile = glob(nomefile)
data = []
N = []
medie = []
mediane = []
massimi = []
perc = []
nomefile.sort(key=lambda x:int(x.split('_')[1][1:]))
'''
this sort is essential -- Carlo, you had forgotten the int() and it
didn't work!
'''
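# e.g. a matched filename looks like './N10_B20_p0.5_e0.1.npy', so the sort key
# above extracts the integer that follows 'B'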
for f in nomefile:
N.append(2*int(f.split('_')[1][1:]))
data.append(np.load(f))
medie.append(np.mean(data[-1]))
massimi.append(max(data[-1]))
mediane.append(np.median(data[-1]))
perc.append(np.percentile(data[-1], qth))
perc = np.array(perc)
perc= perc.T
xi = np.zeros(len(N))
for i in range(len(N)):
xi[i] = N[i] - 10
Eslope, Eintercept, Er_value, Ep_value, Estd_err = stats.linregress(xi, medie)
Mslope, Mintercept, Mr_value, Mp_value, Mstd_err = stats.linregress(xi, massimi)
MEDslope, MEDintercept, MEDr_value, MEDp_value, MEDstd_err = stats.linregress(xi, mediane)
fig, (ax, bx, cx) = plt.subplots(ncols=3)
fig.suptitle('Coalescence Times for Parallel TASEP p0.5 e.1', fontsize=18)
Eline = Eslope*xi + Eintercept
MEDline = MEDslope*xi + MEDintercept
Mline = Mslope*xi + Mintercept
ax.plot(N,Eline,'r-',N,medie,'o')
ax.set_ylabel('Mean of Coalescence Times')
ax.set_xlabel('Number of Sites of the Ring')
ax.text(60, 300, 'Slope = %f \nIntercept = %f' %(Eslope, Eintercept), fontsize=16)
bx.plot(N,MEDline,'r-',N,mediane,'x')
bx.set_ylabel('Median of Coalescence Times')
bx.set_xlabel('Number of Sites of the Ring')
bx.text(60, 300, 'Slope = %f \nIntercept = %f' %(MEDslope, MEDintercept), fontsize=16)
cx.plot(N,Mline,'r-',N,massimi,'g^')
cx.text(65, 600000, 'Slope = %f \nIntercept = %f' %(Mslope, Mintercept), fontsize=16)
cx.set_ylabel('Max of Coalescence Times')
cx.set_xlabel('Number of Sites of the Ring')
plt.show()
fig = plt.figure()
for row, lab in zip(perc[::-1],qth[::-1]):
plt.plot(N,row, label=lab)
'''
I used the extended slice syntax only to get the legend in descending order
'''
plt.legend(loc=2, title= 'Percentiles')
plt.ylabel('Values of Percentiles of Coalescence Times')
plt.xlabel('Number of Sites of the Ring')
plt.title('Percentiles of Coalescence Times of Parallel TASEP p0.5 e0.1')
plt.show(fig) | gpl-2.0 |
a301-teaching/a301_code | notebooks/python/heating_rate.py | 1 | 7077 |
# coding: utf-8
# # Atmospheric heating rate (Cloudsat only) -- modified Nov. 27 --
# This notebook plots vertical cross sections through a cyclone of $Q_R$ the longwave and
# shortwave heating rate in K/hour. It uses the level 2B product FLXHR, which is described
# at [the cloudsat website](http://www.cloudsat.cira.colostate.edu/data-products/level-2b/2b-flxhr?term=0) as follows:
#
# "This algorithm derives estimates of broadband fluxes and heating rates consistent with liquid and ice water content estimates from the CloudSat Profiling Radar (CPR). For each radar profile, a broadband radiative transfer model is used to calculate upwelling and downwelling longwave and shortwave fluxes at each CPR range gate from the surface to the lower stratosphere. Profiles of cloud ice and liquid water content and cloud particle effective radii are defined based on the CloudSat 2B-LWC and 2B-IWC products while precipitation properties are defined using the CloudSat 2C-PRECIP-COLUMN dataset. Ancillary atmospheric state variables are interpolated from ECMWF analyses and surface albedos are assigned based on seasonally-varying maps of surface reflectance properties in combination with daily snow and sea ice cover maps from passive microwave instruments. Equivalent clear sky radiative flux profiles are generated by removing all clouds and repeating the calculations. Corresponding profiles of atmospheric heating are inferred from the vertical derivative of these fluxes."
# #### 1. Reading in the shortwave and longwave radiative heating rates
#
# Format is described on the [Cloudsat web site](http://www.cloudsat.cira.colostate.edu/data-products/level-2b/2b-flxhr?term=31)
#
# * Units: K/hr
#
# * Variable name: Qr
#
# * Shape: Qr[2, 37082, 125] where Qr[0,37082,125] is the shortwave heating rate
# and Qr[1,37082,125] is the longwave heating rate. The other two dimensions are the
# same as the radar reflectivity: there are 37082 radar measurements in an orbit, binned
# into 125 vertical height bins
# In[1]:
import h5py
import numpy as np
import datetime as dt
from datetime import timezone as tz
import matplotlib
from matplotlib import pyplot as plt
import pyproj
from numpy import ma
from a301utils.a301_readfile import download
from a301lib.cloudsat import get_geo
from IPython.display import Image, display
flx_file='2008082060027_10105_CS_2B-FLXHR_GRANULE_P2_R04_E02.h5'
download(flx_file)
meters2km=1.e3
#
# pass the correct field name to get_geo
#
if flx_file.find('FLXHR_GRANULE') > 0:
fieldname='2B-FLXHR'
elif flx_file.find('FLXHR-LIDAR') > 0:
fieldname='2B-FLXHR-LIDAR'
else:
    raise Exception('{} not a FLXHR file'.format(flx_file))
lats,lons,date_times,prof_times,dem_elevation=get_geo(flx_file,fieldname)
with h5py.File(flx_file,'r') as flxin:
#print(list(flxin['2B-FLXHR']['Swath Attributes'].keys()))
flxvals=flxin['2B-FLXHR']['Data Fields']['QR'][...]
height=flxin['2B-FLXHR']['Geolocation Fields']['Height'][...]
factor=flxin['2B-FLXHR']['Swath Attributes']['QR.factor'][0][0]
missing=flxin['2B-FLXHR']['Swath Attributes']['QR.missing'][0][0]
shortwave_hr=flxvals[0,:,:]
longwave_hr=flxvals[1,:,:]
print('\nmissing values given by={}\nfactor={}'.format(missing,factor))
# #### 2. Make a masked array of the heating rates so that pcolormesh will plot them
#
# note that I need to find the missing data before I divide by factor=100 to
# convert from int16 to float
# In[2]:
hit = (longwave_hr == missing)
print('There are {} longwave_hr missing values'.format(np.sum(hit)))
longwave_hr = np.ma.masked_where(hit, longwave_hr)
hit = (shortwave_hr == missing)
print('There are {} shortwave_hr missing values'.format(np.sum(hit)))
shortwave_hr = np.ma.masked_where(hit, shortwave_hr)
# divide the masked arrays by the scale factor; the mask is preserved
longwave_hr = longwave_hr/factor
shortwave_hr = shortwave_hr/factor
# #### 3. Find the part of the orbit that corresponds to the 3 minutes containing the storm
#
# You need to enter the start_hour and start_minute for the start time of your cyclone in the granule
# In[3]:
first_time=date_times[0]
print('orbit start: {}'.format(first_time))
start_hour=6
start_minute=45
storm_start=starttime=dt.datetime(first_time.year,first_time.month,first_time.day,
start_hour,start_minute,0,tzinfo=tz.utc)
#
# get 3 minutes of data from the storm_start
#
storm_stop=storm_start + dt.timedelta(minutes=3)
print('storm start: {}'.format(storm_start))
time_hit = np.logical_and(date_times > storm_start,date_times < storm_stop)
storm_lats = lats[time_hit]
storm_lons=lons[time_hit]
storm_prof_times=prof_times[time_hit]
storm_sw_hr=shortwave_hr[time_hit,:]
storm_lw_hr=longwave_hr[time_hit,:]
storm_height=height[time_hit,:]
storm_date_times=date_times[time_hit]
len(date_times)
# #### 4. convert time to distance by using pyproj to get the greatcircle distance between shots
# In[4]:
great_circle=pyproj.Geod(ellps='WGS84')
distance=[0]
start=(storm_lons[0],storm_lats[0])
for index in np.arange(1,len(storm_lons)):
azi12,azi21,step= great_circle.inv(storm_lons[index-1],storm_lats[index-1],
storm_lons[index],storm_lats[index])
distance.append(distance[index-1] + step)
distance=np.array(distance)/meters2km
# #### 5. Make the plot assuming that height is the same for every shot
#
# i.e. assume that height[0,:] = height[1,:] = ...
#
# in reality, the bin heights depend on the details of the radar returns, so
# we would need to histogram the heights into a uniform set of bins -- ignore that for this qualitative picture
# In[5]:
get_ipython().magic('matplotlib inline')
plt.close('all')
from matplotlib import cm
from matplotlib.colors import Normalize
vmin=-30
vmax=30
the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False)
cmap_ref=cm.RdBu_r
cmap_ref.set_over('pink')
cmap_ref.set_under('k')
cmap_ref.set_bad('0.75') #75% grey
#
# Q-1: What is the difference between the distance,height,field,ax
# and cmap,norm arguments to this function? Why do I structure
# the function signature this way?
#
def plot_field(distance,height,field,ax,cmap=None,norm=None):
if cmap is None:
cmap=cm.jet
col=ax.pcolormesh(distance,height,field,cmap=cmap,
norm=the_norm)
cax=fig.colorbar(col,extend='both',ax=ax,pad= 0.01)
return ax,cax
fig,(ax1,ax2)=plt.subplots(2,1,figsize=(20,10))
cloud_height_km=height[0,:]/meters2km
ax1,cax1=plot_field(distance,cloud_height_km,storm_sw_hr.T,ax1,cmap=cmap_ref,
norm=the_norm)
ax2,cax2=plot_field(distance,cloud_height_km,storm_lw_hr.T,ax2,cmap=cmap_ref,
norm=the_norm)
for colorbar in [cax1,cax2]:
text=colorbar.set_label('heating rate (K/hr)',rotation=-90,verticalalignment='bottom')
for ax in [ax1,ax2]:
ax.set(ylim=[0,17],xlim=(0,1200))
ax.set_xlabel('distance (km)',fontsize=15)
ax.set_ylabel('height (km)',fontsize=15)
text=fig.suptitle('storm radiative heating rates: shortwave (top), longwave (bottom)',size=25)
fig.savefig('heating_rates.png',dpi=100)
# In[ ]:
| mit |
Jimmy-Morzaria/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
zachmath/HFODD_scripts | path_plot.py | 1 | 1287 | import re, math
import matplotlib.pyplot as plt
infile = 'mypath-1Feb2018.out-2D-GCM'
myfile = open(infile,'r')
mydata = myfile.readlines()
x_out, y_out = [], []
x_exit, y_exit = [], []
x_path, y_path = [], []
actn = []
for line in mydata:
if re.match('.*Exit point.*',line): # Actually-attained outer turning points
mycols = line.split(' ')
        mycols = list(filter(None, mycols))  # wrap in list() so it can be indexed under Python 3
x_exit.append(float(mycols[3]))
y_exit.append(float(mycols[4]))
actn.append(float(mycols[5]))
if re.match('.*path:.*',line): # Minimum action path
mycols = line.split(' ')
        mycols = list(filter(None, mycols))  # wrap in list() so it can be indexed under Python 3
x_path.append(float(mycols[1]))
y_path.append(float(mycols[2]))
plt.plot(x_out, y_out, 'k+')
plt.plot(x_exit, y_exit, 'r^')
plt.plot(x_path, y_path, 'b')
plt.xlim(0,400)
plt.ylim(0,50)
plt.xlabel('q20')
plt.ylabel('q30')
plt.show()
smin = min(actn)
exit_probs = open('exit_probs.out','w')
for x,y,s in zip(x_exit,y_exit,actn):
# try:
# prob = 1.0/(1+math.exp(2*s))
# except:
# prob = float('inf')
prob = math.exp(2*(smin-s)) # approximate ratio of tunneling prob compared to max tunneling prob/min action path
exit_probs.write('{:>5} {:>5} {:>12.5}\n'.format(x,y,prob))
exit_probs.close()
| gpl-3.0 |
soulmachine/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
bzero/statsmodels | statsmodels/base/tests/test_generic_methods.py | 25 | 16558 | # -*- coding: utf-8 -*-
"""Tests that use cross-checks for generic methods
Should be easy to check consistency across models
Does not cover tsa
Initial cases copied from test_shrink_pickle
Created on Wed Oct 30 14:01:27 2013
Author: Josef Perktold
"""
from statsmodels.compat.python import range
import numpy as np
import statsmodels.api as sm
from statsmodels.compat.scipy import NumpyVersion
from numpy.testing import assert_, assert_allclose, assert_equal
from nose import SkipTest
import platform
iswin = platform.system() == 'Windows'
npversionless15 = NumpyVersion(np.__version__) < '1.5.0'
winoldnp = iswin & npversionless15
class CheckGenericMixin(object):
def __init__(self):
self.predict_kwds = {}
@classmethod
def setup_class(self):
nobs = 500
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
self.exog = x
self.xf = 0.25 * np.ones((2, 4))
def test_ttest_tvalues(self):
        # test that t_test has same results as params, bse, tvalues, ...
res = self.results
mat = np.eye(len(res.params))
tt = res.t_test(mat)
assert_allclose(tt.effect, res.params, rtol=1e-12)
# TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze
assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10)
assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10)
assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10)
# test params table frame returned by t_test
table_res = np.column_stack((res.params, res.bse, res.tvalues,
res.pvalues, res.conf_int()))
table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue,
tt.conf_int()))
table2 = tt.summary_frame().values
assert_allclose(table2, table_res, rtol=1e-12)
# move this to test_attributes ?
assert_(hasattr(res, 'use_t'))
tt = res.t_test(mat[0])
tt.summary() # smoke test for #1323
assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
def test_ftest_pvalues(self):
res = self.results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
        # automatic use_f based on results class use_t
pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def test_fitted(self):
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
raise SkipTest
res = self.results
fitted = res.fittedvalues
assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
assert_allclose(fitted, res.predict(), rtol=1e-12)
def test_predict_types(self):
res = self.results
# squeeze to make 1d for single regressor test case
p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
# SMOKE test only TODO
res.predict(p_exog)
res.predict(p_exog.tolist())
res.predict(p_exog[0].tolist())
else:
fitted = res.fittedvalues[:2]
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
# this needs reshape to column-vector:
assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
rtol=1e-12)
# only one prediction:
assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
rtol=1e-12)
assert_allclose(fitted[:1], res.predict(p_exog[0]),
rtol=1e-12)
# predict doesn't preserve DataFrame, e.g. dot converts to ndarray
# import pandas
# predicted = res.predict(pandas.DataFrame(p_exog))
# assert_(isinstance(predicted, pandas.DataFrame))
# assert_allclose(predicted, fitted, rtol=1e-12)
######### subclasses for individual models, unchanged from test_shrink_pickle
# TODO: check if setup_class is faster than setup
class TestGenericOLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.OLS(y, self.exog).fit()
class TestGenericOLSOneExog(CheckGenericMixin):
# check with single regressor (no constant)
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog[:, 1]
np.random.seed(987689)
y = x + np.random.randn(x.shape[0])
self.results = sm.OLS(y, x).fit()
class TestGenericWLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()
class TestGenericPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
self.results = model.fit(start_params=start_params, method='bfgs',
disp=0)
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)
class TestGenericNegativeBinomial(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
np.random.seed(987689)
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
mod = sm.NegativeBinomial(data.endog, data.exog)
start_params = np.array([-0.0565406 , -0.21213599, 0.08783076,
-0.02991835, 0.22901974, 0.0621026,
0.06799283, 0.08406688, 0.18530969,
1.36645452])
self.results = mod.fit(start_params=start_params, disp=0)
class TestGenericLogit(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
nobs = x.shape[0]
np.random.seed(987689)
y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
model = sm.Logit(y_bin, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestGenericRLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.RLM(y, self.exog).fit()
class TestGenericGLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.GLM(y, self.exog).fit()
class TestGenericGEEPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params)
class TestGenericGEEPoissonNaive(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params,
cov_type='naive')
class TestGenericGEEPoissonBC(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# params_est = np.array([-0.0063238 , 0.99463752, 1.02790201, 0.98080081])
vi = sm.cov_struct.Independence()
family = sm.families.Poisson()
mod = sm.GEE(y_count, self.exog, groups, family=family, cov_struct=vi)
self.results = mod.fit(start_params=start_params,
cov_type='bias_reduced')
# Other test classes
class CheckAnovaMixin(object):
@classmethod
def setup_class(cls):
import statsmodels.stats.tests.test_anova as ttmod
test = ttmod.TestAnova3()
test.setupClass()
cls.data = test.data.drop([0,1,2])
cls.initialize()
def test_combined(self):
res = self.res
wa = res.wald_test_terms(skip_single=False, combine_terms=['Duration', 'Weight'])
eye = np.eye(len(res.params))
c_const = eye[0]
c_w = eye[[2,3]]
c_d = eye[1]
c_dw = eye[[4,5]]
c_weight = eye[2:6]
c_duration = eye[[1, 4, 5]]
compare_waldres(res, wa, [c_const, c_d, c_w, c_dw, c_duration, c_weight])
def test_categories(self):
# test only multicolumn terms
res = self.res
wa = res.wald_test_terms(skip_single=True)
eye = np.eye(len(res.params))
c_w = eye[[2,3]]
c_dw = eye[[4,5]]
compare_waldres(res, wa, [c_w, c_dw])
def compare_waldres(res, wa, contrasts):
    for i, c in enumerate(contrasts):
wt = res.wald_test(c)
assert_allclose(wa.table.values[i, 0], wt.statistic)
assert_allclose(wa.table.values[i, 1], wt.pvalue)
df = c.shape[0] if c.ndim == 2 else 1
assert_equal(wa.table.values[i, 2], df)
# attributes
assert_allclose(wa.statistic[i], wt.statistic)
assert_allclose(wa.pvalues[i], wt.pvalue)
assert_equal(wa.df_constraints[i], df)
if res.use_t:
assert_equal(wa.df_denom[i], res.df_resid)
col_names = wa.col_names
if res.use_t:
assert_equal(wa.distribution, 'F')
assert_equal(col_names[0], 'F')
assert_equal(col_names[1], 'P>F')
else:
assert_equal(wa.distribution, 'chi2')
assert_equal(col_names[0], 'chi2')
assert_equal(col_names[1], 'P>chi2')
# SMOKETEST
wa.summary_frame()
class TestWaldAnovaOLS(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)
def test_noformula(self):
endog = self.res.model.endog
exog = self.res.model.data.orig_exog
del exog.design_info
res = sm.OLS(endog, exog).fit()
wa = res.wald_test_terms(skip_single=True,
combine_terms=['Duration', 'Weight'])
eye = np.eye(len(res.params))
c_weight = eye[2:6]
c_duration = eye[[1, 4, 5]]
compare_waldres(res, wa, [c_duration, c_weight])
class TestWaldAnovaOLSF(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit() # default use_t=True
class TestWaldAnovaGLM(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = glm("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)
class TestWaldAnovaPoisson(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import Poisson
mod = Poisson.from_formula("Days ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(cov_type='HC0')
class TestWaldAnovaNegBin(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import NegativeBinomial
formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
mod = NegativeBinomial.from_formula(formula, cls.data,
loglike_method='nb2')
cls.res = mod.fit()
class TestWaldAnovaNegBin1(CheckAnovaMixin):
@classmethod
def initialize(cls):
from statsmodels.discrete.discrete_model import NegativeBinomial
formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
mod = NegativeBinomial.from_formula(formula, cls.data,
loglike_method='nb1')
cls.res = mod.fit(cov_type='HC0')
class T_estWaldAnovaOLSNoFormula(object):
@classmethod
def initialize(cls):
from statsmodels.formula.api import ols, glm, poisson
from statsmodels.discrete.discrete_model import Poisson
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit() # default use_t=True
if __name__ == '__main__':
pass
| bsd-3-clause |
ercius/openNCEM | ncempy/algo/align.py | 1 | 8740 | import numpy as np
from scipy import ndimage
def image_cross_corr(image, reference, real_filter=1, k_filter=1):
""" Calculate image cross-correlation. See imageCrossCorRealShift and other
similar functions to calculate the shift and apply the shift.
Parameters
----------
image : ndarray
The source image to align to. Should be even sized.
reference : ndarray
The reference image to align to the source image. Should be even sized.
real_filter : ndarray, optional, default 1
Filter to apply to each image in real space before FFT.
k_filter : ndarray, optional default 1
Filter to apply to each image in FFT space
Returns
-------
: ndarray
Cross correlation of image and reference.
"""
if type(image) is not np.ndarray or type(reference) is not np.ndarray:
raise TypeError("Must use ndarrays")
if np.iscomplexobj(image) or np.iscomplexobj(reference):
raise TypeError("Images mst be real")
# Calculate FFT of images
image_f = np.fft.fft2((image - np.mean(image)) * real_filter)
reference_f = np.fft.fft2((reference - np.mean(reference)) * real_filter)
# Calculate cross-correlation
xcor = abs(np.fft.fftshift(np.fft.ifft2(np.conj(image_f) * reference_f * k_filter)))
return xcor
def image_correlate(image, reference, real_filter=1, k_filter=1, shift_func='shift', verbose=False):
""" Align image to reference by cross-correlation. Outputs shifts and shifted images.
Uses the real FFT for ~2x speed improvement. The k_filter must have
a shape that matches the np.fft.rfft2() of image and reference.
Uses scipy.ndimage.shift() or np.roll to move the image. Use 'roll' to avoid losing
data off the edge for multiple shifting operations. Use shift to avoid wrap around problems and when there
is only one shifting operation.
Note
----
image, reference and real_filter must all have the same shape (N, M).
k_filter must have a shape that matches the np.fft.rfft2() of
the other inputs: (N, M/2+1)
Parameters
----------
image : ndarray
A image as a 2D ndarray.
reference : ndarray
The reference image to align to.
real_filter : ndarray, optional, default = 1
A real space filter applied to image and reference before
calculating the shift.
k_filter : ndarray, optional, default = 1
A Fourier space filter applied to the fourier transform of
image and reference before calculating the cross-correlation.
shift_func : str, default is 'shift'
The function to use to shift the images. 'roll' uses np.roll and 'shift' uses ndimage.shift.
verbose : bool
Plots the cross-correlation using matplotlib for debugging purposes.
Returns
-------
: tuple, (ndarray, tuple)
A tuple containing the shifted image and the shifts applied.
"""
output = None
    if shift_func != 'shift' and shift_func != 'roll':
        raise KeyError('Shift function has to be either shift or roll')
image_f = np.fft.rfft2((image - np.mean(image)) * real_filter)
reference_f = np.fft.rfft2((reference - np.mean(reference)) * real_filter)
xcor = abs(np.fft.irfft2(np.conj(image_f) * reference_f * k_filter))
if verbose:
import matplotlib.pyplot as plt
plt.imshow(np.fft.fftshift(xcor))
plt.title('imageCrossCorRealShift xcor')
shifts = np.unravel_index(np.fft.fftshift(xcor).argmax(), xcor.shape)
shifts = (shifts[0] - xcor.shape[0] / 2, shifts[1] - xcor.shape[1] / 2)
shifts = [int(i) for i in shifts] # convert to integers
if shift_func == 'shift':
# shift image using ndimage.shift
output = ndimage.interpolation.shift(image, shifts, order=0)
elif shift_func == 'roll':
# shift image using roll to be reversible
output = np.roll(image, shifts[0], axis=0)
output = np.roll(output, shifts[1], axis=1)
return output, shifts
def image_phase_correlate(image, reference, real_filter=1, k_filter=1, shift_func='shift', verbose=False):
""" Align image to reference by phase-correlation. Outputs shifted images and shift.
Uses np.fft.rfft2 for ~2x speed improvement.
Uses scipy.ndimage.shift() to shift the image and remove border pixels.
NOT WORKING OR TESTED YET
Note
----
image, reference and real_filter must all have the same shape (N, M).
k_filter must have a shape that matches the np.fft.rfft2() of
the other inputs: (N, M/2+1)
Parameters
----------
image : ndarray
A image as a 2D ndarray.
reference : ndarray
The reference image to align to.
real_filter : ndarray, optional, default = 1
A real space filter applied to image and reference before
calculating the shift.
k_filter : ndarray, optional, default = 1
A Fourier space filter applied to the fourier transform of
image and reference before calculating the cross-correlation.
shift_func : str, default is 'shift'
The function to use to shift the images. 'roll' uses np.roll and 'shift' uses ndimage.shift.
verbose : bool
Plots the cross-correlation using matplotlib for debugging purposes.
Returns
------
: tuple, (ndarray, tuple)
A tuple containing the shifted image and the shifts applied.
"""
output = None
image_f = np.fft.rfft2((image - np.mean(image)) * real_filter)
reference_f = np.fft.rfft2((reference - np.mean(reference)) * real_filter)
xcor = abs(np.fft.irfft2(np.conj(image_f) * reference_f * k_filter))
pcor = xcor / (np.abs(xcor) + 0.001)
if verbose:
import matplotlib.pyplot as plt
plt.imshow(np.fft.fftshift(pcor))
plt.title('imageCrossPhaseRealShift pcor')
shifts = np.unravel_index(np.fft.fftshift(pcor).argmax(), pcor.shape)
shifts = (shifts[0] - pcor.shape[0] / 2, shifts[1] - pcor.shape[1] / 2)
shifts = [int(i) for i in shifts] # convert to integers
if shift_func == 'shift':
# shift image using ndimage.shift
output = ndimage.interpolation.shift(image, shifts, order=0)
elif shift_func == 'roll':
# shift image using roll to be reversible
output = np.roll(image, shifts[0], axis=0)
output = np.roll(output, shifts[1], axis=1)
return output, shifts
def stack_align(stack, align_type='static', real_filter=1, k_filter=1, shift_func='shift'):
""" Align a series of images by cross-correlation. All images are aligned to the first image Uses image_correlate
which is based on simple cross correlation.
Notes
-----
    You should probably use ncempy.algo.stack_align since it uses multicorr and is more
    functional.
Parameters
----------
stack : ndarray, 3D
The stack of images to align. Shape [num, Y, X]
real_filter : ndarray, optional, default = 1
A real space filter to apply before cross-correlation
of each image. Shape must be [Y, X]
k_filter : ndarray, optional, default = 1
A Fourier space filter to apply before cross-correlation.
Shape must be [Y, X/2 + 1]
shift_func : str, default is 'shift'
The function to use to shift the images. 'roll' uses np.roll and 'shift' uses ndimage.shift.
align_type: str
static or dynamic alignment. Static aligns all images to the first image. Dynamic aligns
each image to the previous image starting with the first image
Returns
-------
: tuple, aligned stack, shifts
A tuple containing the aligned images as a 3D ndarray of shape
[num, Y, X] and shifts as a 2D ndarray of shape [num, 2]
"""
    if align_type != 'static' and align_type != 'dynamic':
        raise KeyError('Incorrect align type. Must be static or dynamic')
# Pre-allocate the arrays
aligned = np.zeros_like(stack) # shifted data array
shifts = np.zeros((stack.shape[0], 2)) # the applied shifts
aligned[0, :, :] = stack[0, :, :]
jj = 0
ref_sh = np.zeros((2,))
for ii in range(1, stack.shape[0]):
output, sh = image_correlate(stack[ii, :, :], stack[jj, :, :], real_filter, k_filter, shift_func=shift_func)
sh += ref_sh
aligned[ii, :, :] = output
shifts[ii, :] = sh
        if align_type == 'dynamic':
ref_sh = sh
jj = ii
return aligned, shifts
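if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the library API):
    # roll a random reference image by a known amount, then recover the shift
    # with image_correlate defined above.
    rng = np.random.default_rng(0)
    reference = rng.standard_normal((128, 128))
    shifted = np.roll(np.roll(reference, 5, axis=0), -3, axis=1)
    aligned, found_shifts = image_correlate(shifted, reference, shift_func='roll')
    print('shifts applied to re-align the image:', found_shifts)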
| gpl-3.0 |
mne-tools/mne-python | tutorials/evoked/10_evoked_overview.py | 3 | 16694 | """
.. _tut-evoked-class:
The Evoked data structure: evoked/averaged data
===============================================
This tutorial covers the basics of creating and working with :term:`evoked`
data. It introduces the :class:`~mne.Evoked` data structure in detail,
including how to load, query, subselect, export, and plot data from an
:class:`~mne.Evoked` object. For info on creating an :class:`~mne.Evoked`
object from (possibly simulated) data in a :class:`NumPy array
<numpy.ndarray>`, see :ref:`tut-creating-data-structures`.
As usual we'll start by importing the modules we need:
"""
import os
import mne
###############################################################################
# Creating ``Evoked`` objects from ``Epochs``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`~mne.Evoked` objects typically store an EEG or MEG signal that has
# been *averaged* over multiple :term:`epochs`, which is a common technique for
# estimating stimulus-evoked activity. The data in an :class:`~mne.Evoked`
# object are stored in an :class:`array <numpy.ndarray>` of shape
# ``(n_channels, n_times)`` (in contrast to an :class:`~mne.Epochs` object,
# which stores data of shape ``(n_epochs, n_channels, n_times)``). Thus to
# create an :class:`~mne.Evoked` object, we'll start by epoching some raw data,
# and then averaging together all the epochs from one condition:
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
# we'll skip the "face" and "buttonpress" conditions, to save memory:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7, event_id=event_dict,
preload=True)
evoked = epochs['auditory/left'].average()
del raw # reduce memory usage
###############################################################################
# You may have noticed that MNE informed us that "baseline correction" has been
# applied. This happened automatically during creation of the
# `~mne.Epochs` object, but may also be initiated (or disabled!) manually.
# We will discuss this in more detail later.
#
# The information about the baseline period of `~mne.Epochs` is transferred to
# derived `~mne.Evoked` objects to maintain provenance as you process your
# data:
print(f'Epochs baseline: {epochs.baseline}')
print(f'Evoked baseline: {evoked.baseline}')
###############################################################################
# Basic visualization of ``Evoked`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We can visualize the average evoked response for left-auditory stimuli using
# the :meth:`~mne.Evoked.plot` method, which yields a butterfly plot of each
# channel type:
evoked.plot()
###############################################################################
# Like the ``plot()`` methods for :meth:`Raw <mne.io.Raw.plot>` and
# :meth:`Epochs <mne.Epochs.plot>` objects,
# :meth:`evoked.plot() <mne.Evoked.plot>` has many parameters for customizing
# the plot output, such as color-coding channel traces by scalp location, or
# plotting the :term:`global field power` alongside the channel traces.
# See :ref:`tut-visualize-evoked` for more information about visualizing
# :class:`~mne.Evoked` objects.
#
#
# Subselecting ``Evoked`` data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: Evokeds are not memory-mapped
#
# :class:`~mne.Evoked` objects use a :attr:`~mne.Evoked.data` *attribute*
# rather than a :meth:`~mne.Epochs.get_data` *method*; this reflects the fact
# that the data in :class:`~mne.Evoked` objects are always loaded into
# memory, never `memory-mapped`_ from their location on disk (because they
# are typically *much* smaller than :class:`~mne.io.Raw` or
# :class:`~mne.Epochs` objects).
#
#
# Unlike :class:`~mne.io.Raw` and :class:`~mne.Epochs` objects,
# :class:`~mne.Evoked` objects do not support selection by square-bracket
# indexing. Instead, data can be subselected by indexing the
# :attr:`~mne.Evoked.data` attribute:
print(evoked.data[:2, :3]) # first 2 channels, first 3 timepoints
###############################################################################
# To select based on time in seconds, the :meth:`~mne.Evoked.time_as_index`
# method can be useful, although beware that depending on the sampling
# frequency, the number of samples in a span of given duration may not always
# be the same (see the :ref:`time-as-index` section of the
# :ref:`tutorial about Raw data <tut-raw-class>` for details).
#
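# For example (a brief illustration added to this copy of the tutorial):
print(evoked.time_as_index([0.1, 0.2]))  # indices of the samples nearest 100 and 200 ms
###############################################################################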
#
# Selecting, dropping, and reordering channels
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, when creating :class:`~mne.Evoked` data from an
# :class:`~mne.Epochs` object, only the "data" channels will be retained:
# ``eog``, ``ecg``, ``stim``, and ``misc`` channel types will be dropped. You
# can control which channel types are retained via the ``picks`` parameter of
# :meth:`epochs.average() <mne.Epochs.average>`, by passing ``'all'`` to
# retain all channels, or by passing a list of integers, channel names, or
# channel types. See the documentation of :meth:`~mne.Epochs.average` for
# details.
#
# If you've already created the :class:`~mne.Evoked` object, you can use the
# :meth:`~mne.Evoked.pick`, :meth:`~mne.Evoked.pick_channels`,
# :meth:`~mne.Evoked.pick_types`, and :meth:`~mne.Evoked.drop_channels` methods
# to modify which channels are included in an :class:`~mne.Evoked` object.
# You can also use :meth:`~mne.Evoked.reorder_channels` for this purpose; any
# channel names not provided to :meth:`~mne.Evoked.reorder_channels` will be
# dropped. Note that *channel* selection methods modify the object in-place, so
# in interactive/exploratory sessions you may want to create a
# :meth:`~mne.Evoked.copy` first.
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
print(evoked_eeg.ch_names)
new_order = ['EEG 002', 'MEG 2521', 'EEG 003']
evoked_subset = evoked.copy().reorder_channels(new_order)
print(evoked_subset.ch_names)
###############################################################################
# Similarities among the core data structures
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`~mne.Evoked` objects have many similarities with :class:`~mne.io.Raw`
# and :class:`~mne.Epochs` objects, including:
#
# - They can be loaded from and saved to disk in ``.fif`` format, and their
# data can be exported to a :class:`NumPy array <numpy.ndarray>` (but through
# the :attr:`~mne.Evoked.data` attribute, not through a ``get_data()``
# method). :class:`Pandas DataFrame <pandas.DataFrame>` export is also
# available through the :meth:`~mne.Evoked.to_data_frame` method (a brief
# example follows this list).
#
# - You can change the name or type of a channel using
# :meth:`evoked.rename_channels() <mne.Evoked.rename_channels>` or
# :meth:`evoked.set_channel_types() <mne.Evoked.set_channel_types>`.
# Both methods take :class:`dictionaries <dict>` where the keys are existing
# channel names, and the values are the new name (or type) for that channel.
# Existing channels that are not in the dictionary will be unchanged.
#
# - :term:`SSP projector <projector>` manipulation is possible through
# :meth:`~mne.Evoked.add_proj`, :meth:`~mne.Evoked.del_proj`, and
# :meth:`~mne.Evoked.plot_projs_topomap` methods, and the
# :attr:`~mne.Evoked.proj` attribute. See :ref:`tut-artifact-ssp` for more
# information on SSP.
#
# - Like :class:`~mne.io.Raw` and :class:`~mne.Epochs` objects,
# :class:`~mne.Evoked` objects have :meth:`~mne.Evoked.copy`,
# :meth:`~mne.Evoked.crop`, :meth:`~mne.Evoked.time_as_index`,
# :meth:`~mne.Evoked.filter`, and :meth:`~mne.Evoked.resample` methods.
#
# - Like :class:`~mne.io.Raw` and :class:`~mne.Epochs` objects,
# :class:`~mne.Evoked` objects have ``evoked.times``,
# :attr:`evoked.ch_names <mne.Evoked.ch_names>`, and :class:`info <mne.Info>`
# attributes.
#
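# As a brief added illustration of the DataFrame export mentioned in the list
# above (the exact columns depend on your MNE version):
print(evoked.to_data_frame().head())
###############################################################################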
#
# .. _tut-section-load-evk:
#
# Loading and saving ``Evoked`` data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Single :class:`~mne.Evoked` objects can be saved to disk with the
# :meth:`evoked.save() <mne.Evoked.save>` method. One difference between
# :class:`~mne.Evoked` objects and the other data structures is that multiple
# :class:`~mne.Evoked` objects can be saved into a single ``.fif`` file, using
# :func:`mne.write_evokeds`. The :ref:`example data <sample-dataset>`
# includes just such a ``.fif`` file: the data have already been epoched and
# averaged, and the file contains separate :class:`~mne.Evoked` objects for
# each experimental condition:
sample_data_evk_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-ave.fif')
evokeds_list = mne.read_evokeds(sample_data_evk_file, verbose=False)
print(evokeds_list)
print(type(evokeds_list))
###############################################################################
# Notice that :func:`mne.read_evokeds` returned a :class:`list` of
# :class:`~mne.Evoked` objects, and each one has an ``evoked.comment``
# attribute describing the experimental condition that was averaged to
# generate the estimate:
for evok in evokeds_list:
print(evok.comment)
###############################################################################
# If you want to load only some of the conditions present in a ``.fif`` file,
# :func:`~mne.read_evokeds` has a ``condition`` parameter, which takes either a
# string (matched against the comment attribute of the evoked objects on disk),
# or an integer selecting the :class:`~mne.Evoked` object based on the order
# it's stored in the file. Passing lists of integers or strings is also
# possible. If only one object is selected, the :class:`~mne.Evoked` object
# will be returned directly (rather than a length-one list containing it):
right_vis = mne.read_evokeds(sample_data_evk_file, condition='Right visual')
print(right_vis)
print(type(right_vis))
###############################################################################
# Above, when we created an :class:`~mne.Evoked` object by averaging epochs,
# baseline correction was applied by default when we extracted epochs from the
# `~mne.io.Raw` object (the default baseline period is ``(None, 0)``,
# which assured zero mean for times before the stimulus event). In contrast, if
# we plot the first :class:`~mne.Evoked` object in the list that was loaded
# from disk, we'll see that the data have not been baseline-corrected:
evokeds_list[0].plot(picks='eeg')
###############################################################################
# This can be remedied by either passing a ``baseline`` parameter to
# :func:`mne.read_evokeds`, or by applying baseline correction after loading,
# as shown here:
# Original baseline (none set).
print(f'Baseline after loading: {evokeds_list[0].baseline}')
# Apply a custom baseline correction.
evokeds_list[0].apply_baseline((None, 0))
print(f'Baseline after calling apply_baseline(): {evokeds_list[0].baseline}')
# Visualize the evoked response.
evokeds_list[0].plot(picks='eeg')
###############################################################################
# Notice that :meth:`~mne.Evoked.apply_baseline` operated in-place. Similarly,
# :class:`~mne.Evoked` objects may have been saved to disk with or without
# :term:`projectors <projector>` applied; you can pass ``proj=True`` to the
# :func:`~mne.read_evokeds` function, or use the :meth:`~mne.Evoked.apply_proj`
# method after loading.
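#
# A minimal sketch of both options (whether the stored projectors were already
# applied depends on how the file was saved, so this is illustrative only):
evokeds_proj = mne.read_evokeds(sample_data_evk_file, proj=True, verbose=False)
evoked_proj = evokeds_list[0].copy().apply_proj()
###############################################################################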
#
#
# Combining ``Evoked`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# One way to pool data across multiple conditions when estimating evoked
# responses is to do so *prior to averaging* (recall that MNE-Python can select
# based on partial matching of ``/``-separated epoch labels; see
# :ref:`tut-section-subselect-epochs` for more info):
left_right_aud = epochs['auditory'].average()
print(left_right_aud)
###############################################################################
# This approach will weight each epoch equally and create a single
# :class:`~mne.Evoked` object. Notice that the printed representation includes
# ``(average, N=145)``, indicating that the :class:`~mne.Evoked` object was
# created by averaging across 145 epochs. In this case, the event types were
# fairly close in number:
left_aud = epochs['auditory/left'].average()
right_aud = epochs['auditory/right'].average()
print([evok.nave for evok in (left_aud, right_aud)])
###############################################################################
# However, this may not always be the case; if for statistical reasons it is
# important to average *the same number* of epochs from different conditions,
# you can use :meth:`~mne.Epochs.equalize_event_counts` prior to averaging.
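#
# As a brief sketch, equalizing the two auditory conditions could look like
# this (done on a copy so the original ``epochs`` are left untouched):
epochs_eq = epochs.copy()
epochs_eq.equalize_event_counts(['auditory/left', 'auditory/right'])
print([len(epochs_eq[cond]) for cond in ('auditory/left', 'auditory/right')])
###############################################################################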
#
# Another approach to pooling across conditions is to create separate
# :class:`~mne.Evoked` objects for each condition, and combine them afterward.
# This can be accomplished by the function :func:`mne.combine_evoked`, which
# computes a weighted sum of the :class:`~mne.Evoked` objects given to it. The
# weights can be manually specified as a list or array of float values, or can
# be specified using the keyword ``'equal'`` (weight each `~mne.Evoked` object
# by :math:`\frac{1}{N}`, where :math:`N` is the number of `~mne.Evoked`
# objects given) or the keyword ``'nave'`` (weight each `~mne.Evoked` object
# proportional to the number of epochs averaged together to create it):
left_right_aud = mne.combine_evoked([left_aud, right_aud], weights='nave')
assert left_right_aud.nave == left_aud.nave + right_aud.nave
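# For comparison, a quick sketch of the unweighted (``'equal'``) combination:
left_right_aud_equal = mne.combine_evoked([left_aud, right_aud],
                                          weights='equal')
print(left_right_aud_equal.nave)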
###############################################################################
# Note that the ``nave`` attribute of the resulting `~mne.Evoked` object will
# reflect the *effective* number of averages, and depends on both the ``nave``
# attributes of the contributing `~mne.Evoked` objects and the weights at
# which they are combined. Keeping track of effective ``nave`` is important for
# inverse imaging, because ``nave`` is used to scale the noise covariance
# estimate (which in turn affects the magnitude of estimated source activity).
# See :ref:`minimum_norm_estimates` for more information (especially the
# :ref:`whitening_and_scaling` section). Note that `mne.grand_average` does
# *not* adjust ``nave`` to reflect effective number of averaged epochs; rather
# it simply sets ``nave`` to the number of *evokeds* that were averaged
# together. For this reason, it is best to use `mne.combine_evoked` rather than
# `mne.grand_average` if you intend to perform inverse imaging on the resulting
# :class:`~mne.Evoked` object.
#
#
# Other uses of ``Evoked`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Although the most common use of :class:`~mne.Evoked` objects is to store
# *averages* of epoched data, there are a couple other uses worth noting here.
# First, the method :meth:`epochs.standard_error() <mne.Epochs.standard_error>`
# will create an :class:`~mne.Evoked` object (just like
# :meth:`epochs.average() <mne.Epochs.average>` does), but the data in the
# :class:`~mne.Evoked` object will be the standard error across epochs instead
# of the average. To indicate this difference, :class:`~mne.Evoked` objects
# have a :attr:`~mne.Evoked.kind` attribute that takes values ``'average'`` or
# ``'standard error'`` as appropriate.
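#
# For example, a quick sketch using the ``epochs`` from above:
aud_std_err = epochs['auditory'].standard_error()
print(aud_std_err.kind)
###############################################################################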
#
# Another use of :class:`~mne.Evoked` objects is to represent *a single trial
# or epoch* of data, usually when looping through epochs. This can be easily
# accomplished with the :meth:`epochs.iter_evoked() <mne.Epochs.iter_evoked>`
# method, and can be useful for applications where you want to do something
# that is only possible for :class:`~mne.Evoked` objects. For example, here
# we use the :meth:`~mne.Evoked.get_peak` method (which isn't available for
# :class:`~mne.Epochs` objects) to get the peak response in each trial:
for ix, trial in enumerate(epochs[:3].iter_evoked()):
channel, latency, value = trial.get_peak(ch_type='eeg',
return_amplitude=True)
latency = int(round(latency * 1e3)) # convert to milliseconds
value = int(round(value * 1e6)) # convert to µV
print('Trial {}: peak of {} µV at {} ms in channel {}'
.format(ix, value, latency, channel))
###############################################################################
# .. REFERENCES
#
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
| bsd-3-clause |
mantidproject/mantid | qt/python/mantidqt/widgets/plotconfigdialog/imagestabwidget/presenter.py | 3 | 5060 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib.collections import QuadMesh
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mantid.plots.datafunctions import update_colorbar_scale
from mantidqt.utils.qt import block_signals
from mantidqt.widgets.plotconfigdialog import generate_ax_name, get_images_from_fig, get_colorbars_from_fig
from mantidqt.widgets.plotconfigdialog.imagestabwidget import ImageProperties
from mantidqt.widgets.plotconfigdialog.imagestabwidget.view import ImagesTabWidgetView, SCALES
class ImagesTabWidgetPresenter:
def __init__(self, fig, view=None, parent=None):
self.fig = fig
if not view:
self.view = ImagesTabWidgetView(parent)
else:
self.view = view
self.image_names_dict = dict()
self.populate_select_image_combo_box_and_update_view()
self.view.select_image_combo_box.currentIndexChanged.connect(
self.update_view)
def apply_properties(self):
props = self.view.get_properties()
# if only one colorbar apply settings to all images
if len(get_colorbars_from_fig(self.fig)) == 1:
# flatten the values into one list
images = sum(self.image_names_dict.values(), [])
else:
images = self.get_selected_image()
for image in images:
if image.colorbar:
image.colorbar.set_label(props.label)
image.set_cmap(props.colormap)
            # interpolation applies to image artists only, not QuadMesh/Poly3DCollection
            if props.interpolation and not isinstance(image, (QuadMesh, Poly3DCollection)):
image.set_interpolation(props.interpolation)
update_colorbar_scale(self.fig, image, SCALES[props.scale], props.vmin, props.vmax)
if props.vmin > props.vmax:
self.view.max_min_value_warning.setVisible(True)
self.view.max_min_value_warning.setText("<html> <head/> <body> <p> <span style=\"color:#ff0000;\">Max "
"value is less than min value so they have been "
"swapped.</span></p></body></html>")
elif props.vmin == props.vmax:
self.view.max_min_value_warning.setVisible(True)
self.view.max_min_value_warning.setText("<html><head/><body><p><span style=\"color:#ff0000;\">Min and max "
"value are the same so they have been "
"adjusted.</span></p></body></html>")
else:
self.view.max_min_value_warning.setVisible(False)
def get_selected_image(self):
return self.image_names_dict[self.view.get_selected_image_name()]
def populate_select_image_combo_box_and_update_view(self):
with block_signals(self.view.select_image_combo_box):
self._populate_select_image_combo_box()
self.update_view()
def update_view(self):
img_props = ImageProperties.from_image(self.get_selected_image())
self.view.label_line_edit.setEnabled(bool(get_colorbars_from_fig(self.fig)))
self.view.set_label(img_props.label)
self.view.set_colormap(img_props.colormap)
self.view.set_reverse_colormap(img_props.reverse_colormap)
self.view.set_min_value(img_props.vmin)
self.view.set_max_value(img_props.vmax)
if img_props.interpolation:
self.view.enable_interpolation(True)
self.view.set_interpolation(img_props.interpolation)
else:
self.view.set_interpolation('None')
self.view.enable_interpolation(False)
self.view.set_scale(img_props.scale)
@staticmethod
def generate_image_name(image):
"""Generate a name for an image"""
label = image.get_label().lstrip('_')
ax_name = generate_ax_name(image.axes)
if label:
return "{} - {}".format(ax_name, label)
else:
return "{}".format(ax_name)
@staticmethod
def set_name_in_names_dict(name, value, name_dict):
"""Set name of image in image_names_dict"""
idx = 1
base_name = name
while name in name_dict:
name = base_name + "({})".format(idx)
idx += 1
name_dict[name] = value
return name_dict
def _populate_select_image_combo_box(self):
self.view.select_image_combo_box.clear()
for img in get_images_from_fig(self.fig):
self.image_names_dict = self.set_name_in_names_dict(
self.generate_image_name(img[0]), img, self.image_names_dict)
self.view.populate_select_image_combo_box(
sorted(self.image_names_dict.keys()))
| gpl-3.0 |
ryanjdillon/smartmove | smartmove/visuals/latex/tables.py | 1 | 6040 | def write_table(out_root, fname, data, cols, headers, adjustwidth=False,
tiny=False, title='', caption='', centering=True, extrarowheight=0,
label='', notes='', dpi=300):
'''Create LaTeX table and write and compile to output directory
Args
----
out_root: str
path where table files should be written
fname: str
name of files without extension
data: pandas dataframe
        dataframe containing the columns listed in `cols`
cols: OrderedDict
key, value pairs of column names and format string
headers: iterable
list of lists containing string names of header columns. First should
be names, second units, etc.
adjustwidth: bool
adjust table to use full width of the page
tiny: bool
use `tiny` LaTeX command
title: str
Bolded title of table
caption: str
Caption explaining table
centering: bool
Center table on page
extrarowheight: int
number of points to increase table row height by
label: str
label for linking to tabel in LaTeX
notes: str
Notes regarding table which appear below table
Returns
-------
table: str
Concatenated LaTeX table string
'''
import os
from . import utils
# Write table to file
head = __create_header(headers ,title, caption, adjustwidth,
tiny, centering, extrarowheight)
body = __create_body(data, cols)
footer = __create_footer(label, notes, adjustwidth)
# Concatenate elements
table = head+body+footer
# Concatenate output filename
outfile = os.path.join(out_root, fname+'.tex')
# Remove old table file if present
try:
os.remove(outfile)
    except OSError:
        pass  # no existing .tex file to remove
# Write table to text .tex file
f = open(outfile, 'a')
f.write(table)
f.close()
# Generate pdf image of table in output directory
utils.compile_latex(out_root, fname, dpi=dpi)
return table
def __create_header(headers, title, caption, adjustwidth, tiny, centering,
extrarowheight):
'''create LaTeX multirow table header'''
# Create table template
n_cols = len(headers[0])
head = r'\begin{table}[!ht]'+'\n'
if adjustwidth:
head += r'\begin{adjustwidth}{-2.25in}{0in}'+'\n'
if tiny:
head += r'\tiny'+'\n'
if centering:
head += r'\centering'+'\n'
if title or caption:
cap_str = r'\caption{'
if title:
cap_str += '{'+title+'}'
if caption:
cap_str += caption
cap_str += '}'
head += cap_str+'\n'
if extrarowheight:
if isinstance(extrarowheight, int):
head += r'\setlength\extrarowheight{'+str(extrarowheight)+'pt}\n'
else:
raise TypeError('`extrarowheight` must be an integer value '
'(font point size)')
head += r'\begin{tabular}{ '+('c '*n_cols)+'}'+'\n'
head += r'\hline'+'\n'
# Process each list of column names for table
bold = True
for cols in headers:
col_str = ''
for i in range(len(cols)):
if bold == True:
col_str += r' \textbf{'+cols[i]+'} '
else:
col_str += r' '+cols[i]+' '
if i < len(cols)-1:
col_str += '&'
# Only first iteration/row will be bolded
bold = False
# Append header row to header
head += col_str+r'\\'+'\n'
# Add a horizontal line below header rows
head += r'\hline'+'\n'
return head
def __create_body(data, cols):
'''create LaTeX multirow table body
'''
import datetime
import numpy
# Process each row of body data
val_str = ''
keys = list(cols.keys())
for i in range(len(data)):
row = data[keys].iloc[i]
# Add values to row string
for key in keys:
# Handle datetime and str objects
if isinstance(row[key], datetime.datetime):
val = datetime.datetime.strftime(row[key], '%Y-%m-%d')
elif data.dtypes[key]==object:
val = str(row[key])
# Handle numerics
else:
if numpy.isnan(row[key]):
val = '-'
else:
val = (cols[key] % row[key])
val_str = val_str + (val+' ')
# If not the last column, add an `&`
if key != keys[-1]:
val_str = val_str+'& '
# Add EOF chars to row line
val_str = val_str+r'\\'+'\n'
body = val_str+'\n'
return body
def __create_footer(label, notes, adjustwidth):
'''create LaTeX multirow table footer'''
footer = r'\end{tabular}'+'\n'
# Add table notes
if notes:
notes = (r'\begin{flushleft} '+notes+'\n'
r'\end{flushleft}'+'\n')
footer += notes
if label:
footer += r'\label{'+label+'}\n'
# Add end statement to adjustwidth
if adjustwidth:
footer += r'\end{adjustwidth}'+'\n'
footer += r'\end{table}'+'\n'
return footer
if __name__ == '__main__':
from collections import OrderedDict
import os
import pandas
# Create output filename for .tex table
out_root = 'test/'
os.makedirs(out_root, exist_ok=True)
fname = 'test'
# Create dictionary of columns names and associated value str format
cols = OrderedDict()
cols['date'] = '%str'
cols['a'] = '%.2f'
cols['b'] = '%.0f'
# List of actual header names to write in table
head_names = ['Date', 'A Col.', 'B Col.']
# Units of header columns to write
head_units = ['',
'(degrees)',
r'(m day\textsuperscript{-1})']
headers = [head_names, head_units]
data = pandas.DataFrame(index=range(5), columns=list(cols.keys()))
data['a'] = range(5)
data['b'] = range(5)
table = write_table(out_root, fname, data, cols, headers)
| mit |
fstagni/DIRAC | Core/Utilities/Graphs/CurveGraph.py | 5 | 4781 | ########################################################################
# $HeadURL$
########################################################################
""" CurveGraph represents simple line graphs with markers.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import darkenColor, to_timestamp, PrettyDateLocator, \
PrettyDateFormatter, PrettyScalarFormatter
from matplotlib.lines import Line2D
from matplotlib.dates import date2num
import datetime
class CurveGraph( PlotBase ):
"""
The CurveGraph class is a straightforward line graph with markers
"""
def __init__(self,data,ax,prefs,*args,**kw):
PlotBase.__init__(self,data,ax,prefs,*args,**kw)
def draw( self ):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot',0.)]
color = self.prefs.get('plot_color','Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot',color)
else:
labels = [(color,0.)]
tmp_max_y = []
tmp_min_y = []
tmp_x = []
for label,num in labels:
xdata = []
ydata = []
xerror = []
yerror = []
color = self.palette.getColor(label)
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
continue
tmp_x.append( key )
tmp_max_y.append( value + error )
tmp_min_y.append( value - error )
xdata.append( key )
ydata.append( value )
xerror.append( 0. )
yerror.append( error )
linestyle = self.prefs.get( 'linestyle', '-' )
marker = self.prefs.get( 'marker', 'o' )
markersize = self.prefs.get( 'markersize', 8. )
markeredgewidth = self.prefs.get( 'markeredgewidth', 1. )
if not self.prefs.get( 'error_bars', False ):
line = Line2D( xdata, ydata, color=color, linewidth=1., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ) )
self.ax.add_line( line )
else:
self.ax.errorbar( xdata, ydata, color=color, linewidth=2., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ), xerr = xerror, yerr = yerror,
ecolor=color )
ymax = max( tmp_max_y )
ymax *= 1.1
    ymin = min( min(tmp_min_y), 0. )
ymin *= 1.1
if 'log_yaxis' in self.prefs:
ymin = 0.001
xmax=max(tmp_x)*1.1
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get( 'ymin', ymin )
ymax = self.prefs.get( 'ymax', ymax )
xmin = self.prefs.get( 'xmin', xmin )
xmax = self.prefs.get( 'xmax', xmax )
self.ax.set_xlim( xmin=xmin, xmax=xmax )
self.ax.set_ylim( ymin=ymin, ymax=ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i+.5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin=xmin,xmax=len(ticks) )
elif self.gdata.key_type == "time":
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter( )
ax.yaxis.set_major_formatter( sf )
else:
return None
| gpl-3.0 |
NelisVerhoef/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
kueihua100/TwanStkEx2 | TwanStkEx2/TwanStkEx2.py | 1 | 4882 | # -*- coding: utf-8 -*-
from sys import exit
# matplotlib requires wxPython 2.8+
import wxversion
try:
wxversion.ensureMinimal('2.8')
except:
exit("wxPython version error: matplotlib requires wxPython 2.8+!!")
import wx
import datetime as dtime
from pandas.io.data import DataReader
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
g_stk_canvas = None
g_stk_plot = None
def PlotHistoryDayPrice(stk_canvas, stk_plot, stock_num = None):
'''
    1. The stock data is fetched from Yahoo! Finance through the pandas DataReader API rather than by hand-crafted URL requests.
    2. This function queries TWSE data only; modify the code if OTC data is needed.
'''
#Clear the current axes
stk_plot.cla()
#Turn the axes grids on
stk_plot.grid(True)
    #if no stock num was given, fall back to a default one
if not stock_num:
stock_num = "^twii"
#data start from:
startday = dtime.date(2000, 1, 1)
#check for TSEC weighted index
if stock_num == "^twii":
stock_title= "{} day price".format("TSEC weighted index")
stock_str = stock_num
else:
stock_title= "{} day price".format(stock_num)
#append ".TW" after stock_num to tell yahoo!Finance to query TWSE stock data.
# If want to query OTC, please add ".TWO"
stock_str = "{}.TW".format(stock_num)
#print stock_str
#plot title
stk_plot.set_title(stock_title)
    #for details on how DataReader() works, refer to data.py in pandas
try:
stock_data = DataReader(stock_str, 'yahoo', startday)
#plot date and price
stk_plot.plot(stock_data.index, stock_data['Close'])
#show
stk_canvas.draw()
except:
# If error happened, show a dialog instead of exit the ap.
#exit("Error happened!!\n")
msg = "Fetch stock data error!\nPlease check the stock num or try again!!"
dlg = wx.MessageDialog(stk_canvas, msg, 'Error', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def StkPlotPanel(parent_widget):
'''
1. Create wxPython canvas: The FigureCanvas contains the figure and does event handling.
'''
plot_panel = wx.Panel(parent_widget)
#Setup figure
stock_fig = Figure()
stock_canvas = FigureCanvasWxAgg(plot_panel, -1, stock_fig)
global g_stk_canvas
g_stk_canvas = stock_canvas
#stock_plt = plt.subplot2grid((1, 1), (0, 0), colspan=1)
stock_plt = stock_fig.add_subplot(1, 1, 1)
global g_stk_plot
g_stk_plot = stock_plt
# Now put stock_canvas into a box
box2 = wx.BoxSizer(wx.VERTICAL)
box2.Add(stock_canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
plot_panel.SetSizer(box2)
#return plot_panel to be added into main_panel for re-sizing handling
return plot_panel
def OnInput(event):
stock_num = event.GetString()
print "[OnInput] stock num is: %s" % stock_num
#show historical day price
PlotHistoryDayPrice(g_stk_canvas, g_stk_plot, stock_num)
def main():
'''
    1. Use wxPython to build a UI where the user can input the stock number to query.
    2. The stock data is fetched from Yahoo! Finance through the pandas DataReader API rather than by hand-crafted URL requests.
'''
#init wx App:
app = wx.App()
#init top level frame and panel
main_frame = wx.Frame(None, -1, "TwanStkEx2")
main_panel = wx.Panel(main_frame)
# widget layout as below:
# ---------------------------
    # - Text: input_control -  (1 box holding 2 widgets in the "input panel")
# ---------------------------
# - -
    # -       stk_panel       - (1 box holding 1 plot in the stk_panel)
# - -
# - -
# ---------------------------
#create input panel:
input_panel = wx.Panel(main_panel)
#create stk panel:
stk_panel = StkPlotPanel(main_panel)
#create info/input widgets:
stock_text = wx.StaticText(input_panel, -1, "Stock num:")
stock_input = wx.TextCtrl(input_panel, -1, "^twii", style=wx.TE_PROCESS_ENTER)
stock_input.Bind(wx.EVT_TEXT_ENTER, OnInput)
#put info/input widgets into a box
box1 = wx.BoxSizer()
box1.Add(stock_text, 1, wx.LEFT|wx.Top, 10)
#add 10 padding/border to stock_text widget
box1.Add(stock_input, 1, wx.LEFT|wx.Top, 10)
#info/input widgets into input panel
input_panel.SetSizer(box1)
#put input_panel and stk_panel into a box
box3 = wx.BoxSizer(wx.VERTICAL)
box3.Add(input_panel, 0)
box3.Add(stk_panel, 1, wx.LEFT | wx.TOP | wx.GROW)
main_panel.SetSizer(box3)
#show historical day price
PlotHistoryDayPrice(g_stk_canvas, g_stk_plot)
#show UI
main_frame.Show(1)
#start UI event loop
app.MainLoop()
if __name__ == '__main__':
main() | mit |
WneleiGao/SAIGtensor.jl | test/workon/pythonvis.py | 1 | 1062 | import numpy as np
from vispy import io, plot as vp
import matplotlib.pyplot as plt
#dat_dir = '../../../'
fname = '/Users/wenlei/Desktop/seismic_cube.npy'
dat_file = fname
data = np.load(dat_file)
nILnXL, nt = data.shape
# Need to know:
nIL = 194 # number of inlines
nXL = 299 # number of crosslines
nt = 463 # number of samples per trace
dt = 0.004 # sample rate in seconds
data = np.reshape(data, newshape = (nIL, nXL, nt))
norm = max((np.abs(np.amax(data)), np.abs(np.amin(data))))
new_data = data.astype('float16')
# A sloppy way to manipulate amplitude values so they
# go from 0-255 as integers to display on graphics card.
new_data /= -2 * norm
new_data = 0.5 + new_data
new_data = 255 * new_data
new_data = new_data[:, :, :450]
vis_data = new_data.astype('int16')
vol_data = np.flipud(np.rollaxis(vis_data, 2)) # data to send to renderer
###
### Make figure
###
fig = vp.Fig()
clim = [130,200] # This is a hack to scale the colors to something pleasing
vol_pw = fig[0, 0]
vol_pw.volume(vol_data, clim=clim, cmap='grays')
fig.show(run=True)
| mit |
hakonsbm/nest-simulator | pynest/examples/clopath_synapse_spike_pairing.py | 1 | 5835 | # -*- coding: utf-8 -*-
#
# clopath_synapse_spike_pairing.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Spike pairing experiment
----------------------------------------
This script simulates one ``aeif_psc_delta_clopath`` neuron that is connected with
a Clopath connection [1]_. The synapse receives pairs of a pre- and a postsynaptic
spikes that are separated by either 10 ms (pre before post) or -10 ms (post
before pre). The change of the synaptic weight is measured after five of such
pairs. This experiment is repeated five times with different rates of the
sequence of the spike pairs: 10Hz, 20Hz, 30Hz, 40Hz, and 50Hz.
References
~~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import numpy as np
import matplotlib.pyplot as pl
import nest
##############################################################################
# First we specify the neuron parameters. To enable voltage dependent
# prefactor ``A_LTD(u_bar_bar)`` add ``A_LTD_const: False`` to the dictionary.
nrn_params = {'V_m': -70.6,
'E_L': -70.6,
'C_m': 281.0,
'theta_minus': -70.6,
'theta_plus': -45.3,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_minus': 10.0,
'tau_plus': 7.0,
'delay_u_bars': 4.0,
'a': 4.0,
'b': 0.0805,
'V_reset': -70.6 + 21.0,
'V_clamp': 33.0,
't_clamp': 2.0,
't_ref': 0.0,
}
##############################################################################
# Hardcoded spike times of presynaptic spike generator
spike_times_pre = [
# Presynaptic spike before the postsynaptic
[20., 120., 220., 320., 420.],
[20., 70., 120., 170., 220.],
[20., 53.3, 86.7, 120., 153.3],
[20., 45., 70., 95., 120.],
[20., 40., 60., 80., 100.],
# Presynaptic spike after the postsynaptic
[120., 220., 320., 420., 520., 620.],
[70., 120., 170., 220., 270., 320.],
[53.3, 86.6, 120., 153.3, 186.6, 220.],
[45., 70., 95., 120., 145., 170.],
[40., 60., 80., 100., 120., 140.]]
##############################################################################
# Hardcoded spike times of postsynaptic spike generator
spike_times_post = [
[10., 110., 210., 310., 410.],
[10., 60., 110., 160., 210.],
[10., 43.3, 76.7, 110., 143.3],
[10., 35., 60., 85., 110.],
[10., 30., 50., 70., 90.],
[130., 230., 330., 430., 530., 630.],
[80., 130., 180., 230., 280., 330.],
[63.3, 96.6, 130., 163.3, 196.6, 230.],
[55., 80., 105., 130., 155., 180.],
[50., 70., 90., 110., 130., 150.]]
init_w = 0.5
syn_weights = []
resolution = 0.1
##############################################################################
# Loop over pairs of spike trains
for (s_t_pre, s_t_post) in zip(spike_times_pre, spike_times_post):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": resolution})
# Create one neuron
nrn = nest.Create("aeif_psc_delta_clopath", 1, nrn_params)
# We need a parrot neuron since spike generators can only
# be connected with static connections
prrt_nrn = nest.Create("parrot_neuron", 1)
# Create and connect spike generators
spike_gen_pre = nest.Create("spike_generator", 1, {
"spike_times": s_t_pre})
nest.Connect(spike_gen_pre, prrt_nrn,
syn_spec={"delay": resolution})
spike_gen_post = nest.Create("spike_generator", 1, {
"spike_times": s_t_post})
nest.Connect(spike_gen_post, nrn, syn_spec={
"delay": resolution, "weight": 80.0})
# Create weight recorder
wr = nest.Create('weight_recorder', 1)
# Create Clopath connection with weight recorder
nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
{"weight_recorder": wr[0]})
syn_dict = {"model": "clopath_synapse_rec",
"weight": init_w, "delay": resolution}
nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
# Simulation
simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
nest.Simulate(simulation_time)
# Extract and save synaptic weights
w_events = nest.GetStatus(wr)[0]["events"]
weights = w_events["weights"]
syn_weights.append(weights[-1])
syn_weights = np.array(syn_weights)
# scaling of the weights so that they are comparable to [1]
syn_weights = 100.0*15.0*(syn_weights - init_w)/init_w + 100.0
# Plot results
fig1, axA = pl.subplots(1, sharex=False)
axA.plot([10., 20., 30., 40., 50.], syn_weights[5:], color='b', lw=2.5, ls='-',
label="pre-post pairing")
axA.plot([10., 20., 30., 40., 50.], syn_weights[:5], color='g', lw=2.5, ls='-',
label="post-pre pairing")
axA.set_ylabel("normalized weight change")
axA.set_xlabel("rho (Hz)")
axA.legend()
axA.set_title("synaptic weight")
pl.show()
| gpl-2.0 |
enigmampc/catalyst | tests/pipeline/base.py | 1 | 5349 | """
Base class for Pipeline API unit tests.
"""
from functools import wraps
import numpy as np
from numpy import arange, prod
from pandas import DataFrame, Timestamp
from six import iteritems
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline import ExecutionPlan
from catalyst.pipeline.term import AssetExists, InputDates
from catalyst.testing import (
check_arrays,
ExplodingObject,
)
from catalyst.testing.fixtures import (
WithAssetFinder,
WithTradingSessions,
CatalystTestCase,
)
from catalyst.utils.functional import dzip_exact
from catalyst.utils.pandas_utils import explode
def with_defaults(**default_funcs):
"""
Decorator for providing dynamic default values for a method.
Usages:
@with_defaults(foo=lambda self: self.x + self.y)
def func(self, foo):
...
If a value is passed for `foo`, it will be used. Otherwise the function
supplied to `with_defaults` will be called with `self` as an argument.
"""
def decorator(f):
@wraps(f)
def method(self, *args, **kwargs):
for name, func in iteritems(default_funcs):
if name not in kwargs:
kwargs[name] = func(self)
return f(self, *args, **kwargs)
return method
return decorator
with_default_shape = with_defaults(shape=lambda self: self.default_shape)
class BasePipelineTestCase(WithTradingSessions,
WithAssetFinder,
CatalystTestCase):
START_DATE = Timestamp('2014', tz='UTC')
END_DATE = Timestamp('2014-12-31', tz='UTC')
ASSET_FINDER_EQUITY_SIDS = list(range(20))
@classmethod
def init_class_fixtures(cls):
super(BasePipelineTestCase, cls).init_class_fixtures()
cls.default_asset_exists_mask = cls.asset_finder.lifetimes(
cls.nyse_sessions[-30:],
include_start_date=False,
)
@property
def default_shape(self):
"""Default shape for methods that build test data."""
return self.default_asset_exists_mask.shape
def run_graph(self, graph, initial_workspace, mask=None):
"""
Compute the given TermGraph, seeding the workspace of our engine with
`initial_workspace`.
Parameters
----------
graph : catalyst.pipeline.graph.TermGraph
Graph to run.
initial_workspace : dict
Initial workspace to forward to SimplePipelineEngine.compute_chunk.
mask : DataFrame, optional
This is a value to pass to `initial_workspace` as the mask from
`AssetExists()`. Defaults to a frame of shape `self.default_shape`
containing all True values.
Returns
-------
results : dict
Mapping from termname -> computed result.
"""
engine = SimplePipelineEngine(
lambda column: ExplodingObject(),
self.nyse_sessions,
self.asset_finder,
)
if mask is None:
mask = self.default_asset_exists_mask
dates, assets, mask_values = explode(mask)
initial_workspace.setdefault(AssetExists(), mask_values)
initial_workspace.setdefault(InputDates(), dates)
return engine.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
def check_terms(self,
terms,
expected,
initial_workspace,
mask,
check=check_arrays):
"""
Compile the given terms into a TermGraph, compute it with
initial_workspace, and compare the results with ``expected``.
"""
start_date, end_date = mask.index[[0, -1]]
graph = ExecutionPlan(
terms,
all_dates=self.nyse_sessions,
start_date=start_date,
end_date=end_date,
)
results = self.run_graph(graph, initial_workspace, mask)
for key, (res, exp) in dzip_exact(results, expected).items():
check(res, exp)
return results
def build_mask(self, array):
"""
Helper for constructing an AssetExists mask from a boolean-coercible
array.
"""
ndates, nassets = array.shape
return DataFrame(
array,
# Use the **last** N dates rather than the first N so that we have
# space for lookbacks.
index=self.nyse_sessions[-ndates:],
columns=self.ASSET_FINDER_EQUITY_SIDS[:nassets],
dtype=bool,
)
@with_default_shape
def arange_data(self, shape, dtype=np.float64):
"""
Build a block of testing data from numpy.arange.
"""
return arange(prod(shape), dtype=dtype).reshape(shape)
@with_default_shape
def randn_data(self, seed, shape):
"""
Build a block of testing data from a seeded RandomState.
"""
return np.random.RandomState(seed).randn(*shape)
@with_default_shape
def eye_mask(self, shape):
"""
Build a mask using np.eye.
"""
return ~np.eye(*shape, dtype=bool)
@with_default_shape
def ones_mask(self, shape):
return np.ones(shape, dtype=bool)
| apache-2.0 |
davidgbe/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 than it typically does.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
carlsonp/kaggle-TrulyNative | scikit_generate_prediction_KNN.py | 1 | 2851 | from __future__ import print_function
import pickle, os, sys, glob, hashlib
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
#https://github.com/ssokolow/fastdupes
import fastdupes
test_files = set(pd.read_csv('./data/sampleSubmission_v2.csv').file.values)
train = pd.read_csv('./data/train_v2.csv')
df_full = pickle.load(open( "df_full.p", "rb"))
#no point using empty files in our training set so we remove them
print('--- Removing empty files')
filepaths = glob.glob('data/*/*.txt')
for filepath in filepaths:
if os.path.getsize(filepath) == 0:
filename = os.path.basename(filepath)
df_full = df_full[df_full.file != filename]
if filename in test_files:
print("Found empty file in submission: ", filename)
print('--- Training KNN')
neigh = KNeighborsClassifier()
train_data = df_full[df_full.sponsored.notnull()].fillna(0)
test = df_full[df_full.sponsored.isnull() & df_full.file.isin(test_files)].fillna(0)
neigh.fit(train_data.drop(['file', 'sponsored'], 1), train_data.sponsored)
print('--- Create predictions and submission')
submission = test[['file']].reset_index(drop=True)
submission['sponsored'] = neigh.predict_proba(test.drop(['file', 'sponsored'], 1))[:, 1]
#if we have duplicate files in the testing set that are in the training set,
#there's no reason to use our prediction, just use the true value!
print("--- Finding duplicate files")
duplicates = 0
dupes = fastdupes.find_dupes(filepaths, exact=True)
for sets in dupes:
counter = 0
total = 0
ratio = None #cached sponsored ratio calculation
for f in dupes[sets]:
filename = os.path.basename(f)
if filename in test_files:
if ratio is None:
#search through same set to find all files in the training set and sum up sponsored totals and increment a counter
#this is needed because there are some instances where there are conflicting reports about duplicate files being
#sponsored or not sponsored, thus we just take an average
for k in dupes[sets]:
past_filename = os.path.basename(k)
if past_filename in train['file'].values:
total += train.loc[train['file'] == past_filename, 'sponsored'].values[0]
counter += 1
if total == 0:
ratio = 0
else:
ratio = float(total) / float(counter)
if ratio is not None:
#average our initial prediction with the calculated ratio
combined_ratio = (submission.loc[submission['file'] == filename, 'sponsored'].values[0] + ratio) / 2
submission.loc[submission['file'] == filename, 'sponsored'] = combined_ratio
duplicates += 1
print("Found: ", duplicates, " duplicate files in the testing set.")
#make sure submission has the correct number of rows
if len(submission) != 66772:
print("Error: wrong dimension! Not generating submission CSV file.")
else:
submission.to_csv('KNN_basic_submission.csv', index=False)
| gpl-3.0 |
skt9/skt9.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either one from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
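#
# For illustration, a matching `publications.tsv` header row (tab-separated)
# might look like the following; the exact column order is an assumption here,
# so match it to your own file:
#
#     pub_date    title    venue    excerpt    citation    url_slug    paper_url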
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), just remove or comment out the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| cc0-1.0 |
gxxjjj/QuantEcon.py | docs/source/conf.py | 3 | 10047 | # -*- coding: utf-8 -*-
#
# QuantEcon documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 14 22:09:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pip
try:
from mock import Mock as MagicMock
except ImportError:
def install(package):
pip.main(['install', package])
install('mock')
from mock import Mock as MagicMock
# ------------------------------------------------------------------- #
# MOCK MODULES
# ------------------------------------------------------------------- #
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pandas', 'statsmodels', 'numba']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../..') + '/quantecon')
sys.path.extend([
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__),
'..', '../..',
'sphinxext')
])
## numpydoc settings
numpydoc_class_members_toctree = False
numpydoc_show_class_members = True
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
# 'ipython_directive',
# 'ipython_console_highlighting',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'QuantEcon'
copyright = u'2014, QuantEcon Developer Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from version import version as quantecon_version
version = quantecon_version
# The full version, including alpha/beta/rc tags.
release = quantecon_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else: # Local build. Need to import rtd theme
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'QuantEcondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'QuantEcon.tex', u'QuantEcon Documentation',
u'QuantEcon Developer Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'quantecon', u'QuantEcon Documentation',
[u'QuantEcon Developer Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'QuantEcon', u'QuantEcon Documentation',
u'QuantEcon Developer Team', 'QuantEcon', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-3-clause |
nelson-liu/scikit-learn | examples/tree/plot_unveil_tree_structure.py | 5 | 4823 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
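# Illustrative note (values are hypothetical, not from the original example):
# for a depth-1 tree with a single split, these parallel arrays could look like
#     children_left  = [ 1, -1, -1]   # -1 marks a leaf (no child)
#     children_right = [ 2, -1, -1]
#     feature        = [ 3, -2, -2]   # -2 is the sentinel used for leaves
#     threshold      = [ 0.8, -2., -2.]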
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method retrieves the node indicator functions. A non-zero element of the
# indicator matrix at position (i, j) indicates that sample i goes
# through node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # skip the leaf node itself: it carries no decision rule
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
             X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
julienr/vispy | vispy/testing/_testing.py | 12 | 12441 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from __future__ import print_function
import numpy as np
import sys
import os
import inspect
from distutils.version import LooseVersion
from ..ext.six import string_types
from ..util import use_log_level
###############################################################################
# Adapted from Python's unittest2
# http://docs.python.org/2/license.html
try:
from unittest.case import SkipTest
except ImportError:
try:
from unittest2.case import SkipTest
except ImportError:
class SkipTest(Exception):
pass
def _safe_rep(obj, short=False):
"""Helper for assert_* ports"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < 80:
return result
return result[:80] + ' [truncated]...'
def _safe_str(obj):
"""Helper for assert_* ports"""
try:
return str(obj)
except Exception:
return object.__str__(obj)
def _format_msg(msg, std_msg):
"""Helper for assert_* ports"""
if msg is None:
msg = std_msg
else:
try:
msg = '%s : %s' % (std_msg, msg)
except UnicodeDecodeError:
msg = '%s : %s' % (_safe_str(std_msg), _safe_str(msg))
return msg
def nottest(func):
"""Decorator to mark a function or method as *not* a test
"""
func.__test__ = False
return func
def assert_raises(exp, func, *args, **kwargs):
"""Backport"""
try:
func(*args, **kwargs)
except exp:
return
std_msg = '%s not raised' % (_safe_rep(exp))
raise AssertionError(_format_msg(None, std_msg))
def assert_in(member, container, msg=None):
"""Backport"""
if member in container:
return
std_msg = '%s not found in %s' % (_safe_rep(member), _safe_rep(container))
raise AssertionError(_format_msg(msg, std_msg))
def assert_true(x, msg=None):
"""Backport"""
if x:
return
std_msg = '%s is not True' % (_safe_rep(x),)
raise AssertionError(_format_msg(msg, std_msg))
def assert_equal(x, y, msg=None):
"""Backport"""
if x == y:
return
std_msg = '%s not equal to %s' % (_safe_rep(x), _safe_rep(y))
raise AssertionError(_format_msg(msg, std_msg))
def assert_not_equal(x, y, msg=None):
"""Backport"""
if x != y:
return
std_msg = '%s equal to %s' % (_safe_rep(x), _safe_rep(y))
raise AssertionError(_format_msg(msg, std_msg))
def assert_not_in(member, container, msg=None):
"""Backport"""
if member not in container:
return
std_msg = '%s found in %s' % (_safe_rep(member), _safe_rep(container))
raise AssertionError(_format_msg(msg, std_msg))
def assert_is(expr1, expr2, msg=None):
"""Backport"""
if expr1 is not expr2:
std_msg = '%s is not %s' % (_safe_rep(expr1), _safe_rep(expr2))
raise AssertionError(_format_msg(msg, std_msg))
class raises(object):
"""Helper class to test exception raising"""
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, exc_typ, exc, tb):
if isinstance(exc, self.exc):
return True
elif exc is None:
raise AssertionError("Expected %s (no exception raised)" %
self.exc.__name__)
else:
raise AssertionError("Expected %s, got %s instead (%s)" %
                                 (self.exc.__name__, type(exc).__name__, exc))
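# Usage sketch for the helper above (illustration only, not part of the
# original module):
#
#     with raises(ValueError):
#         int('not a number')  # passes: the expected ValueError is raised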
###############################################################################
# GL stuff
def has_pyopengl():
try:
from OpenGL import GL # noqa, analysis:ignore
except Exception:
return False
else:
return True
def requires_pyopengl():
return np.testing.dec.skipif(not has_pyopengl(), 'Requires PyOpenGL')
###############################################################################
# App stuff
def has_backend(backend, has=(), capable=(), out=()):
from ..app.backends import BACKENDMAP
using = os.getenv('_VISPY_TESTING_APP', None)
if using is not None and using != backend:
# e.g., we are on a 'pyglet' run but the test requires PyQt4
ret = (False,) if len(out) > 0 else False
for o in out:
ret += (None,)
return ret
# let's follow the standard code path
module_name = BACKENDMAP[backend.lower()][1]
with use_log_level('warning', print_msg=False):
mod = __import__('app.backends.%s' % module_name, globals(), level=2)
mod = getattr(mod.backends, module_name)
good = mod.testable
for h in has:
good = (good and getattr(mod, 'has_%s' % h))
for cap in capable:
good = (good and mod.capability[cap])
ret = (good,) if len(out) > 0 else good
for o in out:
ret += (getattr(mod, o),)
return ret
def has_application(backend=None, has=(), capable=()):
"""Determine if a suitable app backend exists"""
from ..app.backends import BACKEND_NAMES
# avoid importing other backends if we don't need to
if backend is None:
for backend in BACKEND_NAMES:
if has_backend(backend, has=has, capable=capable):
good = True
msg = backend
break
else:
good = False
msg = 'Requires application backend'
else:
good, why = has_backend(backend, has=has, capable=capable,
out=['why_not'])
if not good:
msg = 'Requires %s: %s' % (backend, why)
else:
msg = backend
return good, msg
def composed(*decs):
def deco(f):
for dec in reversed(decs):
f = dec(f)
return f
return deco
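# Note: composed() applies the given decorators in reverse order, so
# composed(a, b)(f) is equivalent to a(b(f)), i.e. the same as stacking
# @a above @b on the function definition.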
def requires_application(backend=None, has=(), capable=()):
"""Return a decorator for tests that require an application"""
good, msg = has_application(backend, has, capable)
dec_backend = np.testing.dec.skipif(not good, "Skipping test: %s" % msg)
try:
import pytest
except Exception:
return dec_backend
dec_app = pytest.mark.vispy_app_test
return composed(dec_app, dec_backend)
def requires_img_lib():
"""Decorator for tests that require an image library"""
from ..io import _check_img_lib
if sys.platform.startswith('win'):
has_img_lib = False # PIL breaks tests on windows (!)
else:
has_img_lib = not all(c is None for c in _check_img_lib())
return np.testing.dec.skipif(not has_img_lib, 'imageio or PIL required')
def has_ipython(version='3.0'):
"""function that checks the presence of IPython"""
# typecast version to a string, in case an integer is given
version = str(version)
try:
import IPython # noqa
except Exception:
return False, "IPython library not found"
else:
if LooseVersion(IPython.__version__) >= LooseVersion(version):
return True, "IPython present"
else:
message = (
"current IPython version: (%s) is "
"older than expected version: (%s)") % \
(IPython.__version__, version)
return False, message
def requires_ipython(version='3.0'):
ipython_present, message = has_ipython(version)
return np.testing.dec.skipif(not ipython_present, message)
def has_matplotlib(version='1.2'):
"""Determine if mpl is a usable version"""
try:
import matplotlib
except Exception:
has_mpl = False
else:
if LooseVersion(matplotlib.__version__) >= LooseVersion(version):
has_mpl = True
else:
has_mpl = False
return has_mpl
###############################################################################
# Visuals stuff
def _has_scipy(min_version):
try:
assert isinstance(min_version, string_types)
import scipy # noqa, analysis:ignore
from distutils.version import LooseVersion
this_version = LooseVersion(scipy.__version__)
if this_version < min_version:
return False
except Exception:
return False
else:
return True
def requires_scipy(min_version='0.13'):
return np.testing.dec.skipif(not _has_scipy(min_version),
'Requires Scipy version >= %s' % min_version)
@nottest
def TestingCanvas(bgcolor='black', size=(100, 100), dpi=None, decorate=False,
**kwargs):
"""Class wrapper to avoid importing scene until necessary"""
# On Windows decorations can force windows to be an incorrect size
# (e.g., instead of 100x100 they will be 100x248), having no
# decorations works around this
from ..scene import SceneCanvas
class TestingCanvas(SceneCanvas):
def __init__(self, bgcolor, size, dpi, decorate, **kwargs):
self._entered = False
self._wanted_vp = None
SceneCanvas.__init__(self, bgcolor=bgcolor, size=size,
dpi=dpi, decorate=decorate,
**kwargs)
def __enter__(self):
SceneCanvas.__enter__(self)
            # sometimes our window can be larger than our requested draw
# area (e.g. on Windows), and this messes up our tests that
# typically use very small windows. Here we "fix" it.
scale = np.array(self.physical_size) / np.array(self.size, float)
scale = int(np.round(np.mean(scale)))
self._wanted_vp = 0, 0, size[0] * scale, size[1] * scale
self.context.set_state(clear_color=self._bgcolor)
self.context.set_viewport(*self._wanted_vp)
self._entered = True
return self
def draw_visual(self, visual, event=None):
if not self._entered:
return
SceneCanvas.draw_visual(self, visual, event)
self.context.finish()
return TestingCanvas(bgcolor, size, dpi, decorate, **kwargs)
@nottest
def save_testing_image(image, location):
from ..gloo.util import _screenshot
from ..util import make_png
if image == "screenshot":
image = _screenshot(alpha=False)
with open(location+'.png', 'wb') as fid:
fid.write(make_png(image))
@nottest
def run_tests_if_main():
"""Run tests in a given file if it is run as a script"""
local_vars = inspect.currentframe().f_back.f_locals
if not local_vars.get('__name__', '') == '__main__':
return
# we are in a "__main__"
fname = local_vars['__file__']
# Run ourselves. post-mortem debugging!
try:
import faulthandler
faulthandler.enable()
except Exception:
pass
import __main__
try:
import pytest
pytest.main(['-s', '--tb=short', fname])
except ImportError:
print('==== Running tests in script\n==== %s' % fname)
run_tests_in_object(__main__)
print('==== Tests pass')
def run_tests_in_object(ob):
# Setup
for name in dir(ob):
if name.lower().startswith('setup'):
print('Calling %s' % name)
getattr(ob, name)()
# Exec
for name in sorted(dir(ob), key=lambda x: x.lower()): # consistent order
val = getattr(ob, name)
if name.startswith('_'):
continue
elif callable(val) and (name[:4] == 'test' or name[-4:] == 'test'):
print('Running test-func %s ... ' % name, end='')
try:
val()
print('ok')
except Exception as err:
if 'skiptest' in err.__class__.__name__.lower():
print('skip')
else:
raise
elif isinstance(val, type) and 'Test' in name:
print('== Running test-class %s' % name)
run_tests_in_object(val())
print('== Done with test-class %s' % name)
# Teardown
for name in dir(ob):
if name.lower().startswith('teardown'):
print('Calling %s' % name)
getattr(ob, name)()
| bsd-3-clause |
microsoft/LightGBM | tests/python_package_test/test_consistency.py | 1 | 5908 | # coding: utf-8
from pathlib import Path
import numpy as np
from sklearn.datasets import load_svmlight_file
import lightgbm as lgb
EXAMPLES_DIR = Path(__file__).absolute().parents[2] / 'examples'
class FileLoader:
def __init__(self, directory, prefix, config_file='train.conf'):
self.directory = directory
self.prefix = prefix
self.params = {'gpu_use_dp': True}
with open(self.directory / config_file, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
key, value = [token.strip() for token in line.split('=')]
if 'early_stopping' not in key: # disable early_stopping
self.params[key] = value if key != 'num_trees' else int(value)
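        # For illustration (hypothetical config line): "num_trees = 100" ends
        # up as self.params['num_trees'] = 100 (cast to int), other keys keep
        # their raw string value, and any key containing 'early_stopping' is
        # skipped entirely.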
def load_dataset(self, suffix, is_sparse=False):
filename = str(self.path(suffix))
if is_sparse:
X, Y = load_svmlight_file(filename, dtype=np.float64, zero_based=True)
return X, Y, filename
else:
mat = np.loadtxt(filename, dtype=np.float64)
return mat[:, 1:], mat[:, 0], filename
def load_field(self, suffix):
return np.loadtxt(str(self.directory / f'{self.prefix}{suffix}'))
def load_cpp_result(self, result_file='LightGBM_predict_result.txt'):
return np.loadtxt(str(self.directory / result_file))
def train_predict_check(self, lgb_train, X_test, X_test_fn, sk_pred):
params = dict(self.params)
params['force_row_wise'] = True
gbm = lgb.train(params, lgb_train)
y_pred = gbm.predict(X_test)
cpp_pred = gbm.predict(X_test_fn)
np.testing.assert_allclose(y_pred, cpp_pred)
np.testing.assert_allclose(y_pred, sk_pred)
def file_load_check(self, lgb_train, name):
lgb_train_f = lgb.Dataset(self.path(name), params=self.params).construct()
for f in ('num_data', 'num_feature', 'get_label', 'get_weight', 'get_init_score', 'get_group'):
a = getattr(lgb_train, f)()
b = getattr(lgb_train_f, f)()
if a is None and b is None:
pass
elif a is None:
assert np.all(b == 1), f
elif isinstance(b, (list, np.ndarray)):
np.testing.assert_allclose(a, b)
else:
assert a == b, f
def path(self, suffix):
return self.directory / f'{self.prefix}{suffix}'
def test_binary():
fd = FileLoader(EXAMPLES_DIR / 'binary_classification', 'binary')
X_train, y_train, _ = fd.load_dataset('.train')
X_test, _, X_test_fn = fd.load_dataset('.test')
weight_train = fd.load_field('.train.weight')
lgb_train = lgb.Dataset(X_train, y_train, params=fd.params, weight=weight_train)
gbm = lgb.LGBMClassifier(**fd.params)
gbm.fit(X_train, y_train, sample_weight=weight_train)
sk_pred = gbm.predict_proba(X_test)[:, 1]
fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred)
fd.file_load_check(lgb_train, '.train')
def test_binary_linear():
fd = FileLoader(EXAMPLES_DIR / 'binary_classification', 'binary', 'train_linear.conf')
X_train, y_train, _ = fd.load_dataset('.train')
X_test, _, X_test_fn = fd.load_dataset('.test')
weight_train = fd.load_field('.train.weight')
lgb_train = lgb.Dataset(X_train, y_train, params=fd.params, weight=weight_train)
gbm = lgb.LGBMClassifier(**fd.params)
gbm.fit(X_train, y_train, sample_weight=weight_train)
sk_pred = gbm.predict_proba(X_test)[:, 1]
fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred)
fd.file_load_check(lgb_train, '.train')
def test_multiclass():
fd = FileLoader(EXAMPLES_DIR / 'multiclass_classification', 'multiclass')
X_train, y_train, _ = fd.load_dataset('.train')
X_test, _, X_test_fn = fd.load_dataset('.test')
lgb_train = lgb.Dataset(X_train, y_train)
gbm = lgb.LGBMClassifier(**fd.params)
gbm.fit(X_train, y_train)
sk_pred = gbm.predict_proba(X_test)
fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred)
fd.file_load_check(lgb_train, '.train')
def test_regression():
fd = FileLoader(EXAMPLES_DIR / 'regression', 'regression')
X_train, y_train, _ = fd.load_dataset('.train')
X_test, _, X_test_fn = fd.load_dataset('.test')
init_score_train = fd.load_field('.train.init')
lgb_train = lgb.Dataset(X_train, y_train, init_score=init_score_train)
gbm = lgb.LGBMRegressor(**fd.params)
gbm.fit(X_train, y_train, init_score=init_score_train)
sk_pred = gbm.predict(X_test)
fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred)
fd.file_load_check(lgb_train, '.train')
def test_lambdarank():
fd = FileLoader(EXAMPLES_DIR / 'lambdarank', 'rank')
X_train, y_train, _ = fd.load_dataset('.train', is_sparse=True)
X_test, _, X_test_fn = fd.load_dataset('.test', is_sparse=True)
group_train = fd.load_field('.train.query')
lgb_train = lgb.Dataset(X_train, y_train, group=group_train)
params = dict(fd.params)
params['force_col_wise'] = True
gbm = lgb.LGBMRanker(**params)
gbm.fit(X_train, y_train, group=group_train)
sk_pred = gbm.predict(X_test)
fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred)
fd.file_load_check(lgb_train, '.train')
def test_xendcg():
fd = FileLoader(EXAMPLES_DIR / 'xendcg', 'rank')
X_train, y_train, _ = fd.load_dataset('.train', is_sparse=True)
X_test, _, X_test_fn = fd.load_dataset('.test', is_sparse=True)
group_train = fd.load_field('.train.query')
lgb_train = lgb.Dataset(X_train, y_train, group=group_train)
gbm = lgb.LGBMRanker(**fd.params)
gbm.fit(X_train, y_train, group=group_train)
sk_pred = gbm.predict(X_test)
fd.train_predict_check(lgb_train, X_test, X_test_fn, sk_pred)
fd.file_load_check(lgb_train, '.train')
| mit |
TomAugspurger/pandas | pandas/tests/tslibs/test_liboffsets.py | 3 | 5046 | """
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import roll_qtrday
from pandas import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_last_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_get_last_bday(dt, exp_week_day, exp_last_day):
assert dt.weekday() == exp_week_day
assert liboffsets.get_lastbday(dt.year, dt.month) == exp_last_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_get_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert liboffsets.get_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shift_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shift_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected
def test_shift_month_error():
dt = datetime(2017, 11, 15)
day_opt = "this should raise"
with pytest.raises(ValueError, match=day_opt):
liboffsets.shift_month(dt, 3, day_opt=day_opt)
@pytest.mark.parametrize(
"other,expected",
[
# Before March 1.
(datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}),
# After March 1.
(Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [2, -7, 0])
def test_roll_yearday(other, expected, n):
month = 3
day_opt = "start" # `other` will be compared to March 1.
assert liboffsets.roll_yearday(other, n, month, day_opt) == expected[n]
@pytest.mark.parametrize(
"other,expected",
[
# Before June 30.
(datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}),
# After June 30.
(Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [5, -7, 0])
def test_roll_yearday2(other, expected, n):
month = 6
day_opt = "end" # `other` will be compared to June 30.
assert liboffsets.roll_yearday(other, n, month, day_opt) == expected[n]
def test_get_day_of_month_error():
# get_day_of_month is not directly exposed.
# We test it via roll_yearday.
dt = datetime(2017, 11, 15)
day_opt = "foo"
with pytest.raises(ValueError, match=day_opt):
# To hit the raising case we need month == dt.month and n > 0.
liboffsets.roll_yearday(dt, n=3, month=11, day_opt=day_opt)
@pytest.mark.parametrize(
"month",
[3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3)
)
@pytest.mark.parametrize("n", [4, -3])
def test_roll_qtr_day_not_mod_unequal(day_opt, month, n):
expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}}
other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday.
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n]
@pytest.mark.parametrize(
"other,month,exp_dict",
[
# Monday.
(datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}),
# Saturday.
(
Timestamp(2072, 10, 1, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1, "business_start": 1}},
),
# First business day.
(
Timestamp(2072, 10, 3, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1}, -1: {"start": 0}},
),
],
)
@pytest.mark.parametrize("n", [2, -1])
def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt):
# All cases have (other.month % 3) == (month % 3).
expected = exp_dict.get(n, {}).get(day_opt, n)
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected
@pytest.mark.parametrize(
"n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})]
)
@pytest.mark.parametrize("compare", [29, 1, 31])
def test_roll_convention(n, expected, compare):
assert liboffsets.roll_convention(29, n, compare) == expected[compare]
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/utils/tests/test_random.py | 38 | 7410 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
    # a large number of trials prevents false negatives without slowing the
    # normal case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with a sampling algorithm
        # that does not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
ldirer/scikit-learn | sklearn/utils/deprecation.py | 36 | 2418 | import warnings
__all__ = ["deprecated", ]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra : string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
| bsd-3-clause |
jeasoft/odoo | comunity_modules/commission_payment/model/commission.py | 2 | 63464 | from openerp.osv import osv, fields
import mx.DateTime
from openerp.addons.decimal_precision import decimal_precision as dp
import datetime
from openerp.tools.translate import _
from pandas import DataFrame
COMMISSION_STATES = [
('draft', 'Draft'),
('open', 'In Progress'),
('done', 'Done'),
('cancel', 'Cancelled'),
]
COMMISSION_TYPES = [
('partial_payment', 'Partial Payments'),
('fully_paid_invoice', 'Fully Paid Invoices'),
]
COMMISSION_SALESMAN_POLICY = [
('on_invoice', 'Invoice'),
('on_invoiced_partner', 'Partner'),
('on_accounting_partner', 'Commercial Entity'),
]
COMMISSION_SCOPES = [
('whole_invoice', 'Whole Invoice'),
('product_invoiced', 'Invoiced Products '),
]
COMMISSION_POLICY_DATE_START = [
('invoice_emission_date', 'Emission Date'),
('invoice_due_date', 'Due Date'),
]
COMMISSION_POLICY_DATE_END = [
('last_payment_date', 'Last Payment on Invoice'),
('date_on_payment', 'Date of Payment'),
]
COMMISSION_POLICY_BAREMO = [
('onCompany', 'Company'),
('onPartner', 'Partner'),
('onAccountingPartner', 'Commercial Entity'),
('onUser', 'Salespeople'),
('onCommission', 'Document'),
]
def t_time(date):
'''
Trims time from "%Y-%m-%d %H:%M:%S" to "%Y-%m-%d"
'''
date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
date = datetime.date(date.year, date.month, date.day)
return date.strftime("%Y-%m-%d")
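# Usage sketch (illustration only): t_time('2014-05-20 13:45:00') -> '2014-05-20'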
class commission_payment(osv.Model):
"""
OpenERP Model : commission_payment
"""
_name = 'commission.payment'
_inherit = ['mail.thread', 'message.post.show.all']
_description = __doc__
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid,
context=context)
if not company_id:
raise osv.except_osv(
_('Error!'),
_('There is no default company for the current user!'))
return company_id
_columns = {
'name': fields.char(
'Commission Concept', size=256, required=True,
readonly=True, states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'baremo_id': fields.many2one(
'baremo.book', 'Baremo', required=True,
readonly=True, states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'date_start': fields.date(
'Start Date', required=True, readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'date_stop': fields.date(
'End Date', required=True, readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'total_comm': fields.float(
'Total Commission',
digits_compute=dp.get_precision('Commission'),
readonly=True, states={'write': [('readonly', False)]},
track_visibility='onchange',
),
'sale_noids': fields.one2many(
'commission.sale.noid', 'commission_id',
            'Products without association', readonly=True,
states={'write': [('readonly', False)]}),
'noprice_ids': fields.one2many(
'commission.noprice', 'commission_id',
            'Products without historic list price', readonly=True,
states={'write': [('readonly', False)]}),
'comm_line_product_ids': fields.one2many(
'commission.lines', 'commission_id',
            'Commission per product', readonly=True,
domain=[('product_id', '!=', False)],
states={'write': [('readonly', False)]}),
'comm_line_invoice_ids': fields.one2many(
'commission.lines', 'commission_id',
            'Commission per product', readonly=True,
domain=[('product_id', '=', False)],
states={'write': [('readonly', False)]}),
'comm_line_ids': fields.one2many(
'commission.lines', 'commission_id',
            'Commission per product', readonly=True,
states={'write': [('readonly', False)]}),
'salesman_ids': fields.one2many(
'commission.salesman', 'commission_id',
'Salespeople Commissions', readonly=True,
states={'write': [('readonly', False)]}),
'user_ids': fields.many2many(
'res.users', 'commission_users',
'commission_id', 'user_id', 'Salespeople', required=True,
readonly=True, states={'draft': [('readonly', False)]}),
'invoice_ids': fields.many2many(
'account.invoice', 'commission_account_invoice', 'commission_id',
'invoice_id', 'Invoices', readonly=True,
states={'draft': [('readonly', False)]}),
'aml_ids': fields.many2many(
'account.move.line', 'commission_aml_rel', 'commission_id',
'aml_id', 'Journal Items', readonly=True,
),
'comm_voucher_ids': fields.one2many(
'commission.voucher',
            'commission_id', 'Vouchers affected by this commission',
readonly=True, states={'write': [('readonly', False)]}),
'comm_invoice_ids': fields.one2many(
'commission.invoice',
            'commission_id', 'Invoices affected by this commission',
readonly=True, states={'write': [('readonly', False)]}),
'state': fields.selection(
            COMMISSION_STATES, 'State', readonly=True,
track_visibility='onchange',
),
'commission_type': fields.selection(
COMMISSION_TYPES,
string='Basis', required=True,
readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'commission_scope': fields.selection(
COMMISSION_SCOPES,
string='Scope', required=False,
readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'commission_policy_date_start': fields.selection(
COMMISSION_POLICY_DATE_START,
string='Start Date Computation Policy', required=False,
readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'commission_policy_date_end': fields.selection(
COMMISSION_POLICY_DATE_END,
string='End Date Computation Policy', required=False,
readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'commission_salesman_policy': fields.selection(
COMMISSION_SALESMAN_POLICY,
string='Salesman Policy', required=False,
readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'commission_baremo_policy': fields.selection(
COMMISSION_POLICY_BAREMO,
string='Baremo Policy', required=False,
readonly=True,
states={'draft': [('readonly', False)]},
track_visibility='onchange',
),
'company_id': fields.many2one('res.company', 'Company',
readonly='1'),
'currency_id': fields.related(
'company_id', 'currency_id',
string='Currency',
relation='res.currency',
type='many2one',
store=True,
readonly=True,
help=('Currency at which this report will be \
                  expressed. If not selected, the one set \
                  in the company will be used.')),
'exchange_date': fields.date('Exchange Date', help=('Date of change\
that will be\
printed in the\
report, with\
respect to the\
currency of the\
company')),
'comm_fix': fields.boolean('Fix Commissions?'),
'unknown_salespeople': fields.boolean('Allow Unknown Salespeople?'),
}
_defaults = {
'name': lambda *a: None,
'total_comm': lambda *a: 0.00,
'state': lambda *a: 'draft',
'company_id': _get_default_company,
}
def action_view_fixlines(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing commission
        lines of the given commission payment ids that still require some
        details in order to properly compute commissions.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'commission_payment',
'comm_line_fix_act')
idx = result and result[1] or False
result = act_obj.read(cr, uid, [idx], context=context)[0]
# compute the number of payments to display
cl_ids = []
for cp_brw in self.browse(cr, uid, ids, context=context):
cl_ids += [cl_brw.id for cs_brw in cp_brw.salesman_ids
if not cs_brw.salesman_id
for cl_brw in cs_brw.comm_lines_ids
]
# choose the view_mode accordingly
if len(cl_ids) > 0:
result['domain'] = "[('id','in',["+','.join(
[str(cl_id) for cl_id in cl_ids]
)+"])]"
else:
result['domain'] = "[('id','in',[])]"
return result
def action_view_payment(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing payments of
        the given commission payment ids. It can be either a list or a form
        view, if there is only one payment to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'commission_payment',
'action_account_moves_all_tree')
idx = result and result[1] or False
result = act_obj.read(cr, uid, [idx], context=context)[0]
# compute the number of payments to display
aml_ids = []
for cp_brw in self.browse(cr, uid, ids, context=context):
aml_ids += [aml_brw.id for aml_brw in cp_brw.aml_ids]
# choose the view_mode accordingly
if len(aml_ids) > 1:
result['domain'] = "[('id','in',["+','.join(
[str(aml_id) for aml_id in aml_ids]
)+"])]"
else:
result['domain'] = "[('id','in',[])]"
return result
def action_view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of
        the given commission payment ids. It can be either a list or a form
        view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account',
'action_invoice_tree1')
idx = result and result[1] or False
result = act_obj.read(cr, uid, [idx], context=context)[0]
# compute the number of invoices to display
inv_ids = []
for cp_brw in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in cp_brw.invoice_ids]
# choose the view_mode accordingly
if len(inv_ids) > 1:
result['domain'] = "[('id','in',["+','.join(
[str(inv_id) for inv_id in inv_ids]
)+"])]"
else:
result['domain'] = "[('id','in',[])]"
return result
def _prepare_based_on_payments(self, cr, uid, ids, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
aml_obj = self.pool.get('account.move.line')
for comm_brw in self.browse(cr, uid, ids, context=context):
date_start = comm_brw.date_start
date_stop = comm_brw.date_stop
# In this search we will restrict domain to those Entry Lines
# coming from a Cash or Bank Journal within the given dates
args = [('state', '=', 'valid'),
('date', '>=', date_start),
('date', '<=', date_stop),
('journal_id.type', 'in', ('bank', 'cash')),
('credit', '>', 0.0),
('paid_comm', '=', False),
]
aml_ids = aml_obj.search(
cr, uid, args + [('rec_invoice', '!=', False)],
context=context)
aml_ids += aml_obj.search(
cr, uid, args + [('rec_aml', '!=', False)],
context=context)
aml_ids = list(set(aml_ids))
comm_brw.write({
'aml_ids': [(6, comm_brw.id, aml_ids)]})
invoice_ids = [aml_brw.rec_invoice.id
for aml_brw in comm_brw.aml_ids
if aml_brw.rec_invoice
]
invoice_ids = list(set(invoice_ids))
comm_brw.write({'invoice_ids': [(6, comm_brw.id, invoice_ids)]})
return True
def _prepare_based_on_invoices(self, cr, uid, ids, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
inv_obj = self.pool.get('account.invoice')
aml_obj = self.pool.get('account.move.line')
for comm_brw in self.browse(cr, uid, ids, context=context):
comm_brw.write({'aml_ids': []})
date_start = comm_brw.date_start
date_stop = comm_brw.date_stop
            # This search is restricted to customer invoices that have been
            # paid and whose last payment falls within the stipulated dates
invoice_ids = inv_obj.search(
cr, uid, [('state', '=', 'paid'),
('type', '=', 'out_invoice'),
('date_last_payment', '>=', date_start),
('date_last_payment', '<=', date_stop),
], context=context)
comm_brw.write({
'invoice_ids': [(6, comm_brw.id, invoice_ids)]})
aml_ids = [aml_brw.id for inv_brw in comm_brw.invoice_ids
for aml_brw in inv_brw.payment_ids
if aml_brw.journal_id.type in ('bank', 'cash')
]
aml_ids2 = aml_obj.search(
cr, uid, [('state', '=', 'valid'),
('reconcile_id', '!=', False),
('journal_id.type', '=', 'sale'),
('date_last_payment', '>=', date_start),
('date_last_payment', '<=', date_stop),
], context=context)
aml_ids2 = aml_obj.search(
cr, uid, [('state', '=', 'valid'),
('reconcile_id', '!=', False),
('journal_id.type', 'in', ('bank', 'cash')),
('rec_aml', 'in', aml_ids2),
# ('date_last_payment', '>=', date_start),
# ('date_last_payment', '<=', date_stop),
], context=context)
aml_ids = list(set(aml_ids + aml_ids2))
comm_brw.write({'aml_ids': [(6, comm_brw.id, aml_ids)]})
return True
def _get_commission_rate(self, cr, uid, ids, pay_date, inv_date, dcto=0.0,
bar_brw=None, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
comm_brw = self.browse(cr, uid, ids[0], context=context)
        # Determine the days between the emission of the product's invoice
        # and its payment
pay_date = mx.DateTime.strptime(pay_date, '%Y-%m-%d')
inv_date = mx.DateTime.strptime(inv_date, '%Y-%m-%d')
emission_days = (pay_date - inv_date).day
        # With the days and the product discount at hand, search the baremo
        # for the corresponding commission value for the product in
        # question; the table is entered by the number of days.
        # This search returns the day brackets sorted from lowest to highest,
        # as they are stipulated to be ordered in the baremo module.
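        # Illustrative sketch (hypothetical values): with day brackets at 30,
        # 60 and 90 and emission_days = 45, the loop below settles on the
        # 60-day bracket and then scans its discount lines for the first one
        # whose porc_disc covers the product discount (dcto), taking its
        # porc_com as the commission percentage.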
bar_day_ids = bar_brw and bar_brw.bar_ids or comm_brw.baremo_id.bar_ids
no_days = True
no_dcto = True
for day_id in bar_day_ids:
            # Look for a baremo day bracket that covers emission_days
if emission_days <= day_id.number:
bar_day = day_id.number
no_days = False
no_dcto = True
for dcto_id in day_id.disc_ids:
# Se busca que el baremo tenga un rango para el valor de
# descuento en producto
if (dcto - dcto_id.porc_disc) <= 0.01:
bardctdsc = dcto_id.porc_disc
if bardctdsc == 0.0:
                            # when the baremo discount is zero (0) it
                            # is not reflected, so we force it to be a
                            # zero (0) value.
bardctdsc = 0.0
bar_dcto_comm = dcto_id.porc_com
no_dcto = False
break
break
if (not no_days) and no_dcto:
bar_dcto_comm = 0.0
bardctdsc = 0.0
        # If emission_days is not covered by any baremo bracket, we then say
        # that the commission is zero (0) %
elif no_days and no_dcto:
            # We will say the baremo days are minus one (-1) when the
            # emission days do not fall within the baremo range
bar_day = '0.0'
bardctdsc = 0.0
bar_dcto_comm = 0.0
res = dict(
bar_day=bar_day,
bar_dcto_comm=bar_dcto_comm,
bardctdsc=bardctdsc,
emission_days=emission_days,
)
return res
def _get_commission_policy_start_date(self, cr, uid, ids, pay_id,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
aml_obj = self.pool.get('account.move.line')
comm_brw = self.browse(cr, uid, ids[0], context=context)
aml_brw = aml_obj.browse(cr, uid, pay_id, context=context)
date = False
if comm_brw.commission_policy_date_start == 'invoice_emission_date':
if aml_brw.rec_invoice:
date = aml_brw.rec_invoice.date_invoice
else:
date = aml_brw.rec_aml.date
elif comm_brw.commission_policy_date_start == 'invoice_due_date':
if aml_brw.rec_invoice:
date = aml_brw.rec_invoice.date_due
else:
date = aml_brw.rec_aml.date_maturity
return date
def _get_commission_policy_end_date(self, cr, uid, ids, pay_id,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
aml_obj = self.pool.get('account.move.line')
comm_brw = self.browse(cr, uid, ids[0], context=context)
aml_brw = aml_obj.browse(cr, uid, pay_id, context=context)
date = False
if comm_brw.commission_policy_date_end == 'last_payment_date':
date = aml_brw.rec_aml.date_last_payment
elif comm_brw.commission_policy_date_end == 'date_on_payment':
date = aml_brw.date
return date
def _get_commission_saleman(self, cr, uid, ids, salesman_brw,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
if not salesman_brw:
return None
comm_brw = self.browse(cr, uid, ids[0], context=context)
user_ids = [usr_brw.id for usr_brw in comm_brw.user_ids]
if not user_ids:
return salesman_brw
if salesman_brw.id not in user_ids:
return None
return salesman_brw
def _get_commission_salesman_policy(self, cr, uid, ids, pay_id,
salesman_id=None, context=None):
if salesman_id:
return salesman_id
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
aml_obj = self.pool.get('account.move.line')
rp_obj = self.pool.get('res.partner')
comm_brw = self.browse(cr, uid, ids[0], context=context)
aml_brw = aml_obj.browse(cr, uid, pay_id, context=context)
res = None
if aml_brw.rec_invoice:
if comm_brw.commission_salesman_policy == 'on_invoice':
res = aml_brw.rec_invoice.user_id
elif comm_brw.commission_salesman_policy == \
'on_invoiced_partner':
res = aml_brw.rec_invoice.partner_id.user_id
elif comm_brw.commission_salesman_policy == \
'on_accounting_partner':
res = rp_obj._find_accounting_partner(
aml_brw.rec_invoice.partner_id).user_id
else:
if comm_brw.commission_salesman_policy in \
('on_invoiced_partner', 'on_invoice'):
res = aml_brw.rec_aml.partner_id.user_id
elif comm_brw.commission_salesman_policy == \
'on_accounting_partner':
res = rp_obj._find_accounting_partner(
aml_brw.rec_aml.partner_id).user_id
return res
def _get_commission_policy_baremo(self, cr, uid, ids, pay_id,
partner_id=None, salesman_id=None,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
partner_id = partner_id or None
aml_obj = self.pool.get('account.move.line')
rp_obj = self.pool.get('res.partner')
comm_brw = self.browse(cr, uid, ids[0], context=context)
aml_brw = aml_obj.browse(cr, uid, pay_id, context=context)
res = None
if comm_brw.commission_baremo_policy == 'onCompany':
partner_id = comm_brw.company_id.partner_id
elif comm_brw.commission_baremo_policy == 'onPartner':
if aml_brw.rec_invoice:
partner_id = partner_id or aml_brw.rec_invoice.partner_id
else:
partner_id = partner_id or aml_brw.rec_aml.partner_id
elif comm_brw.commission_baremo_policy == 'onAccountingPartner':
if aml_brw.rec_invoice:
partner_id = partner_id or aml_brw.rec_invoice.partner_id
else:
partner_id = partner_id or aml_brw.rec_aml.partner_id
partner_id = rp_obj._find_accounting_partner(partner_id)
elif comm_brw.commission_baremo_policy == 'onUser':
partner_id = self._get_commission_salesman_policy(
cr, uid, ids[0], pay_id, salesman_id=salesman_id,
context=context).partner_id
elif comm_brw.commission_baremo_policy == 'onCommission':
res = comm_brw.baremo_id
# Fall back to baremo in Commission
if partner_id:
res = partner_id.baremo_id
else:
res = comm_brw.baremo_id
return res
def _get_commission_payment_on_invoice_line(self, cr, uid, ids, pay_id,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
comm_brw = self.browse(cr, uid, ids[0], context=context)
aml_obj = self.pool.get('account.move.line')
prod_prices = self.pool.get('product.historic.price')
sale_noids = self.pool.get('commission.sale.noid')
noprice_ids = self.pool.get('commission.noprice')
comm_line_ids = self.pool.get('commission.lines')
aml_brw = aml_obj.browse(cr, uid, pay_id, context=context)
if not aml_brw.credit:
return True
# Retrieve Partner's Salesman
salesman = self._get_commission_salesman_policy(cr, uid, ids, pay_id,
context=context)
salesman_ok = self._get_commission_saleman(cr, uid, ids, salesman,
context=context)
if not salesman_ok:
if not (comm_brw.unknown_salespeople and not salesman):
return True
commission_policy_date_start = \
self._get_commission_policy_start_date(cr, uid, ids, pay_id,
context=context)
commission_policy_date_end = \
self._get_commission_policy_end_date(cr, uid, ids, pay_id,
context=context)
        # If we get here, this line is tied to a valid invoice id.
inv_brw = aml_brw.rec_invoice
commission_policy_baremo = \
self._get_commission_policy_baremo(cr, uid, ids, pay_id,
context=context)
        # Go through every invoice line (products)
for inv_lin in inv_brw.invoice_line:
            # Check whether the line has an associated product
if inv_lin.product_id:
                # DETERMINE THE TAX PERCENTAGE ON THE LINE (perc_iva)
                # =============================================================
                # Determine whether the invoice line carries a tax (perc_iva).
                # The tax applied to a line equals the sum of its taxes; all
                # taxes are assumed to be percentage-based.
perc_iva = (inv_lin.invoice_line_tax_id and
sum([tax.amount for tax in
inv_lin.invoice_line_tax_id]) * 100 or 0.0)
                # At this point the line does have an associated product
prod_id = inv_lin.product_id.product_tmpl_id.id
                # Fetch the price list entries; by default they come ordered
                # by the product.historic object from the most recent date to
                # the oldest
price_ids = prod_prices.search(
cr, uid,
[('product_id', '=', prod_id)])
                # Look up the historic sale price of this product at the
                # invoice date
no_price = True
for price_id in price_ids:
prod_prices_brw = \
prod_prices.browse(cr, uid, price_id, context=context)
if inv_brw.date_invoice >= t_time(prod_prices_brw.name):
list_price = prod_prices_brw.price
list_date = prod_prices_brw.name
no_price = False
break
if not no_price:
                    # Determine how much discount was applied to this
                    # product at the time of the sale
if abs((inv_lin.price_subtotal / inv_lin.quantity) -
inv_lin.price_unit) > 0.05:
                        # this makes sure the line-level discount is not
                        # being overlooked
price_unit = round((inv_lin.price_subtotal /
inv_lin.quantity), 2)
else:
price_unit = inv_lin.price_unit
if list_price:
dcto = round((list_price - price_unit) * 100 /
list_price, 1)
rate_item = dcto
commission_params = self._get_commission_rate(
cr, uid, comm_brw.id,
commission_policy_date_end,
commission_policy_date_start, dcto=0.0,
bar_brw=commission_policy_baremo)
bar_day = commission_params['bar_day']
bar_dcto_comm = commission_params['bar_dcto_comm']
bardctdsc = commission_params['bardctdsc']
emission_days = commission_params['emission_days']
                    ##############################################
                    # COMMISSION CALCULATION PER PRODUCT LINE    #
                    ##############################################
penbxlinea = aml_brw.credit * (
inv_lin.price_subtotal /
inv_brw.amount_untaxed)
fact_sup = 1 - 0.0 / 100 - 0.0 / 100
fact_inf = 1 + (perc_iva / 100) * (1 - 0.0 / 100) - \
0.0 / 100 - 0.0 / 100
comm_line = penbxlinea * fact_sup * (
bar_dcto_comm / 100) / fact_inf
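                    # Worked example (hypothetical figures): a credit of 100.0
                    # paying a line worth half of the untaxed invoice total,
                    # with perc_iva = 12 and bar_dcto_comm = 5, gives
                    # penbxlinea = 50.0, fact_sup = 1.0, fact_inf = 1.12 and
                    # comm_line = 50.0 * 1.0 * 0.05 / 1.12 ~= 2.23.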
if aml_brw.currency_id and aml_brw.amount_currency:
payxlinea_curr = aml_brw.amount_currency * (
inv_lin.price_subtotal /
inv_brw.amount_untaxed)
commission_currency = (abs(payxlinea_curr) * fact_sup *
(bar_dcto_comm / 100) /
fact_inf)
elif aml_brw.currency_id and not aml_brw.amount_currency:
return True
else:
commission_currency = comm_line
                    # Generate the commission lines for each product
comm_line_ids.create(
cr, uid, {
'commission_id': comm_brw.id,
'aml_id': aml_brw.id,
'am_rec': inv_brw.move_id.id,
'name':
aml_brw.move_id.name and
aml_brw.move_id.name or '/',
'pay_date': aml_brw.date,
'pay_off': aml_brw.credit,
'partner_id': inv_brw.partner_id.id,
'salesman_id': salesman and salesman.id,
'pay_inv': aml_brw.credit,
'inv_date': inv_brw.date_invoice,
'date_start': commission_policy_date_start,
'date_stop': commission_policy_date_end,
'days': emission_days,
'inv_subtotal': inv_brw.amount_untaxed,
'product_id': inv_lin.product_id.id,
'price_unit': price_unit,
'price_subtotal': inv_lin.price_subtotal,
'price_list': list_price,
'price_date': list_date,
'perc_iva': perc_iva,
'rate_item': rate_item,
'rate_number': bardctdsc,
'timespan': bar_day,
'baremo_comm': bar_dcto_comm,
'commission': comm_line,
'commission_currency': commission_currency,
'currency_id': inv_brw.currency_id and
inv_brw.currency_id.id or
inv_brw.company_id.currency_id.id,
}, context=context)
else:
                    # Build a list of tuples with the lines, products and
                    # their corresponding dates for which no list price was
                    # found; at the end these values are written to the
                    # corresponding log for inspection. (There was no list
                    # price for the given date; a price has to be created
                    # for this product.)
noprice_ids.create(cr, uid, {'commission_id': comm_brw.id,
'product_id': prod_id,
'date': inv_brw.date_invoice,
'invoice_num':
inv_brw.number},
context=context)
else:
                # When a line has no product_id it is written to a table to
                # alert the operator. No agreement was reached on whether the
                # operator should be allowed to change the invoice lines,
                # since that is a very delicate matter.
sale_noids.create(cr, uid, {'commission_id': comm_brw.id,
'inv_line_id': inv_lin.id, },
context=context)
return True
def _get_commission_payment_on_invoice(self, cr, uid, ids, aml_id,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
comm_brw = self.browse(cr, uid, ids[0], context=context)
aml_obj = self.pool.get('account.move.line')
comm_line_ids = self.pool.get('commission.lines')
aml_brw = aml_obj.browse(cr, uid, aml_id, context=context)
if not aml_brw.credit:
return True
# Retrieve Partner's Salesman
salesman = self._get_commission_salesman_policy(cr, uid, ids, aml_id,
context=context)
salesman_ok = self._get_commission_saleman(cr, uid, ids, salesman,
context=context)
if not salesman_ok:
if not (comm_brw.unknown_salespeople and not salesman):
return True
commission_policy_date_start = \
self._get_commission_policy_start_date(cr, uid, ids, aml_id,
context=context)
commission_policy_date_end = \
self._get_commission_policy_end_date(cr, uid, ids, aml_id,
context=context)
        # If we get here, this line is tied to a valid invoice id.
inv_brw = aml_brw.rec_invoice
        # DETERMINE THE TAX PERCENTAGE ON THE INVOICE (perc_iva)
        # =======================================================
perc_iva = (inv_brw.amount_total / inv_brw.amount_untaxed - 1) * 100
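        # e.g. (hypothetical figures) amount_total = 112.0 and
        # amount_untaxed = 100.0 give perc_iva = (1.12 - 1) * 100 = 12.0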
commission_policy_baremo = \
self._get_commission_policy_baremo(cr, uid, ids, aml_id,
context=context)
commission_params = self._get_commission_rate(
cr, uid, comm_brw.id,
commission_policy_date_end,
commission_policy_date_start, dcto=0.0,
bar_brw=commission_policy_baremo)
bar_day = commission_params['bar_day']
bar_dcto_comm = commission_params['bar_dcto_comm']
bardctdsc = commission_params['bardctdsc']
emission_days = commission_params['emission_days']
        ######################################
        # COMMISSION CALCULATION PER INVOICE #
        ######################################
penbxlinea = aml_brw.credit
fact_sup = 1 - 0.0 / 100 - 0.0 / 100
fact_inf = 1 + (perc_iva / 100) * (1 - 0.0 / 100) - \
0.0 / 100 - 0.0 / 100
comm_line = penbxlinea * fact_sup * (
bar_dcto_comm / 100) / fact_inf
if aml_brw.currency_id and aml_brw.amount_currency:
commission_currency = abs(aml_brw.amount_currency) * fact_sup * (
bar_dcto_comm / 100) / fact_inf
elif aml_brw.currency_id and not aml_brw.amount_currency:
return True
else:
commission_currency = comm_line
        # Generate the commission lines for each invoice
comm_line_ids.create(
cr, uid, {
'commission_id': comm_brw.id,
'aml_id': aml_brw.id,
'am_rec': inv_brw.move_id.id,
'name':
aml_brw.move_id.name and
aml_brw.move_id.name or '/',
'pay_date': aml_brw.date,
'pay_off': aml_brw.credit,
'partner_id': inv_brw.partner_id.id,
'salesman_id': salesman and salesman.id,
'pay_inv': aml_brw.credit,
'inv_date': inv_brw.date_invoice,
'date_start': commission_policy_date_start,
'date_stop': commission_policy_date_end,
'days': emission_days,
'inv_subtotal': inv_brw.amount_untaxed,
'perc_iva': perc_iva,
'rate_number': bardctdsc,
'timespan': bar_day,
'baremo_comm': bar_dcto_comm,
'commission': comm_line,
'commission_currency': commission_currency,
'currency_id': inv_brw.currency_id and inv_brw.currency_id.id
or inv_brw.company_id.currency_id.id,
}, context=context)
return True
def _get_commission_payment_on_aml(self, cr, uid, ids, aml_id,
context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
comm_brw = self.browse(cr, uid, ids[0], context=context)
if not comm_brw.unknown_salespeople:
return True
aml_obj = self.pool.get('account.move.line')
comm_line_ids = self.pool.get('commission.lines')
aml_brw = aml_obj.browse(cr, uid, aml_id, context=context)
if not aml_brw.credit:
return True
commission_policy_date_start = \
self._get_commission_policy_start_date(cr, uid, ids, aml_id,
context=context)
commission_policy_date_end = \
self._get_commission_policy_end_date(cr, uid, ids, aml_id,
context=context)
commission_policy_baremo = \
self._get_commission_policy_baremo(cr, uid, ids, aml_id,
context=context)
commission_params = self._get_commission_rate(
cr, uid, comm_brw.id,
commission_policy_date_end,
commission_policy_date_start, dcto=0.0,
bar_brw=commission_policy_baremo)
bar_day = commission_params['bar_day']
bar_dcto_comm = commission_params['bar_dcto_comm']
bardctdsc = commission_params['bardctdsc']
emission_days = commission_params['emission_days']
        # Generate the commission lines for each invoice
comm_line_ids.create(
cr, uid, {
'commission_id': comm_brw.id,
'aml_id': aml_brw.id,
'am_rec': aml_brw.rec_aml.move_id.id,
'name': aml_brw.move_id.name and aml_brw.move_id.name or '/',
'pay_date': aml_brw.date,
'pay_off': aml_brw.credit,
'partner_id': aml_brw.partner_id.id,
'salesman_id': None,
'pay_inv': aml_brw.credit,
'inv_date': aml_brw.rec_aml.date,
'date_start': commission_policy_date_start,
'date_stop': commission_policy_date_end,
'days': emission_days,
'inv_subtotal': None,
'perc_iva': None,
'rate_number': bardctdsc,
'timespan': bar_day,
'baremo_comm': bar_dcto_comm,
'commission': 0.0,
'commission_currency': None,
'currency_id': aml_brw.currency_id and aml_brw.currency_id.id
or aml_brw.company_id.currency_id.id,
}, context=context)
return True
def _get_commission_payment(self, cr, uid, ids, aml_id, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
comm_brw = self.browse(cr, uid, ids[0], context=context)
if comm_brw.commission_scope == 'product_invoiced':
self._get_commission_payment_on_invoice_line(cr, uid, ids, aml_id,
context=context)
elif comm_brw.commission_scope == 'whole_invoice':
self._get_commission_payment_on_invoice(cr, uid, ids, aml_id,
context=context)
return True
def _commission_based_on_payments(self, cr, uid, ids, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
for comm_brw in self.browse(cr, uid, ids, context=context):
payment_ids = set([])
uninvoice_payment_ids = set([])
# Read each Journal Entry Line
for aml_brw in comm_brw.aml_ids:
                # Check whether the commission for this payment was already paid
if aml_brw.paid_comm:
continue
                # Check whether this line has an invoice
if not aml_brw.rec_invoice:
                    # TODO: Here we have to deal with the lines that come
                    # from another system
uninvoice_payment_ids.add(aml_brw.id)
continue
payment_ids.add(aml_brw.id)
for pay_id in payment_ids:
            # proceed with preparing the commissions.
self._get_commission_payment(cr, uid, ids, pay_id,
context=context)
for aml_id in uninvoice_payment_ids:
            # proceed with preparing the commissions.
self._get_commission_payment_on_aml(cr, uid, ids, aml_id,
context=context)
return True
def _post_processing(self, cr, uid, ids, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
salesman_ids = self.pool.get('commission.salesman')
comm_line_obj = self.pool.get('commission.lines')
comm_voucher_ids = self.pool.get('commission.voucher')
comm_invoice_ids = self.pool.get('commission.invoice')
        # Having gone through all the vouchers and shown every element that
        # needs correction, the commissions are now grouped by salesman for
        # ease of use
cl_fields = ['id', 'salesman_id', 'currency_id', 'commission',
'commission_currency', 'am_id', 'invoice_id',
'comm_salespeople_id', 'comm_voucher_id', ]
for commission in self.browse(cr, uid, ids, context=context):
# Erasing what was previously set as Commission per Salesman
commission.salesman_ids.unlink()
commission.comm_invoice_ids.unlink()
commission.comm_voucher_ids.unlink()
            # collect every salesman and add up the total of their commissions
sale_comm = {}
            # arrange all the product commission lines into a tree
cl_ids = commission.comm_line_ids.read(cl_fields, load=None)
if not cl_ids:
continue
cl_data = DataFrame(cl_ids).set_index('id')
cl_data_grouped = cl_data.groupby(['salesman_id', 'currency_id'])
cl_data_agg = cl_data_grouped.sum()
sale_comm_data = cl_data_agg.to_dict()
sale_comm_cl = cl_data_grouped.groups
sale_comm = sale_comm_data.get('commission')
sale_comm_curr = sale_comm_data.get('commission_currency')
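            # At this point (hypothetical values) sale_comm maps
            # (salesman_id, currency_id) keys to summed commissions, e.g.
            # {(7, 2): 150.0, (9, 2): 80.0}, and sale_comm_curr holds the
            # corresponding sums of commission_currency.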
for key, value in sale_comm.iteritems():
salesman_id, currency_id = key
vendor_id = salesman_ids.create(cr, uid, {
'commission_id': commission.id,
'salesman_id': salesman_id,
'currency_id': currency_id,
'comm_total': value,
'comm_total_currency': sale_comm_curr[key],
}, context=context)
comm_line_obj.write(cr, uid, sale_comm_cl[key],
{'comm_salespeople_id': vendor_id},
context=context)
commission.write({
'total_comm': cl_data.sum().get('commission'),
'comm_fix': not all(
cl_data.groupby('salesman_id').groups.keys()),
})
cl_ids = commission.comm_line_ids.read(cl_fields, load=None)
cl_data = DataFrame(cl_ids).set_index('id')
vc_group = cl_data.groupby(['comm_salespeople_id', 'am_id']).groups
for key, values in vc_group.iteritems():
comm_salespeople_id, am_id = key
comm_voucher_id = comm_voucher_ids.create(cr, uid, {
'commission_id': commission.id,
'comm_sale_id': comm_salespeople_id,
'am_id': am_id,
}, context=context)
comm_line_obj.write(cr, uid, values,
{'comm_voucher_id': comm_voucher_id},
context=context)
cl_ids = commission.comm_line_ids.read(cl_fields, load=None)
cl_data = DataFrame(cl_ids).set_index('id')
vc_group = cl_data.groupby(['comm_voucher_id',
'invoice_id']).groups
for key, values in vc_group.iteritems():
comm_voucher_id, invoice_id = key
comm_invoice_id = comm_invoice_ids.create(cr, uid, {
'commission_id': commission.id,
'comm_voucher_id': comm_voucher_id,
'invoice_id': invoice_id,
}, context=context)
comm_line_obj.write(cr, uid, values,
{'comm_invoice_id': comm_invoice_id},
context=context)
return True
def prepare(self, cr, uid, ids, context=None):
"""
        This method goes through the journal entry lines and checks at least
        three (3) essential conditions before processing them:
        - journal_id.type in ('cash', 'bank'): the line belongs to a bank
        deposit (the handling of bounced cheques has not been considered
        here yet).
        - state == 'valid': the line has already been posted and the journal
        entry is balanced, a necessary but not sufficient condition.
        - paid_comm: the line has not yet been taken into account for a
        commission.
        If these three (3) conditions are met, the review of the payment
        lines can proceed.
        @param cr: cursor to database
        @param uid: id of current user
        @param ids: list of record ids to be processed
        @param context: context arguments, like lang, time zone
        @return: True
"""
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
comm_brw = self.browse(cr, uid, ids[0], context=context)
        # Unlink any existing lines, if there are any
comm_brw.clear()
if comm_brw.commission_type == 'partial_payment':
self._prepare_based_on_payments(cr, uid, ids, context=context)
elif comm_brw.commission_type == 'fully_paid_invoice':
self._prepare_based_on_invoices(cr, uid, ids, context=context)
self._commission_based_on_payments(cr, uid, ids, context=context)
self._post_processing(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'open'}, context=context)
return True
def _recompute_commission(self, cr, uid, ids, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
for comm_brw in self.browse(cr, uid, ids, context=context):
for cs_brw in comm_brw.salesman_ids:
if cs_brw.salesman_id:
continue
for cl_brw in cs_brw.comm_lines_ids:
cl_brw._recompute_commission()
return True
def action_recompute(self, cr, uid, ids, context=None):
self._recompute_commission(cr, uid, ids, context=context)
self._post_processing(cr, uid, ids, context=context)
return True
def action_draft(self, cr, user, ids, context=None):
context = context or {}
ids = isinstance(ids, (int, long)) and [ids] or ids
aml_obj = self.pool.get('account.move.line')
for comm_brw in self.browse(cr, user, ids, context=context):
if comm_brw.state == 'done':
aml_obj.write(
cr, user,
[line.aml_id.id for line in comm_brw.comm_line_ids],
{'paid_comm': False}, context=context)
self.clear(cr, user, ids, context=context)
self.write(cr, user, ids, {'state': 'draft', 'total_comm': None},
context=context)
return True
def clear(self, cr, user, ids, context=None):
'''
        Deletes all associated records from the Commission Payment
'''
context = context or {}
ids = isinstance(ids, (int, long)) and [ids] or ids
for comm_brw in self.browse(cr, user, ids, context=context):
comm_brw.sale_noids.unlink()
comm_brw.noprice_ids.unlink()
comm_brw.comm_line_ids.unlink()
comm_brw.salesman_ids.unlink()
comm_brw.comm_voucher_ids.unlink()
comm_brw.comm_invoice_ids.unlink()
comm_brw.write(
{'aml_ids': [(3, aml_brw.id) for aml_brw in comm_brw.aml_ids],
'invoice_ids': [
(3, inv_brw.id) for inv_brw in comm_brw.invoice_ids]})
def validate(self, cr, user, ids, context=None):
aml_obj = self.pool.get('account.move.line')
        # write the boolean paid_comm flag on the aml as True to indicate
        # that this commission is now being paid
        # TODO: before writing anything here, the paid_comm field has to be
        # checked first; if any of the lines has already been paid, raise a
        # warning
for comm_brw in self.browse(cr, user, ids, context=context):
if comm_brw.comm_fix:
raise osv.except_osv(_('Error!'), _('There are items to fix'))
aml_obj.write(cr, user,
[line.aml_id.id for line in comm_brw.comm_line_ids],
{'paid_comm': True}, context=context)
# TODO: write the real list of payments and invoices that were taken
# into account
self.write(cr, user, ids, {'state': 'done', }, context=context)
return True
class commission_sale_noid(osv.Model):
"""
Commission Payment : commission_sale_noid
"""
_name = 'commission.sale.noid'
_columns = {
'name': fields.char('Comentario', size=256),
'commission_id': fields.many2one('commission.payment', 'Comision'),
'inv_line_id': fields.many2one(
'account.invoice.line', 'Descripcion de Articulo'),
}
_defaults = {
'name': lambda *a: None,
}
class commission_noprice(osv.Model):
"""
Commission Payment : commission_sale_noid
"""
_name = 'commission.noprice'
_order = 'product_id'
_columns = {
'name': fields.char('Comentario', size=256),
'commission_id': fields.many2one('commission.payment', 'Comision'),
'product_id': fields.many2one('product.product', 'Producto'),
'date': fields.date('Date'),
'invoice_num': fields.char('Invoice Number', size=256),
}
_defaults = {
'name': lambda *a: None,
}
class commission_lines(osv.Model):
"""
Commission Payment : commission_lines
"""
_name = 'commission.lines'
_order = 'pay_date'
_columns = {
'commission_id': fields.many2one(
'commission.payment', 'Commission Document', required=True),
'name': fields.char('Transaccion', size=256, required=True),
'pay_date': fields.date('Payment Date', required=True),
'pay_off': fields.float(
'Pago',
digits_compute=dp.get_precision('Commission')),
'aml_id': fields.many2one('account.move.line', 'Entry Line'),
'am_rec': fields.many2one('account.move', 'Reconciling Entry'),
'am_id': fields.related(
'aml_id', 'move_id',
string='Journal Entry', relation='account.move',
type='many2one', store=True, readonly=True),
'invoice_id': fields.related(
'aml_id', 'rec_invoice',
string='Reconciling Invoice', relation='account.invoice',
type='many2one', store=True, readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'salesman_id': fields.many2one('res.users', 'Salesman',
required=False),
'comm_salespeople_id': fields.many2one(
'commission.salesman', 'Salespeople Commission', required=False),
'comm_voucher_id': fields.many2one(
'commission.voucher', 'Voucher Commission', required=False),
'pay_inv': fields.float(
'Pay. to Doc.',
digits_compute=dp.get_precision('Commission')),
'inv_date': fields.date('Invoice Date'),
'date_start': fields.date(
'Start Date', required=False, readonly=True,
),
'date_stop': fields.date(
'End Date', required=False, readonly=True,
),
'days': fields.float(
'Comm. Days',
digits_compute=dp.get_precision('Commission')),
'inv_subtotal': fields.float(
'SubTot. Doc.',
digits_compute=dp.get_precision('Commission')),
'product_id': fields.many2one('product.product', 'Product'),
'price_unit': fields.float(
'Prec. Unit.',
digits_compute=dp.get_precision('Commission')),
'price_subtotal': fields.float(
'SubTot. Product',
digits_compute=dp.get_precision('Commission')),
'price_list': fields.float(
'Price List',
digits_compute=dp.get_precision('Commission')),
'price_date': fields.date('List Date'),
'perc_iva': fields.float(
'Tax (%)',
digits_compute=dp.get_precision('Commission')),
'rate_item': fields.float(
'Dsct. (%)',
digits_compute=dp.get_precision('Commission')),
'rate_number': fields.float(
'B./Rate (%)',
digits_compute=dp.get_precision('Commission')),
'timespan': fields.float(
'B./Days',
digits_compute=dp.get_precision('Commission')),
'baremo_comm': fields.float(
'B./%Comm.',
digits_compute=dp.get_precision('Commission')),
'commission': fields.float(
'Commission Amount',
digits_compute=dp.get_precision('Commission')),
'commission_currency': fields.float(
'Currency Amount',
digits_compute=dp.get_precision('Commission')),
'currency_id': fields.many2one('res.currency', 'Currency'),
}
_defaults = {
'name': lambda *a: None,
}
def _recompute_commission(self, cr, uid, ids, context=None):
ids = isinstance(ids, (int, long)) and [ids] or ids
context = context or {}
cl_brw = self.browse(cr, uid, ids[0], context=context)
comm_brw = cl_brw.commission_id
aml_brw = cl_brw.aml_id
aml_id = cl_brw.aml_id.id
if not aml_brw.credit:
return True
commission_policy_date_start = \
comm_brw._get_commission_policy_start_date(aml_id)
commission_policy_date_end = \
comm_brw._get_commission_policy_end_date(aml_id)
commission_policy_baremo = \
comm_brw._get_commission_policy_baremo(
aml_id, partner_id=cl_brw.partner_id,
salesman_id=cl_brw.salesman_id)
commission_params = comm_brw._get_commission_rate(
commission_policy_date_end,
commission_policy_date_start, dcto=0.0,
bar_brw=commission_policy_baremo)
bar_day = commission_params['bar_day']
bar_dcto_comm = commission_params['bar_dcto_comm']
bardctdsc = commission_params['bardctdsc']
emission_days = commission_params['emission_days']
        ##################################
        # COMMISSION CALCULATION PER AML #
        ##################################
        # Right now I have not figured out a way to know how much was taxed
perc_iva = comm_brw.company_id.comm_tax
penbxlinea = aml_brw.credit
fact_sup = 1 - 0.0 / 100 - 0.0 / 100
fact_inf = 1 + (perc_iva / 100) * (1 - 0.0 / 100) - \
0.0 / 100 - 0.0 / 100
comm_line = penbxlinea * fact_sup * (
bar_dcto_comm / 100) / fact_inf
if aml_brw.currency_id and aml_brw.amount_currency:
commission_currency = abs(aml_brw.amount_currency) * fact_sup * (
bar_dcto_comm / 100) / fact_inf
elif aml_brw.currency_id and not aml_brw.amount_currency:
return True
else:
commission_currency = comm_line
        # Generate the commission lines for each invoice
cl_brw.write({
'pay_date': aml_brw.date,
'pay_off': aml_brw.credit,
'pay_inv': aml_brw.credit,
'inv_date': aml_brw.rec_aml.date,
'date_start': commission_policy_date_start,
'date_stop': commission_policy_date_end,
'days': emission_days,
'inv_subtotal': (aml_brw.rec_aml.debit / (1 + perc_iva / 100)),
'perc_iva': perc_iva,
'rate_number': bardctdsc,
'timespan': bar_day,
'baremo_comm': bar_dcto_comm,
'commission': comm_line,
'commission_currency': commission_currency,
'currency_id': aml_brw.currency_id and aml_brw.currency_id.id
or aml_brw.company_id.currency_id.id,
})
return True
class commission_salesman(osv.Model):
"""
Commission Payment : commission_salesman
"""
_name = 'commission.salesman'
_rec_name = 'salesman_id'
_columns = {
'commission_id': fields.many2one(
'commission.payment', 'Commission Document', readonly=True),
'salesman_id': fields.many2one(
'res.users', 'Salesman', required=False, readonly=True),
'comm_total': fields.float(
'Commission Amount',
digits_compute=dp.get_precision('Commission'), readonly=True),
'comm_voucher_ids': fields.one2many(
'commission.voucher',
'comm_sale_id', 'Vouchers Affected in this commission',
required=False),
'comm_lines_ids': fields.one2many(
'commission.lines',
'comm_salespeople_id', 'Salespeople Commission Details',
required=False),
'currency_id':
fields.many2one('res.currency', 'Currency', readonly=True),
'comm_total_currency': fields.float(
'Currency Amount',
digits_compute=dp.get_precision('Commission'), readonly=True),
'company_id': fields.related(
'commission_id', 'company_id',
string='Company',
relation='res.company',
type='many2one',
store=True,
readonly=True,
help=('Currency at which this report will be \
expressed. If not selected will be used the \
one set in the company')),
}
class commission_voucher(osv.Model):
"""
Commission Payment : commission_voucher
"""
_name = 'commission.voucher'
_order = 'date'
_rec_name = 'am_id'
_columns = {
'commission_id': fields.many2one('commission.payment', 'Commission'),
'comm_sale_id': fields.many2one('commission.salesman', 'Salesman'),
'am_id': fields.many2one('account.move', 'Journal Entry'),
'comm_invoice_ids': fields.one2many(
'commission.invoice',
'comm_voucher_id', 'Facturas afectadas en esta comision',
required=False),
'date': fields.related('am_id', 'date', string='Date', type='date',
store=True, readonly=True),
}
class commission_invoice(osv.Model):
"""
Commission Payment : commission_invoice
"""
_name = 'commission.invoice'
_order = 'invoice_id'
_columns = {
'name': fields.char('Comentario', size=256),
'commission_id': fields.many2one('commission.payment', 'Comision'),
'comm_voucher_id': fields.many2one('commission.voucher', 'Voucher'),
'invoice_id': fields.many2one('account.invoice', 'Factura'),
'comm_line_ids': fields.one2many(
'commission.lines',
'comm_invoice_id', 'Comision por productos', required=False),
'pay_inv': fields.float(
'Abono Fact.',
digits_compute=dp.get_precision('Commission')),
}
_defaults = {
'name': lambda *a: None,
}
class commission_lines_2(osv.Model):
"""
Commission Payment : commission_lines_2
"""
_inherit = 'commission.lines'
_columns = {
'comm_invoice_id': fields.many2one('commission.invoice',
'Invoice Commission'),
}
class res_company(osv.Model):
_inherit = "res.company"
_description = 'Companies'
_columns = {
'comm_tax': fields.float('Default Tax for Commissions'),
}
| agpl-3.0 |
RachitKansal/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
rachelalbert/image-analogies-python | viz_debug.py | 1 | 3059 | import pickle
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
from img_preprocess import compute_gaussian_pyramid
def show_pair(src_img, out_img, sa, sc, rs, s, im=None):  # im is passed by the caller but unused here
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.imshow(out_img, interpolation='nearest')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(src_img, interpolation='nearest')
plt.show(block=False)
while True:
c_out = fig1.ginput(n=1, timeout=0)[0]
px = tuple(np.rint(c_out[::-1]).astype(int))
ax1.clear()
ax2.clear()
ax1.imshow(out_img, interpolation='nearest')
ax2.imshow(src_img, interpolation='nearest')
imh, imw = out_img.shape[:2]
ix = px[0] * imw + px[1]
rstar = rs[ix]
s_rstar = s[rstar[0] * imw + rstar[1]]
print('source row/col', s[ix])
print('output row/col', px)
if sa[ix] == sc[ix]:
ec = 'black'
elif s[ix] == sa[ix]:
ec = 'red'
elif s[ix] == sc[ix]:
ec = 'yellow'
else:
print('You made a mistake, try again!')
print('s[ix], sa[ix], sc[ix]:', s[ix], sa[ix], sc[ix])
raise
#print('px, s[ix], sa[ix], sc[ix]:', px, s[ix], sa[ix], sc[ix])
ax1.add_patch(Rectangle(np.array( px)[::-1] - 0.5, 1, 1, fill=None, alpha=1, linewidth=2, edgecolor=ec))
ax1.add_patch(Rectangle(np.array( rstar)[::-1] - 0.5, 1, 1, fill=None, alpha=1, linewidth=2, edgecolor='blue'))
ax2.add_patch(Rectangle(np.array( sa[ix])[::-1] - 0.5, 1, 1, fill=None, alpha=1, linewidth=2, edgecolor='red'))
ax2.add_patch(Rectangle(np.array( sc[ix])[::-1] - 0.5, 1, 1, fill=None, alpha=1, linewidth=2, edgecolor='yellow'))
ax2.add_patch(Rectangle(np.array(s_rstar)[::-1] - 0.5, 1, 1, fill=None, alpha=1, linewidth=2, edgecolor='blue'))
fig1.canvas.draw()
fig2.canvas.draw()
def load_imgs(src_path, out_path):
src_img = plt.imread(src_path)
src_pyr = compute_gaussian_pyramid(src_img, min_size = 3)
out_pyr = [[]]
sas = [[]]
scs = [[]]
rss = [[]]
ss = [[]]
ims = [[]]
for level in range(1, len(src_pyr)):
with open(out_path + '%d_srcs.pickle' % level) as f:
sa, sc, rstars, s, im = pickle.load(f)
assert(len(sa) == len(sc) == len(rstars) == len(s) == len(im))
sas.append(sa)
scs.append(sc)
rss.append(rstars)
ss.append(s)
ims.append(im)
out_img = plt.imread(out_path + 'im_out_color_%d.jpg' % level)
out_pyr.append(out_img)
return src_pyr, out_pyr, sas, scs, rss, ss, ims
src_path = './images/lf_originals/half_size/fruit-filt.jpg'
out_path = './images/lf_originals/output/boat/working_test_2/'
level = 6
src_pyr, out_pyr, sas, scs, rss, ss, ims = load_imgs(src_path, out_path)
print('Images Loaded! Level = %d' % level)
show_pair(src_pyr[level], out_pyr[level], sas[level], scs[level], rss[level], ss[level], ims[level])
| mit |
yfiua/TraNet | role/role-none.py | 1 | 4041 | from __future__ import division
import sys
import igraph
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import *
from sklearn.metrics import *
from sklearn.preprocessing import MinMaxScaler
from transform_funcs import *
from utils import *
from scipy.sparse import coo_matrix, linalg
def graph_to_sparse_matrix(G):
n = G.vcount()
xs, ys = map(np.array, zip(*G.get_edgelist()))
if not G.is_directed():
xs, ys = np.hstack((xs, ys)).T, np.hstack((ys, xs)).T
else:
xs, ys = xs.T, ys.T
return coo_matrix((np.ones(xs.shape), (xs, ys)), shape=(n, n), dtype=np.int16)
def get_feature(G, f):
return _transform_func_degree(getattr(G, f)()) if callable(getattr(G, f)) else _transform_func(getattr(G, f))
# aggregate by taking the mean feature value over each node's neighbours
def mean_neighbour(A, d, feature):
return A.dot(feature) / d
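# For instance (hypothetical shapes): with A an (n, n) sparse adjacency
# matrix, d a length-n degree vector and feature a length-n vector, the
# result is a length-n vector whose i-th entry is the mean feature value
# over the neighbours of node i.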
def get_feature_matrix(G, features, rounds=5):
# local clustering coefficient
G_sim = G.as_directed().simplify(multiple=False) # remove loops
lcc = np.array(G_sim.transitivity_local_undirected(mode='zero'))
    lcc[lcc < 0] = 0  # igraph's implementation can return spurious negative values; clamp them
G.clustering_coefficient = lcc
# compute PageRank
G_sim = G.copy()
G_sim = G_sim.simplify(multiple=False) # remove loops
alpha = 0.15
pagerank = np.array(G_sim.pagerank(damping=1-alpha))
G.pr = pagerank
feature_matrix = [ get_feature(G, f) for f in features ]
X = np.array(feature_matrix).T
# adjacency matrix (simplified)
A = graph_to_sparse_matrix(G.as_undirected().simplify())
d = np.squeeze(np.array(A.sum(axis=1))).astype(np.int)
d[d == 0] = 1
for i in range(rounds):
feature_matrix = [ mean_neighbour(A, d, f) for f in feature_matrix ]
X = np.concatenate((X, np.array(feature_matrix).T), axis=1)
#X = np.hstack((X, np.array([pagerank]).T))
return X
def read_data(lang, features):
# dataset (network)
df = pd.read_csv('data/' + lang + '-wiki-talk', sep='\t', header=None)
nodes = np.unique(df[[0, 1]].values);
max_node_num = max(nodes) + 1
num_nodes = len(nodes)
G = igraph.Graph(directed=True)
G.add_vertices(max_node_num)
G.add_edges(df[[0, 1]].values)
G = G.subgraph(nodes)
# features
X = get_feature_matrix(G, features)
# dataset (roles)
df_role = pd.read_csv('data/' + lang + '-user-group', sep='\t', header=None)
roles = df_role[[0,1]].values
y = [0] * max_node_num
for r in roles:
y[r[0]] = r[1]
y = np.array([y[i] for i in nodes])
return np.squeeze(X), y
# main
def main():
# params
n_trees = 200
features = [ 'clustering_coefficient' , 'degree' , 'indegree' , 'outdegree', 'pr' ]
langs = [ 'ar', 'bn', 'br', 'ca', 'cy', 'de', 'el' , 'en', 'eo', 'es', 'eu', 'fr', 'gl', 'ht', 'it', 'ja', 'lv', 'nds', 'nl', 'oc', 'pl', 'pt', 'ru', 'sk', 'sr', 'sv', 'vi', 'zh' ]
#langs = [ 'br', 'cy', 'ar', 'lv', 'zh' ]
# read datasets
X = {}
y = {}
for lang in langs:
X[lang], y[lang] = read_data(lang, features)
# admin classifier
for lang_source in langs:
y_source = (y[lang_source] == 2)
## classifier
clf = RandomForestClassifier(n_estimators=n_trees, random_state=42)
clf.fit(X[lang_source], y_source)
## evaluation
for lang_target in langs:
y_target = (y[lang_target] == 2)
if (len(np.unique(y_source)) == 1) or (len(np.unique(y_target)) == 1): # ROC not defined
auc = np.nan
else:
y_predict = clf.predict_proba(X[lang_target])[:,1]
auc = roc_auc_score(y_target, y_predict)
print lang_source + ',' + lang_target + ',' + str(auc)
if __name__ == '__main__':
# init
_transform_func_degree = no_transform
_transform_func = no_transform
main()
| gpl-3.0 |
manashmndl/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
iagapov/ocelot | optimizer/GP/bayes_optimization.py | 1 | 16041 | """
Contains the Bayes optimization class.
Initialization parameters:
model: an object with methods 'predict', 'fit', and 'update'
interface: an object which supplies the state of the system and
allows for changing the system's x-value.
Should have methods '(x,y) = intfc.getState()' and 'intfc.setX(x_new)'.
Note that this interface system is rough, and used for testing and
as a placeholder for the machine interface.
acq_func: specifies how the optimizer should choose its next point.
'EI': uses expected improvement. The interface should supply y-values.
'testEI': uses EI over a finite set of points. This set must be
provided as alt_param, and the interface need not supply
meaningful y-values.
xi: exploration parameter suggested in some Bayesian opt. literature
alt_param: currently only used when acq_func=='testEI'
m: the maximum size of model; can be ignored unless passing an untrained
SPGP or other model which doesn't already know its own size
bounds: a tuple of (min,max) tuples specifying search bounds for each
input dimension. Generally leads to better performance.
Has a different interpretation when iter_bounds is True.
iter_bounds: if True, bounds the distance that can be moved in a single
iteration in terms of the length scale in each dimension. Uses the
bounds variable as a multiple of the length scales, so bounds==2
with iter_bounds==True limits movement per iteration to two length
scales in each dimension. Generally a good idea for safety, etc.
prior_data: input data to train the model on initially. For convenience,
since the model can be trained externally as well.
Assumed to be a pandas DataFrame of shape (n, dim+1) where the last
column contains y-values.
Methods:
acquire(): Returns the point that maximizes the acquisition function.
For 'testEI', returns the index of the point instead.
For normal acquisition, currently uses the bounded L-BFGS optimizer.
Haven't tested alternatives much.
best_seen(): Uses the model to make predictions at every observed point,
returning the best-performing (x,y) pair. This is more robust to noise
than returning the best observation, but could be replaced by other,
faster methods.
OptIter(): The main method for Bayesian optimization. Maximizes the
acquisition function, then uses the interface to test this point and
update the model.
"""
import operator as op
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
import time
from copy import deepcopy
class BayesOpt:
def __init__(self, model, target_func, acq_func='EI', xi=0.0, alt_param=-1, m=200, bounds=None, iter_bound=False, prior_data=None):
self.model = model
self.m = m
#self.delay = 0
self.bounds = bounds
self.iter_bound = iter_bound
#self.interface = interface
self.target_func = target_func
#self.devices = devices
self.acq_func = (acq_func, xi, alt_param)
self.max_iter = 100
self.check = None
self.alpha = 1.
self.kill = False
#if(acq_func=='testEI'):
# (x_init, y_init) = np.array(alt_param.iloc[0, :-1],ndmin=2), alt_param.iloc[0, -1]
#else:
# x_init = np.array([dev.get_value for dev in self.devices], ndmin=2)
# y_init = np.array([[target_func.get_penalty()]])
# #(x_init, y_init) = interface.getState()
#
#self.X_obs = np.array(x_init)
#self.Y_obs = [y_init]
#self.current_x = x_init
# initialize model on prior data
if(prior_data is not None):
p_X = prior_data.iloc[:, :-1]
p_Y = prior_data.iloc[:, -1]
num = len(prior_data.index)
self.model.fit(p_X, p_Y, min(m, num))
def terminate(self, devices):
"""
Sets the position back to the location that seems best in hindsight.
It's a good idea to run this at the end of the optimization, since
Bayesian optimization tries to explore and might not always end in
a good place.
"""
print("TERMINATE", self.x_best)
if(self.acq_func[0] == 'EI'):
# set position back to something reasonable
for i, dev in enumerate(devices):
dev.set_value(self.x_best[i])
#error_func(self.x_best)
if(self.acq_func[0] == 'UCB'):
# UCB doesn't keep track of x_best, so find it
(x_best, y_best) = self.best_seen()
for i, dev in enumerate(devices):
dev.set_value(x_best[i])
def minimize(self, error_func, x):
# weighting for exploration vs exploitation in the GP at the end of scan, alpha array goes from 1 to zero
#alpha = [1.0 for i in range(40)]+[np.sqrt(50-i)/3.0 for i in range(41,51)]
inverse_sign = -1
self.current_x = np.array(np.array(x).flatten(), ndmin=2)
self.X_obs = np.array(self.current_x)
self.Y_obs = [np.array([[inverse_sign*error_func(x)]])]
# iterate though the GP method
#print("GP minimize", error_func, x, error_func(x))
for i in range(self.max_iter):
# get next point to try using acquisition function
x_next = self.acquire(self.alpha)
#print("XNEXT ", x_next)
#check for problems with the beam
if self.check != None: self.check.errorCheck()
y_new = error_func(x_next.flatten())
if self.kill:
#disable so user does not start another scan while the data is being saved
break
y_new = np.array([[inverse_sign *y_new]])
#advance the optimizer to the next iteration
#self.opt.OptIter(alpha=alpha[i])
#self.OptIter() # no alpha
# change position of interface and get resulting y-value
x_new = deepcopy(x_next)
#(x_new, y_new) = self.interface.getState()
self.current_x = x_new
# add new entry to observed data
self.X_obs = np.concatenate((self.X_obs, x_new), axis=0)
self.Y_obs.append(y_new)
# update the model (may want to add noise if using testEI)
self.model.update(x_new, y_new)# + .5*np.random.randn())
def best_seen(self):
"""
Checks the observed points to see which is predicted to be best.
Probably safer than just returning the maximum observed, since the
model has noise. It takes longer this way, though; you could
instead take the model's prediction at the x-value that has
done best if this needs to be faster.
"""
(mu, var) = self.model.predict(self.X_obs)
(ind_best, mu_best) = max(enumerate(mu), key=op.itemgetter(1))
return (self.X_obs[ind_best], mu_best)
def acquire(self, alpha=None):
"""
Computes the next point for the optimizer to try by maximizing
the acquisition function. If movement per iteration is bounded,
starts search at current position.
"""
if(self.acq_func[0] == 'EI'):
(x_best, y_best) = self.best_seen()
self.x_best = x_best
x_start = x_best
if(self.iter_bound):
x_start = self.current_x
if(self.bounds is None):
self.bounds = 1.0
lengths = 1/np.sqrt(np.exp(self.model.covar_params[0]))
                iter_bounds = [(x_start[:, i] - self.bounds*lengths[:, i],
                                x_start[:, i] + self.bounds*lengths[:, i])
                               for i in range(x_start.shape[1])]
else:
iter_bounds = self.bounds
# maximize the EI (by minimizing negative EI)
try:
res = minimize(negExpImprove, x_start, args=(self.model, y_best, self.acq_func[1], alpha),
bounds=iter_bounds, method='L-BFGS-B', options={'maxfun':100})
except:
raise
# return resulting x value as a (1 x dim) vector
return np.array(res.x,ndmin=2)
if(self.acq_func[0] == 'UCB'):
mult = 0
#curr_x = self.interface.getState()[0]
curr_x = self.current_x
res = minimize(negUCB, curr_x, args=(self.model, mult), bounds=self.bounds, method='L-BFGS-B')
return np.array(res.x,ndmin=2)
elif(self.acq_func[0] == 'testEI'):
# collect all possible x values
options = np.array(self.acq_func[2].iloc[:, :-1])
(x_best, y_best) = self.best_seen()
# find the option with best EI
best_option_score = (-1,1e12)
for i in range(options.shape[0]):
result = negExpImprove(options[i],self.model,y_best,self.acq_func[1])
if(result < best_option_score[1]):
best_option_score = (i, result)
# return the index of the best option
return best_option_score[0]
else:
print('Unknown acquisition function.')
return 0
class HyperParams:
def __init__(self, pvs, filename):
self.pvs = pvs
self.filename = filename
pass
def extract_hypdata(self, energy):
key = str(energy)
#energy = str(round(float(self.mi.get_energy())))
#if len(energy) is 3: key = energy[0:1]
#if len(energy) is 4: key = energy[0:2]
#print ("Loading raw data for", key, "GeV from", self.filename)
#print()
f = np.load(str(self.filename), fix_imports=True, encoding='latin1')
filedata = f[0][key]
return filedata
def loadHyperParams(self, energy, detector_stat_params):
"""
Method to load in the hyperparameters from a .npy file.
        Sorts the data, ordering the parameters to match this object's pv list.
        Formats the data into the tuple format that the GP model object can accept:
        ( [device_1, ..., device_N ], coefficient, noise)
Args:
filename (str): String for the file directory.
energy:
Returns:
List of hyperparameters, ordered using the UI's "self.pvs" list.
"""
#Load in a npy file containing hyperparameters binned for every 1 GeV of beam energy
extention = self.filename[-4:]
if extention == ".npy":
filedata = self.extract_hypdata(energy)
#sort list to match the UIs PV list order
#if they are loaded in the wrong order, the optimzer will get the wrong params for a device
keys = []
hyps = []
match_count = 0
for pv in self.pvs:
names = filedata.keys()
if pv in names:
keys.append(pv)
ave = float(filedata[pv][0])
std = float(filedata[pv][1])
hyp = self.calcLengthScaleHP(ave, std)
hyps.append(hyp)
print ("calculate hyper params", pv, ave, std, hyp)
match_count+=1
if match_count != len(self.pvs):
# TODO: what is it?
# self.parent.scanFinished()
raise Exception("Number of PVs in list does not match PVs found in hyperparameter file")
ave, std = detector_stat_params
print ("DETECTOR AVE", ave)
print ("DETECTOR STD", std)
coeff = self.calcAmpCoeffHP(ave, std)
noise = self.calcNoiseHP(ave, std)
dout = ( np.array([hyps]), coeff, noise )
#prints for debug
print()
print ("Calculated Hyperparameters ( [device_1, ..., device_N ], amplitude coefficent, noise coefficent)")
print()
for i in range(len(hyps)):
print(self.pvs[i], hyps[i])
print ("AMP COEFF = ", coeff)
print ("NOISE COEFF = ", noise)
print()
return dout
def calcLengthScaleHP(self, ave, std, c = 1.0, pv = None):
"""
Method to calculate the GP length scale hyperparameters using history data
        The formula for the hyperparameters comes from Mitch and some papers he read on the GP.
Args:
ave (float): Mean of the device, binned around current machine energy
std (float): Standard deviation of the device
c (float): Scaling factor to change the output to be larger or smaller, determined empirically
pv (str): PV input string to scale hyps depending on pv, not currently used
Returns:
Float of the calculated length scale hyperparameter
"""
#for future use
if pv is not None:
#[pv,val]
pass
#+- 1 std around the mean
hi = ave+std
lo = ave-std
hyp = -2*np.log( ( ( c*(hi-lo) ) / 4.0 ) + 0.01 )
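        # Worked example (hypothetical numbers): ave = 1.0, std = 0.1, c = 1.0
        # give hi - lo = 0.2, so hyp = -2 * ln(0.2 / 4.0 + 0.01)
        #                            = -2 * ln(0.06) ~= 5.63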
return hyp
def calcAmpCoeffHP(self, ave, std, c = 0.5):
"""
Method to calculate the GP amplitude hyperparameter
        The formula for the hyperparameters comes from Mitch and some papers he read on the GP.
        We first tried using the standard deviation to calculate this, but found it needed to scale with the mean instead.
Args:
            ave (float): Mean of the objective function (GDET or something else)
std (float): Standard deviation of the objective function
c (float): Scaling factor to change the output to be larger or smaller, determined empirically
Returns:
Float of the calculated amplitude hyperparameter
"""
        # We found c = 0.5 to work well; this could get changed at some point
hyp2 = np.log( ( ((c*ave)**2) + 0.1 ) )
return hyp2
def calcNoiseHP(self, ave, std, c = 1.0):
"""
Method to calculate the GP noise hyperparameter
The hyperparameter formulas are from Mitch and some papers he read on the GP.
Args:
ave (float): Mean of the objective function (GDET or something else)
std (float): Standard deviation of the objective function
c (float): Scaling factor to change the output to be larger or smaller, determined empirically
Returns:
Float of the calculated noise hyperparameter
"""
hyp = np.log((c*std / 4.0) + 0.01)
return hyp
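# Worked example of the three formulas above (illustrative numbers only):
# for a device with ave = 0.0, std = 0.5 and c = 1.0, hi - lo = 2*std = 1.0, so
#   length scale hyp = -2*ln(1.0/4 + 0.01) = -2*ln(0.26) ~= 2.69 (calcLengthScaleHP)
# and for a detector with ave = 2.0, std = 0.1,
#   amp coeff   = ln((0.5*2.0)**2 + 0.1) = ln(1.1)   ~=  0.095  (calcAmpCoeffHP)
#   noise coeff = ln(1.0*0.1/4 + 0.01)   = ln(0.035) ~= -3.35   (calcNoiseHP)
# These log-space values are what loadHyperParams packs into
# ( np.array([hyps]), coeff, noise ).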
def negExpImprove(x_new, model, y_best, xi, alpha=1.0):
"""
The common acquisition function, expected improvement. Returns the
negative for the minimizer (so that EI is maximized). Alpha attempts
to control the ratio of exploration to exploitation, but does not seem
to work well in practice. The terminate() method is a better choice.
"""
(y_new, var) = model.predict(np.array(x_new, ndmin=2))
diff = y_new - y_best - xi
if(var == 0):
return 0
else:
Z = diff / np.sqrt(var)
EI = diff * norm.cdf(Z) + np.sqrt(var) * norm.pdf(Z)
#print(x_new, EI)
return alpha * (-EI) + (1 - alpha) * (-y_new)
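# Hedged usage sketch for the acquisition functions in this block (gp_model,
# x0 and y_best are assumed to exist; any model whose .predict(x) returns a
# (mean, variance) pair will work):
#
#   from scipy.optimize import minimize
#   res = minimize(negExpImprove, x0, args=(gp_model, y_best, 0.01),
#                  method='L-BFGS-B')
#   x_next = res.x   # candidate that maximizes expected improvement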
def negUCB(x_new, model, mult):
"""
The upper confidence bound acquisition function. Currently only partially
implemented. The mult parameter specifies how wide the confidence bound
should be, and there currently is no way to compute this parameter. This
acquisition function shouldn't be used until there is a proper mult.
"""
(y_new, var) = model.predict(np.array(x_new,ndmin=2))
UCB = y_new + mult * np.sqrt(var)
return -UCB
def negProbImprove(x_new, model, y_best, xi):
"""
The probability of improvement acquisition function. Untested.
Performs worse than EI according to the literature.
"""
(y_new, var) = model.predict(np.array(x_new,ndmin=2))
diff = y_new - y_best - xi
if(var == 0):
return 0
else:
Z = diff / np.sqrt(var)
return -norm.cdf(Z)
| gpl-3.0 |
befelix/scipy | scipy/stats/stats.py | 3 | 190576 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma, zeros
from scipy._lib.six import callable, string_types
from scipy._lib._version import NumpyVersion
import scipy.special as special
import scipy.linalg as linalg
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_dis, _toint64, _weightedrankedtau
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau',
'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
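# Illustrative behaviour of the private helper above (values follow from the
# logic above; not an official doctest):
#
#   >>> _contains_nan(np.array([1.0, np.nan]))
#   (True, 'propagate')
#   >>> _contains_nan(np.array([1.0, 2.0]), nan_policy='raise')
#   (False, 'raise')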
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Return the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculate the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0):
# Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
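# Illustrative behaviour of the private helper above (values follow from the
# masking logic above; not an official doctest):
#
#   >>> _mask_to_limits(np.arange(5), (1, 3), (True, False)).compressed()
#   array([1, 2])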
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n / (n - 1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum.
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
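# Quick sanity check of the exponentiation-by-squares path above (illustrative;
# not taken from the scipy test suite): the result matches the direct definition.
#
#   >>> a = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> _moment(a, 2, 0)
#   1.25
#   >>> np.mean((a - a.mean())**2)
#   1.25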
def variation(a, axis=0, nan_policy='propagate'):
"""
Compute the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5])
0.47140452079103173
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Compute the skewness of a data set.
For normally distributed data, the skewness should be about 0. For
unimodal continuous distributions, a skewness value > 0 means that
there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import kurtosis
>>> kurtosis([1, 2, 3, 4, 5])
-1.3
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, each column is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.31210983614218968)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.65540666312754592)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.5717735103604071, pvalue=0.00035457199058231331)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Test whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.088043383325283484)
>>> np.random.seed(28041990)
>>> s = np.random.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> pts = 1000
>>> np.random.seed(28041990)
>>> a = np.random.normal(0, 1, size=pts)
>>> b = np.random.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 3.27207e-11
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides. And it's significantly faster.
Therefore it's recommended to use `numpy.percentile` for users that have
numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
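# Illustrative effect of interpolation_method (values follow from the index
# arithmetic above; not an official doctest): for a = [1, 2, 3, 4] and per = 75,
# idx = 0.75 * 3 = 2.25, so
#
#   >>> scoreatpercentile([1, 2, 3, 4], 75, interpolation_method='lower')
#   3.0
#   >>> scoreatpercentile([1, 2, 3, 4], 75)  # default 'fraction'
#   3.25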
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
bins[x] <= range_x < bins[x+1] for x = 0, ..., N-2, where N is the
length of the `bins` array. The last range is given by
bins[N-1] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
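# Small illustration of the deprecated helper above (values follow from the
# searchsorted logic; not an official doctest). The ranges are [0, 2), [2, 4)
# and an open-ended [4, inf):
#
#   >>> histogram2([1, 2, 2, 5], bins=[0, 2, 4])
#   array([1, 2, 1])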
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
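# Worked example of the default-limits rule above (illustrative): for
# a = np.arange(10) and numbins = 10, s = (9 - 0) / (2 * 9) = 0.5, so the
# default limits become (-0.5, 9.5), every bin has width 1.0 and each integer
# falls into its own bin.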
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
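    # Normalize the counts by the number of observations (the length of the
    # first axis) to obtain relative frequencies.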
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Return the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different to the default (0) used by other
ddof containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculate the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
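    # Re-insert the reduced axis when needed so that the mean and standard
    # deviation broadcast against the original array.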
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0, 100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False,
contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
        # At time of writing, this means np.__version__ >= 1.9.0
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation,
keepdims=keepdims)
        # If the result is non-scalar and nanpercentile does not do a
        # proper axis roll.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
                # If multiple axes were given, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
    The output array ``c`` contains only those elements of the input
    array `a` that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
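    Examples
    --------
    A brief illustrative sketch; only the shape is shown, since the order
    of the trimmed content is undefined:
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trim1(a, 0.1, tail='right')
    >>> b.shape
    (18,)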
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
    The algorithm is from Heiman [2], pp. 394-397.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculate a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
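    Examples
    --------
    A minimal illustrative sketch: for exactly linearly related data the
    correlation is 1.0 and the two-tailed p-value is 0.0:
    >>> from scipy import stats
    >>> stats.pearsonr([1, 2, 3, 4], [2, 4, 6, 8])
    (1.0, 0.0)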
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
                Atlantic  Indian
        whales     8        2
        sharks     1        5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
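    # Unconditional maximum-likelihood (sample) odds ratio; it is set to
    # infinity when an off-diagonal cell is zero.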
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / float(c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
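        # The two-sided p-value sums the probabilities of all tables whose
        # point probability does not exceed that of the observed table
        # (within a small relative tolerance); binary_search locates where
        # the opposite tail begins.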
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculate a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). Correlation matrix is square with
length equal to total number of variables (columns or rows) in a and b
combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated; it has the same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
if a_contains_nan:
a = ma.masked_invalid(a)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
b_contains_nan, nan_policy = _contains_nan(b, nan_policy)
if a_contains_nan or b_contains_nan:
b = ma.masked_invalid(b)
if nan_policy == 'propagate':
rho, pval = mstats_basic.spearmanr(a, b, axis)
return SpearmanrResult(rho * np.nan, pval * np.nan)
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
    groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
np. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the 1945 "tau-b" version of Kendall's
tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
version [1]_ in absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that if the input contains nan
'omit' delegates to mstats_basic.kendalltau(), which has a different
implementation.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# con_minus_dis is approx normally distributed with this variance [3]_
var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
size * (size - 1) * (size - 2))
pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
# Limit range to fix computational errors
return KendalltauResult(min(1., max(-1., tau)), pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""
Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element, and a weigher function, which
    assigns a weight to each element based on its rank. The weight of an
exchange is then the sum or the product of the weights of the ranks of
the exchanged elements. The default parameters compute
:math:`\tau_\mathrm h`: an exchange between elements with rank
:math:`r` and :math:`s` (starting from zero) has weight
:math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
    external criterion of importance. If, as is usually the case, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters.
Note that if you are computing the weighted :math:`\tau` on arrays of
ranks, rather than of scores (i.e., a larger value implies a lower
rank) you must negate the ranks, so that elements of higher rank are
associated with a larger value.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
    rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
        The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
        Presently ``np.nan``, as the null distribution of the statistic is
        unknown (even in the additive hyperbolic case).
See also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.71813413296990281, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty
# If there are NaNs we apply _toint64()
if np.isnan(np.min(x)):
x = _toint64(x)
if np.isnan(np.min(y)):
y = _toint64(y)
    # Reduce unsupported dtypes to integer ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size))
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculate the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis; if array_like, then it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
    Test whether the mean of the random sample is equal to the true mean,
    and then to a different mean. We reject the null hypothesis in the
    second case and do not reject it in the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
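    # Welch-Satterthwaite approximation to the degrees of freedom for the
    # unequal-variance (Welch's) t-test.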
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
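# Illustrative sketch (not part of the original SciPy source): the two helper
# functions above differ only in how the degrees of freedom and the standard
# error of the difference in means are formed.  The summary numbers below are
# arbitrary placeholders used to compare the pooled and Welch denominators.
def _ttest_denom_demo():
    v1, n1 = 87.5, 13   # variance and size of the first sample (made up)
    v2, n2 = 39.0, 11   # variance and size of the second sample (made up)
    df_pooled, denom_pooled = _equal_var_ttest_denom(v1, n1, v2, n2)
    df_welch, denom_welch = _unequal_var_ttest_denom(v1, n1, v2, n2)
    # The pooled df is always n1 + n2 - 2; Welch's df is data dependent and
    # generally non-integer.
    return (df_pooled, denom_pooled), (df_welch, denom_welch)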
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
Suppose we have the summary data for two samples, as follows::
                         Sample   Sample
                   Size   Mean   Variance
        Sample 1    13    15.0     87.5
        Sample 2    11    12.0     39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.90513580933102689, pvalue=0.37519967975814872)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.37519967975814861)
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculate the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples for its use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
        return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
    Calculate a one-way chi-square test.
    The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Compute the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
    This is the two-sided test; one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Compute the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
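# Illustrative usage sketch (not part of the original SciPy source): run the
# Mann-Whitney test on two small independent samples.  The data are arbitrary
# placeholders; passing `alternative` explicitly avoids the deprecated default
# behaviour described in the docstring above.
def _mannwhitneyu_demo():
    a = [1.1, 2.3, 2.9, 3.8, 4.5, 5.1, 6.0, 7.2]
    b = [2.0, 3.5, 4.1, 5.6, 6.3, 7.7, 8.4, 9.9]
    result = mannwhitneyu(a, b, alternative='two-sided')
    # result.statistic is the U statistic for `b`; result.pvalue is two-sided.
    return result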
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
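# Illustrative usage sketch (not part of the original SciPy source): the
# rank-sum test on two small, tie-free samples of different sizes.  The data
# are arbitrary placeholders.
def _ranksums_demo():
    x = [0.4, 1.1, 2.3, 3.6, 4.8]
    y = [0.9, 1.7, 2.8, 3.1, 5.5, 6.2]
    z, p = ranksums(x, y)
    # `z` is the large-sample normal approximation of the rank-sum statistic
    # of `x`; `p` is its two-sided p-value.
    return z, p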
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
            raise ValueError("nan_policy must be 'propagate', "
                             "'raise' or 'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
    if contains_nan and nan_policy == 'omit':
        # Mask the invalid values and pass the masked arrays on; rebinding the
        # loop variable would leave `args` unchanged.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Compute the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
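# Illustrative usage sketch (not part of the original SciPy source): three
# repeated measurements on the same seven subjects (arbitrary numbers).  All
# arguments must have the same length and at least three are required.
def _friedmanchisquare_demo():
    before = [72, 96, 88, 92, 74, 76, 82]
    after_one_week = [70, 91, 88, 89, 76, 73, 81]
    after_one_month = [68, 86, 85, 86, 70, 73, 80]
    return friedmanchisquare(before, after_one_week, after_one_month)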
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
        raise ValueError("Invalid method '%s'. Options are 'fisher' "
                         "or 'stouffer'." % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Return the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
        x will be clipped to be no greater than 1.0.
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Return an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Return a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
    dfnum : int
        Degrees of freedom associated with the Restricted model.
    dfden : int
        Degrees of freedom associated with the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
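# Illustrative sketch (not part of the original SciPy source): the two private
# helpers above are easy to confuse.  For the arbitrary data below,
# _sum_of_squares returns sum(a**2) while _square_of_sums returns (sum(a))**2.
def _sums_demo():
    a = np.array([1.0, 2.0, 3.0, 4.0])
    return _sum_of_squares(a), _square_of_sums(a)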
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    as_ : ndarray
        The sorted copy of `a`.
    it : ndarray of type int
        The indices that sort the original array, as given by ``np.argsort``.
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| bsd-3-clause |
pprett/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
yuxng/Deep_ISM | ISM/lib/ism/test_seg.py | 1 | 5931 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from ism.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
import cPickle
from utils.blob import im_list_to_blob, pad_im
import os
import math
import scipy.io
from scipy.optimize import minimize
def _get_image_blob(im, im_depth):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
        blob (ndarray): a data blob holding the processed color image
        blob_depth (ndarray): a data blob holding the processed depth image
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
# RGB
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
processed_ims = []
im_scale_factors = []
assert len(cfg.TEST.SCALES_BASE) == 1
im_scale = cfg.TEST.SCALES_BASE[0]
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# depth
im_orig = im_depth.astype(np.float32, copy=True)
im_orig = im_orig / im_orig.max() * 255
im_orig = np.tile(im_orig[:,:,np.newaxis], (1,1,3))
im_orig -= cfg.PIXEL_MEANS
processed_ims_depth = []
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
processed_ims_depth.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims, 3)
blob_depth = im_list_to_blob(processed_ims_depth, 3)
return blob, blob_depth, np.array(im_scale_factors)
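# Illustrative usage sketch (not part of the original file): build the color
# and depth blobs from synthetic images.  It assumes the default cfg from
# ism.config provides TEST.SCALES_BASE (with a single scale) and PIXEL_MEANS;
# real inputs come from the imdb readers used in test_net() below.
def _get_image_blob_demo():
    im = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    im_depth = (np.random.rand(480, 640) * 1000).astype(np.uint16)
    blob, blob_depth, scales = _get_image_blob(im, im_depth)
    return blob.shape, blob_depth.shape, scales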
def im_segment(net, im, im_depth, num_classes):
    """Segment an image into per-pixel class labels.
    Arguments:
        net (caffe.Net): network to use
        im (ndarray): color image to test (in BGR order)
        im_depth (ndarray): depth image to test
        num_classes (int): number of classes (including background)
    Returns:
        labels (ndarray): height x width array of predicted class labels
    """
# compute image blob
im_blob, im_depth_blob, im_scale_factors = _get_image_blob(im, im_depth)
# reshape network inputs
net.blobs['data_image'].reshape(*(im_blob.shape))
blobs_out = net.forward(data_image=im_blob.astype(np.float32, copy=False))
# get outputs
cls_prob = blobs_out['prob']
height = cls_prob.shape[2]
width = cls_prob.shape[3]
labels = np.argmax(cls_prob, axis = 1).reshape((height, width))
return labels
def vis_segmentations(im, im_depth, labels, labels_gt, colors):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
# show image
ax = fig.add_subplot(221)
im = im[:, :, (2, 1, 0)]
plt.imshow(im)
ax.set_title('input image')
# show depth
ax = fig.add_subplot(222)
plt.imshow(im_depth)
ax.set_title('input depth')
# show class label
ax = fig.add_subplot(223)
plt.imshow(labels)
ax.set_title('class labels')
ax = fig.add_subplot(224)
plt.imshow(labels_gt)
ax.set_title('gt class labels')
plt.show()
def test_net(net, imdb):
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
seg_file = os.path.join(output_dir, 'segmentations.pkl')
print imdb.name
if os.path.exists(seg_file):
with open(seg_file, 'rb') as fid:
segmentations = cPickle.load(fid)
imdb.evaluate_segmentations(segmentations, output_dir)
return
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
segmentations = [[] for _ in xrange(num_images)]
# timers
_t = {'im_segment' : Timer(), 'misc' : Timer()}
if cfg.TEST.VISUALIZE:
perm = np.random.permutation(np.arange(num_images))
else:
perm = xrange(num_images)
for i in perm:
# read color image
rgba = pad_im(cv2.imread(imdb.image_path_at(i), cv2.IMREAD_UNCHANGED), 16)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 255
else:
im = rgba
# read depth image
im_depth = cv2.imread(imdb.depth_path_at(i), cv2.IMREAD_UNCHANGED)
_t['im_segment'].tic()
labels = im_segment(net, im, im_depth, imdb.num_classes)
_t['im_segment'].toc()
# build the label image
im_label = imdb.labels_to_image(im, labels)
_t['misc'].tic()
seg = {'labels': labels}
segmentations[i] = seg
_t['misc'].toc()
# read label image
labels_gt = pad_im(cv2.imread(imdb.label_path_at(i), cv2.IMREAD_UNCHANGED), 16)
if len(labels_gt.shape) == 2:
im_label_gt = imdb.labels_to_image(im, labels_gt)
else:
im_label_gt = np.copy(labels_gt[:,:,:3])
im_label_gt[:,:,0] = labels_gt[:,:,2]
im_label_gt[:,:,2] = labels_gt[:,:,0]
if cfg.TEST.VISUALIZE:
vis_segmentations(im, im_depth, im_label, im_label_gt, imdb._class_colors)
print 'im_segment: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_segment'].average_time, _t['misc'].average_time)
seg_file = os.path.join(output_dir, 'segmentations.pkl')
with open(seg_file, 'wb') as f:
cPickle.dump(segmentations, f, cPickle.HIGHEST_PROTOCOL)
# evaluation
imdb.evaluate_segmentations(segmentations, output_dir)
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardShear/Area/A_1e2/Normalized_Shear_Stress_Plot.py | 48 | 3533 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
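# override the magnitudes above with the signed x-components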
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(shear_strain*5,shear_stress/normal_stress,'-r',label='Analytical Solution', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain*5,shear_stress/normal_stress,'-k',label='Numerical Solution', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Normalized_Shear_Stress.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
gewaltig/cython-neuron | examples/nest/plot_tsodyks_depr_fac.py | 2 | 1129 | #! /usr/bin/env python
#
# plot_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
    vm = loadtxt('voltmeter-4-0.dat')  # numpy's loadtxt (via the star imports); mlab's old load() was removed
figure(1)
clf()
plot(vm[:,0], vm[:,1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
| gpl-2.0 |
lepmik/miindio | miindio/lost_tools.py | 1 | 3976 | import matplotlib
import matplotlib.pyplot as plt
import sys
import ast
import xml.etree.ElementTree as ET
def write_fid(fid_fname, quadlist):
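    """Write the quadrilaterals in quadlist to fid_fname as a <Fiducial> XML file (one <Quadrilateral> with <vline>/<wline> coordinates per quad)."""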
with open(fid_fname, 'w') as f:
f.write('<Fiducial>\n')
for quad in quadlist:
vs = [ point[0] for point in quad ]
ws = [ point[1] for point in quad ]
f.write('<Quadrilateral type= \"Contain\">')
f.write('<vline>')
for v in vs:
f.write(str(v) + ' ')
f.write('</vline>')
f.write('<wline>')
for w in ws:
f.write(str(w) + ' ')
f.write('</wline>')
f.write('</Quadrilateral>\n')
f.write('</Fiducial>\n')
def read_file(fn):
f = open(fn)
lines = f.readlines()
x = [float(item.split()[0]) for item in lines]
y = [float(item.split()[1]) for item in lines]
return x, y
def plot_lost(fn):
ax = plt.gcf().add_subplot(111)
x, y = read_file(fn)
plt.plot(x, y, '.')
return ax
def add_fiducial(ax, point_list):
from matplotlib.path import Path
verts = [(x[0], x[1]) for x in point_list]
verts.append((point_list[0][0], point_list[0][1]))
codes = [Path.MOVETO]
for x in range(1, len(point_list)):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
path = Path(verts, codes)
patch = matplotlib.patches.PathPatch(path, color='orange')
ax.add_patch(patch)
def read_fiducial(fn):
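    """Parse a fiducial XML file and return its quadrilaterals as lists of (v, w) points."""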
patches = []
tree = ET.parse(fn)
root = tree.getroot()
for child in root:
if child.tag != "Quadrilateral":
raise ValueError
babys = [baby for baby in child]
vs = [ float(x) for x in babys[0].text.split() ]
ws = [ float(x) for x in babys[1].text.split() ]
        points = list(zip(vs, ws))  # materialize so the points can be indexed under Python 3
patches.append(points)
return patches
def extract_base(fn):
return fn.replace('.lost', '').split('_')[-6]
def zoom_fun(event, ax, base_scale=1.5):
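    """Zoom the axes in or out around the cursor position on mouse-scroll events."""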
# get the current x and y limits
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
cur_xrange = (cur_xlim[1] - cur_xlim[0])*.5
cur_yrange = (cur_ylim[1] - cur_ylim[0])*.5
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'up':
# deal with zoom in
scale_factor = 1/base_scale
elif event.button == 'down':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1
        print(event.button)
# set new limits
ax.set_xlim([xdata - cur_xrange*scale_factor,
xdata + cur_xrange*scale_factor])
ax.set_ylim([ydata - cur_yrange*scale_factor,
ydata + cur_yrange*scale_factor])
plt.draw() # force re-draw
def onclick(event, ax, fid_fname, curr_points, quads):
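    """Accumulate clicked points (marked with red crosses); every fourth point closes a
    quadrilateral, which is drawn and appended to quads. A double-click writes all quads
    to fid_fname and exits."""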
if event.dblclick:
write_fid(fid_fname, quads)
raise SystemExit
inv = ax.transData.inverted()
coords = inv.transform((event.x, event.y))
if len(curr_points) < 4:
curr_points.append(coords)
plt.plot(coords[0], coords[1], 'r+')
plt.draw()
else:
del curr_points[:]
curr_points.append(coords)
plt.plot(coords[0], coords[1], 'r+')
plt.draw()
if len(curr_points) == 4:
patch = [point for point in curr_points]
quads.append(patch)
add_fiducial(ax, patch)
def onkey(event, ax, fid_fname, quads):
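    """Keyboard handler: 'd' deletes the last fiducial (and rewrites fid_fname), 'c' clears the canvas and redraws the fiducials stored in fid_fname."""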
if event.key == 'd':
print('Deleting previous Fiducial.')
del quads[-1]
write_fid(fid_fname, quads)
ax.patches[-1].remove()
plt.draw()
elif event.key == 'c':
print('Clearing canvas and redrawing fiducials.')
plt.cla()
for patch in read_fiducial(fid_fname):
add_fiducial(ax, patch)
plt.draw()
else:
print('Key "{}" not recognized.'.format(event.key))
return
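# Minimal usage sketch (illustrative file names, not part of this module):
#   quads = read_fiducial('marginals.fid.xml')
#   ax = plot_lost('marginals.lost')
#   for q in quads:
#       add_fiducial(ax, q)
#   curr_points = []
#   fig = ax.figure
#   fig.canvas.mpl_connect('scroll_event', lambda e: zoom_fun(e, ax))
#   fig.canvas.mpl_connect('button_press_event',
#                          lambda e: onclick(e, ax, 'marginals.fid.xml', curr_points, quads))
#   fig.canvas.mpl_connect('key_press_event',
#                          lambda e: onkey(e, ax, 'marginals.fid.xml', quads))
#   plt.show()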
| gpl-3.0 |
jreback/pandas | pandas/tests/frame/common.py | 2 | 1753 | from typing import List
from pandas import DataFrame, concat
def _check_mixed_float(df, dtype=None):
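    """Assert that columns A-D of df have the expected float dtypes; dtype may be a single dtype applied to all columns or a per-column mapping."""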
# float16 are most likely to be upcasted to float32
dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"}
if isinstance(dtype, str):
dtypes = {k: dtype for k, v in dtypes.items()}
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get("A"):
assert df.dtypes["A"] == dtypes["A"]
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
def _check_mixed_int(df, dtype=None):
dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"}
if isinstance(dtype, str):
dtypes = {k: dtype for k, v in dtypes.items()}
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get("A"):
assert df.dtypes["A"] == dtypes["A"]
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
def zip_frames(frames: List[DataFrame], axis: int = 1) -> DataFrame:
"""
take a list of frames, zip them together under the
assumption that these all have the first frames' index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return DataFrame(zipped)
| bsd-3-clause |
ToFuProject/tofu | tofu/geom/_plot.py | 2 | 66660 | # Built-in
import os
import itertools as itt
import warnings
# Generic common libraries
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon as mPolygon
from matplotlib.patches import Wedge as mWedge
from matplotlib.patches import Rectangle as mRectangle
from matplotlib.axes._axes import Axes
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
# ToFu-specific
try:
from tofu.version import __version__
import tofu.utils as utils
import tofu.geom._def as _def
import tofu.geom._GG as _GG
except Exception:
from tofu.version import __version__
from .. import utils as utils
from . import _def as _def
from . import _GG as _GG
#__author_email__ = '[email protected]'
_fontsize = 8
_labelpad = 0
__github = 'https://github.com/ToFuProject/tofu/issues'
_wintit = 'tofu-%s report issues / requests at %s'%(__version__, __github)
_nchMax = 4
_cdef = 'k'
_cbck = (0.8,0.8,0.8)
_lcch = [plt.cm.tab20.colors[ii] for ii in [6,8,10,7,9,11]]
# Generic
def _check_Lax(lax=None, n=2):
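    """Normalize lax into a list of 2 slots (Axes or None) and return flags telling whether the input was None, a single Axes, or an iterable of Axes."""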
assert n in [1,2]
c0 = lax is None
c1 = issubclass(lax.__class__,Axes)
c2 = hasattr(lax, '__iter__')
if c2:
c2 = all([aa is None or issubclass(aa.__class__,Axes) for aa in lax])
c2 = c2 and len(lax) in [1,2]
if n==1:
assert c0 or c1, "Arg ax must be None or a plt.Axes instance !"
else:
assert c0 or c1 or c2, "Arg lax must be an Axes or a list/tuple of such !"
if c0:
lax = [None,None]
elif c1:
lax = [lax,None]
elif c2 and len(lax)==1:
lax = [lax[0],None]
else:
lax = list(lax)
return lax, c0, c1, c2
"""
###############################################################################
###############################################################################
Ves class and functions
###############################################################################
"""
############################################
##### Plotting functions
############################################
def _Struct_plot_format(ss, proj='all', **kwdargs):
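    """Build, for each requested projection, the plotting kwargs by combining user-provided values, the instance's stored defaults (ss._dplot) and module-level defaults, using retro-compatible key names."""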
# Local default
defplot = {'cross':{'Elt':'PIBsBvV',
'dP':{'empty':_def.TorPd , 'full':_def.StructPd},
'dI':_def.TorId,
'dBs':_def.TorBsd,
'dBv':_def.TorBvd,
'dVect':_def.TorVind},
'hor':{'Elt':'PIBsBvV',
'dP':{'empty':_def.TorPd , 'full':_def.StructPd_Tor},
'dI':_def.TorITord,
'dBs':_def.TorBsTord,
'dBv':_def.TorBvTord,
'Nstep':_def.TorNTheta},
'3d':{'Elt':'P',
'dP':{'color':(0.8,0.8,0.8,1.),
'rstride':1,'cstride':1,
'linewidth':0., 'antialiased':False},
'Lim':None,
'Nstep':_def.TorNTheta}}
# Select keys for proj
lproj = ['cross','hor'] if proj=='all' else [proj]
# Match with kwdargs
dk = {}
dk['cross']= dict([(k,k) for k in defplot['cross'].keys()])
dk['hor']= dict([(k,k+'Hor') for k in defplot['hor'].keys()])
dk['hor']['Elt'] = 'Elt'
dk['hor']['dP'] = 'dP'
dk['hor']['Nstep'] = 'Nstep'
dk['3d']= dict([(k,k) for k in defplot['3d'].keys()])
# Rename keys (retro-compatibility)
lrepl = [('Elt','Elt'),('dP','Pdict'),('dI','Idict'),('dBs','Bsdict'),
('dBv','Bvdict'),('dVect','Vdict'),('dIHor','IdictHor'),
('dBsHor','BsdictHor'),('dBvHor','BvdictHor'),
('Nstep','Nstep'),('Lim','Lim')]
dnk = dict(lrepl)
# Map out dict
dout = {}
for pp in lproj:
dout[pp] = {}
for k in defplot[pp].keys():
v = kwdargs[dk[pp][k]]
if v is None:
if k in ss._dplot[pp].keys():
dout[pp][dnk[k]] = ss._dplot[pp][k]
else:
if k=='dP':
if ss.Id.Cls=='Ves':
dout[pp][dnk[k]] = defplot[pp][k]['empty']
else:
dout[pp][dnk[k]] = defplot[pp][k]['full']
else:
dout[pp][dnk[k]] = defplot[pp][k]
else:
dout[pp][dnk[k]] = v
return dout
def Struct_plot(lS, lax=None, proj='all', element=None, dP=None,
dI=None, dBs=None, dBv=None,
dVect=None, dIHor=None, dBsHor=None, dBvHor=None,
Lim=None, Nstep=None, dLeg=None, indices=False,
draw=True, fs=None, wintit=None, tit=None, Test=True):
""" Plot the projections of a list of Struct subclass instances
D. VEZINET, Aug. 2014
    Inputs :
        lS          A Struct subclass instance or a list of such
        lax         A plt.Axes instance or a list of 2 (if given) on which to plot the projections, otherwise (None) a new figure/axes is created
        proj        A str in ['cross','hor','all','3d'] specifying which projection(s) to plot
        element     A str specifying which elements to plot (e.g. 'PIBsBvV')
        dP          A dictionary specifying the style of the polygon plot
        dLeg        A dictionary specifying the style of the legend box (if None => no legend)
    Outputs :
        lax         The plt.Axes instance(s) on which the plots were performed
"""
proj = proj.lower()
if Test:
msg = "Arg proj must be in ['cross','hor','all','3d'] !"
assert proj in ['cross','hor','all','3d'], msg
lax, C0, C1, C2 = _check_Lax(lax,n=2)
assert type(draw) is bool, "Arg draw must be a bool !"
C0 = issubclass(lS.__class__, utils.ToFuObject)
C1 = (isinstance(lS,list)
and all([issubclass(ss.__class__, utils.ToFuObject) for ss in lS]))
msg = "Arg lves must be a Struct subclass or a list of such !"
assert C0 or C1, msg
if C0:
lS = [lS]
nS = len(lS)
if wintit is None:
wintit = _wintit
kwa = dict(fs=fs, wintit=wintit, Test=Test)
if proj=='3d':
# Temporary matplotlib issue
dLeg = None
for ii in range(0,nS):
dplot = _Struct_plot_format(lS[ii], proj=proj, Elt=element,
dP=dP, dI=dI, dBs=dBs,
dBv=dBv, dVect=dVect, dIHor=dIHor,
dBsHor=dBsHor, dBvHor=dBvHor,
Lim=Lim, Nstep=Nstep)
for k in dplot.keys():
dplot[k].update(kwa)
if proj=='3d':
lax[0] = _Plot_3D_plt_Ves(lS[ii], ax=lax[0], LegDict=None,
draw=False, **dplot[proj])
else:
if proj=='cross':
lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0],
indices=indices, LegDict=None,
draw=False, **dplot[proj])
elif proj=='hor':
lax[0] = _Plot_HorProj_Ves(lS[ii], ax=lax[0],
indices=indices, LegDict=None,
draw=False, **dplot[proj])
elif proj=='all':
if lax[0] is None or lax[1] is None:
lax = list(_def.Plot_LOSProj_DefAxes('All', fs=fs,
wintit=wintit,
Type=lS[ii].Id.Type))
lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0], LegDict=None,
indices=indices,
draw=False, **dplot['cross'])
lax[1] = _Plot_HorProj_Ves(lS[ii], ax=lax[1], LegDict=None,
indices=indices,
draw=False, **dplot['hor'])
# recompute the ax.dataLim
lax[0].relim()
if proj=='all':
lax[1].relim()
# update ax.viewLim using the new dataLim
lax[0].autoscale_view()
if proj=='all':
lax[1].autoscale_view()
if tit != False:
lax[0].figure.suptitle(tit)
if not dLeg is None:
lax[0].legend(**dLeg)
if draw:
lax[0].relim()
lax[0].autoscale_view()
if len(lax)==2 and lax[1] is not None:
lax[1].relim()
lax[1].autoscale_view()
lax[0].figure.canvas.draw()
lax = lax if proj=='all' else lax[0]
return lax
def _Plot_CrossProj_Ves(V, ax=None, Elt='PIBsBvV',
Pdict=_def.TorPd, Idict=_def.TorId, Bsdict=_def.TorBsd,
Bvdict=_def.TorBvd, Vdict=_def.TorVind,
LegDict=_def.TorLegd, indices=False,
draw=True, fs=None, wintit=_wintit, Test=True):
""" Plot the poloidal projection of a Ves instance
Parameters
----------
V : tfg.Ves / tfg.Struct
A Ves instance
ax : None / plt.Axes
A plt.Axes instance (if given) on which to plot, otherwise ('None') a new figure/axes is created
Pdict : dict
        A dictionary specifying the style of the polygon plot
    LegDict :   None / dict
        A dictionary specifying the style of the legend box (if None => no legend)
Returns
-------
ax The plt.Axes instance on which the plot was performed
"""
if Test:
ax, C0, C1, C2 = _check_Lax(ax,n=1)
assert type(Pdict) is dict, 'Arg Pdict should be a dictionary !'
assert type(Idict) is dict, "Arg Idict should be a dictionary !"
assert type(Bsdict) is dict, "Arg Bsdict should be a dictionary !"
assert type(Bvdict) is dict, "Arg Bvdict should be a dictionary !"
assert type(Vdict) is dict, "Arg Vdict should be a dictionary !"
assert type(LegDict) is dict or LegDict is None, 'Arg LegDict should be a dictionary !'
assert type(indices) is bool
if indices:
assert 'P' in Elt
if ax is None:
ax = _def.Plot_LOSProj_DefAxes('Cross', fs=fs,
wintit=wintit, Type=V.Id.Type)
if 'P' in Elt or 'V' in Elt:
P_closed = V.Poly_closed
if 'V' in Elt or indices:
midX = (P_closed[0,:-1]+P_closed[0,1:])/2.
midY = (P_closed[1,:-1]+P_closed[1,1:])/2.
VInX, VInY = V.dgeom['VIn'][0,:], V.dgeom['VIn'][1,:]
if 'P' in Elt:
if V._InOut=='in':
ax.plot(P_closed[0,:], P_closed[1,:],
label=V.Id.NameLTX,**Pdict)
elif V._InOut=='out':
ax.add_patch(mPolygon(V.Poly.T, closed=True,
label=V.Id.NameLTX, **Pdict))
else:
msg = "self._InOut not defined !"
raise Exception(msg)
if 'I' in Elt:
ax.plot(V.dsino['RefPt'][0], V.dsino['RefPt'][1],
label=V.Id.NameLTX+" Imp", **Idict)
if 'Bs' in Elt:
ax.plot(V.dgeom['BaryS'][0], V.dgeom['BaryS'][1],
label=V.Id.NameLTX+" Bs", **Bsdict)
if 'Bv' in Elt and V.Id.Type=='Tor':
ax.plot(V.dgeom['BaryV'][0], V.dgeom['BaryV'][1],
label=V.Id.NameLTX+" Bv", **Bvdict)
if 'V' in Elt:
ax.quiver(midX, midY, VInX, VInY,
angles='xy', scale_units='xy',
label=V.Id.NameLTX+" Vin", **Vdict)
if indices:
for ii in range(0,V.dgeom['nP']):
ax.annotate(r"{0}".format(ii), size=10,
xy = (midX[ii],midY[ii]),
xytext = (midX[ii]-0.01*VInX[ii],
midY[ii]-0.01*VInY[ii]),
horizontalalignment='center',
verticalalignment='center')
if not LegDict is None:
ax.legend(**LegDict)
if draw:
ax.relim()
ax.autoscale_view()
ax.figure.canvas.draw()
return ax
def _Plot_HorProj_Ves(V, ax=None, Elt='PI', Nstep=_def.TorNTheta,
Pdict=_def.TorPd, Idict=_def.TorITord,
Bsdict=_def.TorBsTord, Bvdict=_def.TorBvTord,
LegDict=_def.TorLegd, indices=False,
draw=True, fs=None, wintit=_wintit, Test=True):
""" Plotting the toroidal projection of a Ves instance
Parameters
----------
V A Ves instance
Nstep An int (the number of points for evaluation of theta by np.linspace)
ax A plt.Axes instance (if given) on which to plot, otherwise ('None') a new figure/axes is created
    Pdict       A dictionary specifying the style of the polygon plot
    LegDict     A dictionary specifying the style of the legend box (if None => no legend)
Returns
-------
ax The plt.Axes instance on which the plot was performed
"""
if Test:
assert type(Nstep) is int
ax, C0, C1, C2 = _check_Lax(ax,n=1)
assert type(Pdict) is dict, 'Arg Pdict should be a dictionary !'
assert type(Idict) is dict, 'Arg Idict should be a dictionary !'
assert type(LegDict) is dict or LegDict is None, 'Arg LegDict should be a dictionary !'
if ax is None:
ax = _def.Plot_LOSProj_DefAxes('Hor', Type=V.Id.Type,
fs=fs, wintit=wintit)
P1Min = V.dgeom['P1Min']
P1Max = V.dgeom['P1Max']
if 'P' in Elt:
if V._InOut=='in':
if V.Id.Type=='Tor':
Theta = np.linspace(0, 2*np.pi, num=Nstep,
endpoint=True, retstep=False)
lx = np.concatenate((P1Min[0]*np.cos(Theta),np.array([np.nan]),
P1Max[0]*np.cos(Theta)))
ly = np.concatenate((P1Min[0]*np.sin(Theta),np.array([np.nan]),
P1Max[0]*np.sin(Theta)))
elif V.Id.Type=='Lin':
lx = np.array([V.Lim[0,0],V.Lim[0,1],V.Lim[0,1],
V.Lim[0,0],V.Lim[0,0]])
ly = np.array([P1Min[0],P1Min[0],P1Max[0],P1Max[0],P1Min[0]])
ax.plot(lx,ly,label=V.Id.NameLTX,**Pdict)
elif V._InOut=='out':
if V.Id.Type=='Tor':
Theta = np.linspace(0, 2*np.pi, num=Nstep,
endpoint=True, retstep=False)
if V.noccur==0:
lx = np.concatenate((P1Min[0]*np.cos(Theta),
P1Max[0]*np.cos(Theta[::-1])))
ly = np.concatenate((P1Min[0]*np.sin(Theta),
P1Max[0]*np.sin(Theta[::-1])))
Lp = [mPolygon(np.array([lx,ly]).T, closed=True,
label=V.Id.NameLTX, **Pdict)]
else:
Lp = [mWedge((0,0), P1Max[0],
V.Lim[ii][0]*180./np.pi,
V.Lim[ii][1]*180./np.pi,
width=P1Max[0]-P1Min[0],
label=V.Id.NameLTX, **Pdict)
for ii in range(0,len(V.Lim))]
elif V.Id.Type=='Lin':
ly = np.array([P1Min[0],P1Min[0],
P1Max[0],P1Max[0],P1Min[0]])
Lp = []
for ii in range(0,len(V.Lim)):
lx = np.array([V.Lim[ii][0],V.Lim[ii][1],
V.Lim[ii][1],V.Lim[ii][0],
V.Lim[ii][0]])
Lp.append(mPolygon(np.array([lx,ly]).T,
closed=True, label=V.Id.NameLTX,
**Pdict))
for pp in Lp:
ax.add_patch(pp)
else:
msg = "Unknown self._InOut !"
raise Exception(msg)
if 'I' in Elt:
if V.Id.Type=='Tor':
lx = V.dsino['RefPt'][0]*np.cos(Theta)
ly = V.dsino['RefPt'][0]*np.sin(Theta)
elif V.Id.Type=='Lin':
lx = np.array([np.min(V.Lim),np.max(V.Lim)])
ly = V.dsino['RefPt'][0]*np.ones((2,))
ax.plot(lx,ly,label=V.Id.NameLTX+" Imp",**Idict)
if 'Bs' in Elt:
if V.Id.Type=='Tor':
lx = V.dgeom['BaryS'][0]*np.cos(Theta)
ly = V.dgeom['BaryS'][0]*np.sin(Theta)
elif V.Id.Type=='Lin':
lx = np.array([np.min(V.Lim),np.max(V.Lim)])
ly = V.dgeom['BaryS'][0]*np.ones((2,))
ax.plot(lx,ly,label=V.Id.NameLTX+" Bs", **Bsdict)
if 'Bv' in Elt and V.Type=='Tor':
lx = V.dgeom['BaryV'][0]*np.cos(Theta)
ly = V.dgeom['BaryV'][0]*np.sin(Theta)
ax.plot(lx,ly,label=V.Id.NameLTX+" Bv", **Bvdict)
if indices and V.noccur>1:
if V.Id.Type=='Tor':
for ii in range(0,V.noccur):
R, theta = V.dgeom['P1Max'][0], np.mean(V.Lim[ii])
X, Y = R*np.cos(theta), R*np.sin(theta)
ax.annotate(r"{0}".format(ii), size=10,
xy = (X,Y),
xytext = (X+0.02*np.cos(theta),
Y+0.02*np.sin(theta)),
horizontalalignment='center',
verticalalignment='center')
elif V.Id.Type=='Lin':
for ii in range(0,V.noccur):
X, Y = np.mean(V.Lim[ii]), V.dgeom['P1Max'][0]
ax.annotate(r"{0}".format(ii), size=10,
xy = (X,Y),
xytext = (X, Y+0.02),
horizontalalignment='center',
verticalalignment='center')
if not LegDict is None:
ax.legend(**LegDict)
if draw:
ax.relim()
ax.autoscale_view()
ax.figure.canvas.draw()
return ax
def _Plot_3D_plt_Ves(V, ax=None, Elt='P', Lim=None,
Nstep=_def.Tor3DThetamin, Pdict=_def.TorP3Dd,
LegDict=_def.TorLegd,
draw=True, fs=None, wintit=_wintit, Test=True):
if Test:
        msg = 'Arg ax should be a plt.Axes instance !'
assert isinstance(ax,Axes3D) or ax is None, msg
assert Lim is None or (hasattr(Lim,'__iter__') and len(Lim)==2), "Arg Lim should be an iterable of 2 elements !"
        assert type(Pdict) is dict and (type(LegDict) is dict or LegDict is None), "Args Pdict and LegDict should be dictionaries !"
assert type(Elt)is str, "Arg Elt must be a str !"
if ax is None:
ax = _def.Plot_3D_plt_Tor_DefAxes(fs=fs, wintit=wintit)
if V.Type=='Lin':
lim = np.array(V.Lim)
lim = lim.reshape((1,2)) if lim.ndim==1 else lim
if Lim is not None:
for ii in range(lim.shape[0]):
lim[ii,:] = [max(Lim[0],lim[ii,0]),min(lim[1],Lim[1])]
else:
lim = np.array([[0.,2.*np.pi]]) if V.noccur==0 else np.array(V.Lim)
lim = lim.reshape((1,2)) if lim.ndim==1 else lim
if Lim is not None and V.Id.Cls=='Ves':
Lim[0] = np.arctan2(np.sin(Lim[0]),np.cos(Lim[0]))
Lim[1] = np.arctan2(np.sin(Lim[1]),np.cos(Lim[1]))
for ii in range(lim.shape[0]):
lim[ii,:] = Lim
if 'P' in Elt:
handles, labels = ax.get_legend_handles_labels()
for ii in range(lim.shape[0]):
theta = np.linspace(lim[ii,0],lim[ii,1],Nstep)
theta = theta.reshape((1,Nstep))
if V.Type=='Tor':
X = np.dot(V.Poly_closed[0:1,:].T,np.cos(theta))
Y = np.dot(V.Poly_closed[0:1,:].T,np.sin(theta))
Z = np.dot(V.Poly_closed[1:2,:].T,np.ones(theta.shape))
elif V.Type=='Lin':
X = np.dot(theta.reshape((Nstep,1)),
np.ones((1,V.Poly_closed.shape[1]))).T
Y = np.dot(V.Poly_closed[0:1,:].T,np.ones((1,Nstep)))
Z = np.dot(V.Poly_closed[1:2,:].T,np.ones((1,Nstep)))
ax.plot_surface(X,Y,Z, label=V.Id.NameLTX, **Pdict)
proxy = plt.Rectangle((0,0),1,1, fc=Pdict['color'])
handles.append(proxy)
labels.append(V.Id.NameLTX)
return ax
"""
def Plot_3D_mlab_Tor(T,fig='None',thetaLim=(np.pi/2,2*np.pi),Tdict=Dict_3D_mlab_Tor_Def,LegDict=LegDict_Def):
if fig=='None':
fig = Plot_3D_mlab_Tor_DefFig()
thetamin = np.pi/20
N = np.ceil((thetaLim[1]-thetaLim[0])/thetamin)
theta = np.linspace(thetaLim[0],thetaLim[1],N).reshape((1,N))
X = np.dot(T.Poly[0:1,:].T,np.cos(theta))
Y = np.dot(T.Poly[0:1,:].T,np.sin(theta))
Z = np.dot(T.POly[1:2,:].T,np.ones(theta.shape))
Theta = np.dot(np.ones(T.Poly[1:2,:].T.shape),theta)
S = mlab.mesh(X,Y,Z,figure=fig,name=T.Id.NameLTX, scalars=Theta,**Tdict)
mlab.orientation_axes(figure=fig)
#mlab.axes(S)
return fig,S
"""
def Plot_Impact_PolProjPoly(lS, Leg="", ax=None, Ang='theta', AngUnit='rad',
Sketch=True, dP=None,
dLeg=_def.TorLegd, draw=True, fs=None,
wintit=None, tit=None, Test=True):
""" Plotting the toroidal projection of a Ves instance
D. VEZINET, Aug. 2014
Inputs :
T A Ves instance
Leg A str (the legend label to be used if T is not a Ves instance)
ax A plt.Axes instance (if given) on which to plot the projection space, otherwise ('None') a new figure/axes is created
Dict A dictionnary specifying the style of the boundary polygon plot
dLeg A dictionnary specifying the style of the legend box
Outputs :
ax The plt.Axes instance on which the poloidal plot was performed
"""
if Test:
Lax, C0, C1, C2 = _check_Lax(ax,n=1)
        assert C0 or C1, 'Arg ax should be a plt.Axes instance !'
assert dP is None or type(dP) is dict, "Arg dP must be a dictionary !"
assert dLeg is None or type(dLeg) is dict, "Arg dLeg must be a dictionary !"
assert Ang in ['theta','xi'], "Arg Ang must be in ['theta','xi'] !"
assert AngUnit in ['rad','deg'], "Arg AngUnit must be in ['rad','deg'] !"
C0 = issubclass(lS.__class__, utils.ToFuObject)
C1 = (isinstance(lS,list)
and all([issubclass(ss.__class__, utils.ToFuObject) for ss in lS]))
msg = "Arg lves must be a Struct subclass or a list of such !"
assert C0 or C1, msg
if C0:
lS = [lS]
nS = len(lS)
# Get Sketch
if ax is None:
if wintit is None:
wintit = _wintit
ax, axsketch = _def.Plot_Impact_DefAxes('Cross', fs=fs, wintit=wintit,
Ang=Ang, AngUnit=AngUnit,
Sketch=Sketch)
if dP is not None:
dp = dP
# Get up/down limits
pPmax, pPmin = 0, 0
for ss in lS:
pmax = np.max(ss.dsino['EnvMinMax'])
if pmax>pPmax:
pPmax = pmax
pmin = np.min(ss.dsino['EnvMinMax'])
if pmin<pPmin:
pPmin = pmin
if nS>0:
DoUp = (pPmin,pPmax)
nP = pmax.size
handles, labels = ax.get_legend_handles_labels()
for ii in range(0,nS):
Theta, pP = lS[ii].dsino['EnvTheta'], lS[ii].dsino['EnvMinMax'][0,:]
pN = lS[ii].dsino['EnvMinMax'][1,:]
if Ang=='xi':
Theta, pP, pN = _GG.ConvertImpact_Theta2Xi(Theta, pP, pN)
Theta = Theta.ravel()
if dP is None:
dp = {'facecolor':lS[ii].get_color(), 'edgecolor':'k',
'linewidth':1., 'linestyle':'-'}
if lS[ii]._InOut=='in':
ax.fill_between(Theta, pP, DoUp[1]*np.ones((nP,)),**dp)
ax.fill_between(Theta, DoUp[0]*np.ones((nP,)), pN,**dp)
elif lS[ii]._InOut=='out':
ax.fill_between(Theta, pP, pN, **dp)
else:
msg = "self._InOut not defined for {0}".format(lS[ii].Id.Cls)
raise Exception(msg)
proxy = plt.Rectangle((0,0),1,1, fc=dp['facecolor'])
handles.append(proxy)
labels.append(lS[ii].Id.Cls+' '+lS[ii].Id.Name)
if nS>0:
ax.set_ylim(DoUp)
if not dLeg is None:
ax.legend(handles,labels,**dLeg)
if draw:
ax.figure.canvas.draw()
return ax
# Deprecated ?
def Plot_Impact_3DPoly(T, Leg="", ax=None, Ang=_def.TorPAng,
AngUnit=_def.TorPAngUnit, Pdict=_def.TorP3DFilld,
dLeg=_def.TorLegd,
draw=True, fs=None, wintit=_wintit, Test=True):
""" Plotting the toroidal projection of a Ves instance
D. VEZINET, Aug. 2014
Inputs :
T A Ves instance
Leg A str (the legend label to be used if T is not a Ves instance)
ax A plt.Axes instance (if given) on which to plot the projection space, otherwise ('None') a new figure/axes is created
Dict A dictionnary specifying the style of the boundary polygon plot
dLeg A dictionnary specifying the style of the legend box
Outputs :
ax The plt.Axes instance on which the poloidal plot was performed
"""
if Test:
assert T.Id.Cls in ['Ves','Struct'] or (isinstance(T,tuple) and len(T)==3), "Arg T must be Ves instance or tuple with (Theta,pP,pN) 3 ndarrays !"
assert isinstance(ax,Axes3D) or ax is None, "Arg ax must be a Axes instance !"
assert type(Pdict) is dict, "Arg Pdict must be a dictionary !"
assert type(dLeg) is dict or dLeg is None, "Arg dLeg must be a dictionary !"
assert Ang in ['theta','xi'], "Arg Ang must be in ['theta','xi'] !"
assert AngUnit in ['rad','deg'], "Arg AngUnit must be in ['rad','deg'] !"
if ax is None:
ax = _def.Plot_Impact_DefAxes('3D', fs=fs, wintit=wintit)
handles, labels = ax.get_legend_handles_labels()
if T.Id.Cls == "Ves":
Leg = T.Id.NameLTX
Theta, pP, pN = T._Imp_EnvTheta, T._Imp_EnvMinMax[0,:], T._Imp_EnvMinMax[1,:]
else:
assert isinstance(T[0],np.ndarray) and isinstance(T[1],np.ndarray) and isinstance(T[2],np.ndarray), "Args Theta, pP and pN should be np.ndarrays !"
assert T[0].shape==T[1].shape==T[2].shape, "Args Theta, pP and pN must have same shape !"
Theta, pP, pN = T
AngName = r"$\theta$"
if Ang=='xi':
Theta, pP, pN = _GG.ConvertImpact_Theta2Xi(Theta, pP, pN)
AngName = r"$\xi$"
yDoUp, zDoUp = ax.get_ylim(), ax.get_zlim()
x = np.outer(Theta.flatten(),np.ones(zDoUp.shape))
yP = np.outer(pP.flatten(),np.ones(zDoUp.shape))
yN = np.outer(pN.flatten(),np.ones(zDoUp.shape))
z = np.outer(np.ones(pP.flatten().shape),zDoUp)
ax.plot_surface(x,yP,z,rstride=1,cstride=1,label=Leg,**Pdict)
ax.plot_surface(x,yN,z,rstride=1,cstride=1,label=Leg,**Pdict)
proxy = plt.Rectangle((0,0),1,1, fc=Pdict['color'])
handles.append(proxy)
labels.append(Leg)
ax.set_xticks([0,np.pi/4.,np.pi/2.,3.*np.pi/4.,np.pi])
ax.set_zticks([-np.pi/2.,-np.pi/4.,0.,np.pi/4.,np.pi/2.])
if AngUnit=='rad':
ax.set_xticklabels([r"$0$",r"$\pi/4$",r"$\pi/2$",r"$3\pi/4$",r"$\pi$"])
ax.set_zticklabels([r"$-\pi/2$",r"$-\pi/4$",r"$0$",r"$\pi/4$",r"$\pi/2$"])
AngUnit = r"$(rad.)$"
elif AngUnit=='deg':
ax.set_xticklabels([r"$0$",r"$90$",r"$180$",r"$270$",r"$360$"])
ax.set_zticklabels([r"$-180$",r"$-90$",r"$0$",r"$90$",r"$180$"])
AngUnit = r"$(deg.)$"
ax.set_xlabel(AngName+r" "+AngUnit)
ax.set_zlabel(r"$\phi$ "+AngUnit)
if not dLeg is None:
ax.legend(handles,labels,**dLeg)
if draw:
ax.figure.canvas.draw()
return ax
############################################
# Phi Theta Prof dist plotting
############################################
def Config_phithetaproj_dist(config, refpt, dist, indStruct,
distonly=False,
cmap=None, vmin=None, vmax=None, invertx=None,
ax=None, fs=None, cbck=(0.8,0.8,0.8,0.8),
tit=None, wintit=None, legend=None, draw=None):
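    # Plot the (phi, theta) distance map from refpt, colored either by the
    # touched structure (cmap='touch') or by a colormap scaled on distance,
    # alongside the cross-section and horizontal views of the config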
if cmap is None:
cmap = 'touch'
lS = config.lStruct
indsu = np.unique(indStruct)
if invertx is None:
invertx = True
# set extent
ratio = refpt[0] / np.nanmin(dist)
extent = np.pi*np.r_[-1., 1., -1.,1.]
# set colors
vmin = np.nanmin(dist) if vmin is None else vmin
vmax = np.nanmax(dist) if vmax is None else vmax
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
colshape = (dist.shape[0], dist.shape[1], 4)
if cmap == 'touch':
cols = np.array(np.broadcast_to(mpl.colors.to_rgba(cbck), colshape))
for ii in indsu:
ind = indStruct == ii
cols[ind,:] = np.r_[mpl.colors.to_rgba(lS[ii].get_color())][None,None,:]
else:
        cols = np.tile(mpl.colors.to_rgba(cmap),
                       (dist.shape[0], dist.shape[1], 1))  # one rgba value per pixel
cols[:,:,-1] = 1.-norm(dist)
# Plotting
if not distonly or ax is None:
fig, dax = _def._Config_phithetaproj_default()
if tit is not None:
fig.suptitle(tit)
dax['dist'][0].imshow(cols, extent=extent, aspect='auto',
interpolation='nearest', origin='lower', zorder=-1)
dax['cross'][0], dax['hor'][0] = config.plot(lax=[dax['cross'][0],
dax['hor'][0]],
draw=False)
dax['dist'][0].set_xlim(np.pi*np.r_[-1.,1.])
dax['dist'][0].set_ylim(np.pi*np.r_[-1.,1.])
dax['dist'][0].set_aspect(aspect=1./ratio)
# legend proxy
# if legend != False:
# handles, labels = dax['cross'][0].get_legend_handles_labels()
# for ii in indsu:
# handles.append( mRectangle((0.,0.), 1, 1, fc=lS[ii].get_color()) )
# labels.append( '%s_%s'%(lS[ii].Id.Cls, lS[ii].Id.Name) )
# dax['cross'][0].legend(handles, labels, frameon=False,
# bbox_to_anchor=(1.01,1.), loc=2, borderaxespad=0.)
if invertx is True:
dax['dist'][0].invert_xaxis()
if draw:
fig.canvas.draw()
return dax
"""
###############################################################################
###############################################################################
LOS class and functions
###############################################################################
"""
############################################
# Utility functions
############################################
def _LOS_calc_InOutPolProj_Debug(config, Ds, us ,PIns, POuts,
L=3, nptstot=None, Lim=None, Nstep=100,
fs=None, wintit=_wintit, draw=True):
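    # Debugging helper: plot the configuration in 3D and overlay short segments
    # of length L along the provided rays (Ds, us) being diagnosed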
# Preformat
assert Ds.shape==us.shape==PIns.shape==POuts.shape
if Ds.ndim==1:
Ds, us = Ds.reshape((3,1)), us.reshape((3,1))
PIns, POuts = PIns.reshape((3,1)), POuts.reshape((3,1))
nP = Ds.shape[1]
pts = (Ds[:,:,None]
+ np.r_[0., L, np.nan][None,None,:]*us[:,:,None]).reshape((3,nP*3))
# Plot
ax = config.plot(element='P', proj='3d', Lim=Lim, Nstep=Nstep, dLeg=None,
fs=fs, wintit=wintit, draw=False)
msg = '_LOS_calc_InOutPolProj - Debugging %s / %s pts'%(str(nP),str(nptstot))
ax.set_title(msg)
ax.plot(pts[0,:], pts[1,:], pts[2,:], c='k', lw=1, ls='-')
# ax.plot(PIns[0,:],PIns[1,:],PIns[2,:],
# c='b', ls='None', marker='o', label=r"PIn")
# ax.plot(POuts[0,:],POuts[1,:],POuts[2,:],
# c='r', ls='None', marker='x', label=r"POut")
# ax.legend(**_def.TorLegd)
if draw:
ax.figure.canvas.draw()
def _get_LLOS_Leg(GLLOS, Leg=None, ind=None, Val=None, Crit='Name', PreExp=None,
PostExp=None, Log='any', InOut='In'):
# Get Legend
if type(Leg) is not str:
Leg = GLLOS.Id.NameLTX
if not type(GLLOS) is list and GLLOS.Id.Cls=='LOS':
GLLOS = [GLLOS]
elif type(GLLOS) is list:
assert all([dd.Id.Cls=='LOS' for dd in GLLOS]), "GLD must be a list of TFG.LOS instances !"
elif GLLOS.Id.Cls=='GLOS':
Leg = GLLOS.Id.NameLTX if Leg is None else Leg
if ind is None and Val is None and PreExp is None and PostExp is None:
ind = np.arange(0,GLLOS.nLOS)
elif not ind is None:
assert type(ind) is np.ndarray and ind.ndim==1, "Arg ind must be a np.ndarray with ndim=1 !"
ind = ind.nonzero()[0] if ind.dtype==bool else ind
elif not (Val is None and PreExp is None and PostExp is None):
ind = GLLOS.select(Val=Val, Crit=Crit, PreExp=PreExp, PostExp=PostExp, Log=Log, InOut=InOut, Out=int)
GLLOS = [GLLOS.LLOS[ii] for ii in ind]
return GLLOS, Leg
def Get_FieldsFrom_LLOS(L,Fields):
# Returns a list of outputs
assert type(L) is list and all([ll.Id.Cls=='LOS' for ll in L]), "Arg L should be a list of LOS"
assert type(Fields) is list and all([type(ff) in [str,tuple] for ff in Fields]), "Arg Fields must be a list of str or tuples !"
Out = []
for ii in range(len(Fields)):
if type(Fields[ii]) is str:
F = getattr(L[0],Fields[ii])
else:
F = getattr(L[0],Fields[ii][0])[Fields[ii][1]]
if type(F) is np.ndarray:
ndim, shape = F.ndim, F.shape
Shape = tuple([1]+[shape[ss] for ss in range(ndim-1,-1,-1)])
if type(Fields[ii]) is str:
F = np.concatenate(tuple([np.resize(getattr(ll,Fields[ii]).T,Shape).T for ll in L]),axis=ndim)
else:
F = np.concatenate(tuple([np.resize(getattr(ll,Fields[ii][0])[Fields[ii][1]].T,Shape).T for ll in L]),axis=ndim)
elif type(F) in [int,float,np.int64,np.float64]:
if type(Fields[ii]) is str:
F = np.asarray([getattr(ll,Fields[ii]) for ll in L])
else:
F = np.asarray([getattr(ll,Fields[ii][0])[Fields[ii][1]] for ll in L])
else:
if type(Fields[ii]) is str:
for ij in range(1,len(L)):
F += getattr(L[ij],Fields[ii])
else:
for ij in range(1,len(L)):
F += getattr(L[ij],Fields[ii][0])[Fields[ii][1]]
Out = Out + [F]
return Out
############################################
# Plotting functions
############################################
def Rays_plot(GLos, Lax=None, Proj='all', reflections=True,
Lplot=_def.LOSLplot, element='LDIORP', element_config='P',
Leg=None, dL=None, dPtD=_def.LOSMd,
dPtI=_def.LOSMd, dPtO=_def.LOSMd, dPtR=_def.LOSMd,
dPtP=_def.LOSMd, dLeg=_def.TorLegd, multi=False,
draw=True, fs=None, wintit=None, tit=None, Test=True, ind=None):
if Test:
C = GLos.Id.Cls in ['Rays','CamLOS1D','CamLOS2D']
assert C, "Arg GLos must be an object child of tfg.Rays !"
Proj = Proj.lower()
C = Proj in ['cross','hor','all','3d']
assert C, "Arg Proj must be in ['cross','hor','all','3d'] !"
Lax, C0, C1, C2 = _check_Lax(Lax, n=2)
assert type(element) is str, "Arg element must be a str !"
C = element_config is None or type(element_config) is str
msg = "Arg element must be None or a str !"
assert C, msg
assert Lplot in ['Tot','In'], "Arg Lplot must be in ['Tot','In']"
C = all([type(dd) is dict for dd in [dPtD,dPtI,dPtO,dPtR,dPtP]])
assert C, "Args dPtD,dPtI,dPtO,dPtR,dPtP must all be dict !"
assert dL is None or type(dL) is dict, "Arg dL must be None or a dict"
assert Leg is None or type(Leg) is str, "Arg Leg must be a str !"
assert type(dLeg) is dict or dLeg is None, 'dLeg must be dict !'
assert type(draw) is bool, "Arg draw must be a bool !"
if wintit is None:
wintit = _wintit
if element_config != '':
Lax = GLos.config.plot(lax=Lax, element=element_config,
proj=Proj, indices=False, fs=fs, tit=False,
draw=False, dLeg=None, wintit=wintit, Test=Test)
Lax, C0, C1, C2 = _check_Lax(Lax, n=2)
# Select subset
if ind is None:
ind = np.arange(0,GLos.nRays)
ind = np.asarray(ind)
Leg = GLos.Id.NameLTX if Leg is None else Leg
dL = _def.LOSLd if dL is None else dL
if multi:
if GLos.Id.Cls in ['Rays','LOS']:
Leg = [None for ii in ind]
else:
Leg = GLos.LNames
if 'c' in dL.keys():
del dL['c']
# Check sino
if GLos._dsino['RefPt'] is None:
element = element.replace('P','')
if len(ind)>0 and not element=='':
if Proj=='3d':
Lax[0] = _Rays_plot_3D(GLos, ax=Lax[0],
reflections=reflections,
Elt=element, Lplot=Lplot,
Leg=Leg, dL=dL, dPtD=dPtD, dPtI=dPtI,
dPtO=dPtO, dPtR=dPtR, dPtP=dPtP, dLeg=None,
multi=multi, ind=ind,
draw=False, fs=fs, wintit=wintit, Test=Test)
else:
if Proj=='all' and None in Lax:
Lax = list(_def.Plot_LOSProj_DefAxes('All',
fs=fs, wintit=wintit,
Type=GLos.config.Type))
if Proj in ['cross','all']:
Lax[0] = _Rays_plot_Cross(GLos, ax=Lax[0],
reflections=reflections,
Elt=element, Lplot=Lplot,
Leg=Leg, dL=dL, dPtD=dPtD, dPtI=dPtI,
dPtO=dPtO, dPtR=dPtR, dPtP=dPtP,
dLeg=None, multi=multi, ind=ind,
draw=False, fs=fs, wintit=wintit,
Test=Test)
if Proj in ['hor','all']:
ii = 0 if Proj=='hor' else 1
Lax[ii] = _Rays_plot_Hor(GLos, ax=Lax[ii],
reflections=reflections,
Elt=element, Lplot=Lplot,
Leg=Leg, dL=dL, dPtD=dPtD, dPtI=dPtI,
dPtO=dPtO, dPtR=dPtR, dPtP=dPtP,
dLeg=None, multi=multi, ind=ind,
draw=False, fs=fs, wintit=wintit,
Test=Test)
if dLeg is not None:
Lax[0].legend(**dLeg)
if draw:
Lax[0].figure.canvas.draw()
Lax = Lax if Proj=='all' else Lax[0]
return Lax
def _Rays_plot_Cross(L,Leg=None, reflections=True,
Lplot='Tot', Elt='LDIORP',ax=None,
dL=_def.LOSLd, dPtD=_def.LOSMd, dPtI=_def.LOSMd,
dPtO=_def.LOSMd, dPtR=_def.LOSMd, dPtP=_def.LOSMd,
dLeg=_def.TorLegd, multi=False, ind=None,
draw=True, fs=None, wintit=_wintit, Test=True):
assert ax is None or isinstance(ax,plt.Axes), 'Wrong input for ax !'
dPts = {'D':('D',dPtD), 'I':('PkIn',dPtI), 'O':('PkOut',dPtO),
'R':('PRMin',dPtR),'P':('RefPt',dPtP)}
if ax is None:
ax = _def.Plot_LOSProj_DefAxes('Cross', fs=fs, wintit=wintit,
Type=L.Ves.Type)
if 'L' in Elt:
R, Z, _, _, _ = L._get_plotL(Lplot=Lplot, proj='Cross',
reflections=reflections, ind=ind, multi=multi)
if multi:
for ii in range(0,len(R)):
ax.plot(R[ii], Z[ii], label=Leg[ii], **dL)
else:
ax.plot(R, Z, label=Leg, **dL)
for kk in dPts.keys():
if kk in Elt:
if kk=='P' and L._dsino['RefPt'] is not None:
P = L._dsino['pts'][:,ind]
elif kk=='D':
P = L.D[:,ind]
elif not (kk == 'R' and L.config.Id.Type == 'Lin'):
P = L._dgeom[dPts[kk][0]][:,ind]
if len(ind)==1:
P = P.reshape((3,1))
if L.config.Id.Type=='Tor':
P = np.array([np.hypot(P[0,:],P[1,:]),P[2,:]])
else:
P = P[1:,:]
if multi:
for ii in range(0,len(ind)):
leg = kk if Leg[ii] is None else Leg[ii]+""+kk
ax.plot(P[0,ii],P[1,ii], label=leg, **dPts[kk][1])
else:
ax.plot(P[0,:],P[1,:], label=Leg, **dPts[kk][1])
if dLeg is not None:
ax.legend(**dLeg)
if draw:
ax.figure.canvas.draw()
return ax
def _Rays_plot_Hor(L, Leg=None, reflections=True,
Lplot='Tot', Elt='LDIORP',ax=None,
dL=_def.LOSLd, dPtD=_def.LOSMd, dPtI=_def.LOSMd,
dPtO=_def.LOSMd, dPtR=_def.LOSMd, dPtP=_def.LOSMd,
dLeg=_def.TorLegd, multi=False, ind=None,
draw=True, fs=None, wintit=_wintit, Test=True):
assert ax is None or isinstance(ax,plt.Axes), 'Wrong input for ax !'
dPts = {'D':('D',dPtD), 'I':('PkIn',dPtI), 'O':('PkOut',dPtO),
'R':('PRMin',dPtR),'P':('RefPt',dPtP)}
if ax is None:
ax = _def.Plot_LOSProj_DefAxes('Hor', fs=fs,
wintit=wintit, Type=L.Ves.Type)
if 'L' in Elt:
_, _, x, y, _ = L._get_plotL(Lplot=Lplot, proj='hor',
reflections=reflections, ind=ind, multi=multi)
if multi:
for ii in range(0,len(x)):
ax.plot(x[ii], y[ii], label=Leg[ii], **dL)
else:
ax.plot(x, y, label=Leg, **dL)
for kk in dPts.keys():
if kk in Elt:
if kk=='P' and L._dsino['RefPt'] is not None:
P = L._dsino['pts'][:,ind]
elif kk=='D':
P = L.D[:,ind]
elif not (kk=='R' and L.config.Id.Type=='Lin'):
P = L._dgeom[dPts[kk][0]][:,ind]
if len(ind)==1:
P = P.reshape((3,1))
if multi:
for ii in range(0,len(ind)):
leg = kk if Leg[ii] is None else Leg[ii]+""+kk
ax.plot(P[0,ii],P[1,ii], label=leg, **dPts[kk][1])
else:
ax.plot(P[0,:],P[1,:], label=Leg, **dPts[kk][1])
if dLeg is not None:
ax.legend(**dLeg)
if draw:
ax.figure.canvas.draw()
return ax
def _Rays_plot_3D(L, Leg=None, reflections=True,
Lplot='Tot',Elt='LDIORr',ax=None,
dL=_def.LOSLd, dPtD=_def.LOSMd, dPtI=_def.LOSMd,
dPtO=_def.LOSMd, dPtR=_def.LOSMd, dPtP=_def.LOSMd,
dLeg=_def.TorLegd, multi=False, ind=None,
draw=True, fs=None, wintit=_wintit, Test=True):
assert ax is None or isinstance(ax,Axes3D), 'Arg ax should be plt.Axes instance !'
dPts = {'D':('D',dPtD), 'I':('PkIn',dPtI), 'O':('PkOut',dPtO),
'R':('PRMin',dPtR),'P':('RefPt',dPtP)}
if ax is None:
ax = _def.Plot_3D_plt_Tor_DefAxes(fs=fs, wintit=wintit)
if 'L' in Elt:
_, _, x, y, z = L._get_plotL(Lplot=Lplot, proj='3d',
reflections=reflections, ind=ind, multi=multi)
if multi:
for ii in range(0,len(x)):
ax.plot(x[ii], y[ii], z[ii],
label=Leg[ii], **dL)
else:
ax.plot(x, y, z, label=Leg, **dL)
for kk in dPts.keys():
if kk in Elt:
P = L._dsino['RefPt'][:,ind] if kk=='P' else L._dgeom[dPts[kk][0]][:,ind]
if len(ind)==1:
P = P.reshape((3,1))
if multi:
for ii in range(0,len(ind)):
leg = kk if Leg[ii] is None else Leg[ii]+""+kk
ax.plot(P[0,ii],P[1,ii],P[2,ii], label=leg, **dPts[kk][1])
else:
ax.plot(P[0,:],P[1,:],P[2,:], label=Leg, **dPts[kk][1])
if not dLeg is None:
ax.legend(**dLeg)
if draw:
ax.figure.canvas.draw()
return ax
"""
def Plot_3D_mlab_GLOS(L,Leg ='',Lplot='Tot',PDIOR='DIOR',fig='None', dL=dL_mlab_Def, Mdict=Mdict_mlab_Def,LegDict=LegDict_Def):
assert isinstance(L,LOS) or isinstance(L,list) or isinstance(L,GLOS), 'Arg L should a LOS instance or a list of LOS !'
assert Lplot=='Tot' or Lplot=='In', "Arg Lplot should be str 'Tot' or 'In' !"
assert isinstance(PDIOR,basestring), 'Arg PDIOR should be string !'
#assert fig=='None' or isinstance(fig,mlab.Axes), 'Arg ax should be plt.Axes instance !'
assert type(dL) is dict and type(Mdict) is dict and type(LegDict) is dict, 'dL, Mdict and LegDict should be dictionaries !'
LegDict['frameon'] = LegDict['frameon']=='True' or (type(LegDict['frameon']) is bool and LegDict['frameon'])
if isinstance(L,LOS):
L = [L]
elif isinstance(L, GLOS):
Leg = L.Id.NameLTX
L = L.LLOS
if Lplot=='Tot':
Pfield = 'PplotOut'
else:
Pfield = 'PplotIn'
PDIORind = np.array(['D' in PDIOR, 'I' in PDIOR, 'O' in PDIOR, 'R' in PDIOR],dtype=np.bool_)
if fig=='None':
fig = Plot_3D_mlab_Tor_DefFig()
if Leg == '':
for i in range(len(L)):
P = getattr(L[i],Pfield)
mlab.plot3d(P[0,:],P[1,:],P[2,:],name=L[i].Id.NameLTX, figure=fig, **dL)
if np.any(PDIORind):
for i in range(len(L)):
P = np.concatenate((L[i].D,L[i].PIn,L[i].POut,L[i].P1Min),axis=1)
P = P[:,PDIORind]
mlab.points3d(P[0,:],P[1,:],P[2,:],name=L[i].Id.NameLTX+' '+PDIOR, figure=fig, **Mdict)
else:
Pl,Pm = np.nan*np.ones((3,1)), np.nan*np.ones((3,1))
for i in range(len(L)):
P = getattr(L[i],Pfield)
Pl = np.concatenate((Pl,P,np.nan*np.ones((3,1))),axis=1)
P = np.concatenate((L[i].D,L[i].PIn,L[i].POut,L[i].P1Min),axis=1)
P = P[:,PDIORind]
Pm = np.concatenate((Pm,P),axis=1)
mlab.plot3d(Pl[0,:],Pl[1,:],Pl[2,:],name=Leg, figure=fig, **dL)
if np.any(PDIORind):
mlab.points3d(Pm[0,:],Pm[1,:],Pm[2,:],name=Leg+' '+PDIOR, figure=fig, **Mdict)
#ax.legend(**LegDict)
return fig
"""
def GLOS_plot_Sino(GLos, Proj='Cross', ax=None, Elt=_def.LOSImpElt,
Sketch=True, Ang=_def.LOSImpAng, AngUnit=_def.LOSImpAngUnit,
Leg=None, dL=_def.LOSMImpd, dVes=_def.TorPFilld,
dLeg=_def.TorLegd, ind=None, multi=False,
draw=True, fs=None, tit=None, wintit=None, Test=True):
if Test:
assert Proj in ['Cross','3d'], "Arg Proj must be in ['Pol','3d'] !"
assert Ang in ['theta','xi'], "Arg Ang must be in ['theta','xi'] !"
assert AngUnit in ['rad','deg'], "Arg Ang must be in ['rad','deg'] !"
if wintit is None:
wintit = _wintit
if not GLos.dsino['RefPt'] is None:
ax = GLos.config.plot_sino(ax=ax, dP=dVes, Ang=Ang,
AngUnit=AngUnit, Sketch=Sketch,
dLeg=None, draw=False, fs=fs,
wintit=wintit, Test=Test)
# Select subset
if ind is None:
ind = np.arange(0,GLos.nRays)
ind = np.asarray(ind)
Leg = GLos.Id.NameLTX if Leg is None else Leg
dL = _def.LOSLd if dL is None else dL
if multi:
if GLos.Id.Cls in ['Rays','LOS']:
Leg = [None for ii in ind]
else:
Leg = GLos.LNames
if 'c' in dL.keys():
del dL['c']
if 'L' in Elt:
if Proj=='Cross':
ax = _Plot_Sinogram_CrossProj(GLos, ax=ax, Ang=Ang, AngUnit=AngUnit,
Sketch=Sketch, dL=dL, LegDict=dLeg,
ind=ind, draw=False, fs=fs,
wintit=wintit, Test=Test)
else:
ax = _Plot_Sinogram_3D(GLos, ax=ax, Ang=Ang, AngUnit=AngUnit,
dL=dL, ind=ind, LegDict=dLeg, draw=False,
fs=fs, wintit=wintit, Test=Test)
if draw:
ax.figure.canvas.draw()
return ax
def _Plot_Sinogram_CrossProj(L, ax=None, Leg ='', Ang='theta', AngUnit='rad',
Sketch=True, dL=_def.LOSMImpd, LegDict=_def.TorLegd,
ind=None, multi=False,
draw=True, fs=None, wintit=_wintit, Test=True):
if Test:
assert ax is None or isinstance(ax,plt.Axes), 'Arg ax should be Axes instance !'
if ax is None:
ax, axSketch = _def.Plot_Impact_DefAxes('Cross', fs=fs, wintit=wintit,
Ang=Ang, AngUnit=AngUnit,
Sketch=Sketch)
Impp, Imptheta = L._dsino['p'][ind], L._dsino['theta'][ind]
if Ang=='xi':
Imptheta, Impp, bla = _GG.ConvertImpact_Theta2Xi(Imptheta, Impp, Impp)
if multi:
for ii in range(0,len(ind)):
if not L[ii]._dsino['RefPt'] is None:
ax.plot(Imptheta[ii],Impp[ii],label=Leg[ind[ii]], **dL)
else:
ax.plot(Imptheta,Impp,label=Leg, **dL)
if not LegDict is None:
ax.legend(**LegDict)
if draw:
ax.figure.canvas.draw()
return ax
def _Plot_Sinogram_3D(L,ax=None,Leg ='', Ang='theta', AngUnit='rad',
dL=_def.LOSMImpd, ind=None, multi=False,
draw=True, fs=None, wintit=_wintit, LegDict=_def.TorLegd):
assert ax is None or isinstance(ax,plt.Axes), 'Arg ax should be Axes instance !'
if ax is None:
ax = _def.Plot_Impact_DefAxes('3D', fs=fs, wintit=wintit)
Impp, Imptheta = L.sino['p'][ind], L.sino['theta'][ind]
ImpPhi = L.sino['Phi'][ind]
if Ang=='xi':
Imptheta, Impp, bla = _GG.ConvertImpact_Theta2Xi(Imptheta, Impp, Impp)
if multi:
for ii in range(len(ind)):
if not L[ii].Sino_RefPt is None:
ax.plot([Imptheta[ii]], [Impp[ii]], [ImpPhi[ii]], zdir='z',
label=Leg[ind[ii]], **dL)
else:
ax.plot(Imptheta,Impp,ImpPhi, zdir='z', label=Leg, **dL)
if not LegDict is None:
ax.legend(**LegDict)
if draw:
ax.figure.canvas.draw()
return ax
########################################################
########################################################
########################################################
# plot_touch
########################################################
########################################################
def Rays_plot_touch(cam, key=None, ind=None, quant='lengths', cdef=_cdef,
invert=None, Bck=True, cbck=_cbck, Lplot=None,
incch=[1,10], ms=4, cmap='touch', vmin=None, vmax=None,
fmt_ch='02.0f', labelpad=_labelpad, dmargin=None,
nchMax=_nchMax, lcch=_lcch, fs=None, wintit=None, tit=None,
fontsize=_fontsize, draw=True, connect=True):
########
# Prepare
if ind is not None:
ind = cam._check_indch(ind, out=bool)
if wintit is None:
wintit = _wintit
assert (issubclass(cam.__class__, utils.ToFuObject)
and 'cam' in cam.Id.Cls.lower())
nD = 2 if cam._is2D() else 1
if nD == 2:
invert = True if invert is None else invert
assert type(quant) in [str,np.ndarray]
if type(quant) is str:
lok = ['lengths', 'indices', 'angles', 'Etendues', 'Surfaces']
if not quant in lok:
msg = "Valid flags for kwarg quant are:\n"
msg += " [" + ", ".join(lok) + "]\n"
msg += " Provided: %s"%quant
raise Exception(msg)
if quant in ['Etendues','Surfaces'] and getattr(cam,quant) is None:
msg = "Required quantity is not set:\n"
msg += " self.%s = None\n"%quant
msg += " => use self.set_%s() first"%quant
raise Exception(msg)
else:
quant = quant.ravel()
if quant.shape != (cam.nRays,):
msg = "Provided quant has wrong shape!\n"
msg += " - Expected: (%s,)"%cam.nRays
msg += " - Provided: %s"%quant.shape
raise Exception(msg)
########
# Plot
out = _Cam12D_plottouch(cam, key=key, ind=ind, quant=quant, nchMax=nchMax,
Bck=Bck, lcch=lcch, cbck=cbck, Lplot=Lplot,
incch=incch, ms=ms, cmap=cmap, vmin=vmin, vmax=vmax,
fmt_ch=fmt_ch, invert=invert,
fontsize=fontsize, labelpad=labelpad,
fs=fs, dmargin=dmargin, wintit=wintit, tit=tit,
draw=draw, connect=connect, nD=nD)
return out
def _Cam12D_plot_touch_init(fs=None, dmargin=None, fontsize=8,
wintit=_wintit, nchMax=_nchMax, nD=1):
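    # Build the figure and the dict of axes (data, cross/hor projections, text
    # boxes and, for 2D cameras, a colorbar) used by plot_touch, with common formatting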
# Figure
axCol = "w"
if fs is None:
fs = (10,7)
elif type(fs) is str and fs.lower()=='a4':
fs = (8.27,11.69)
fig = plt.figure(facecolor=axCol,figsize=fs)
if wintit != False:
fig.canvas.set_window_title(wintit)
if dmargin is None:
dmargin = {'left':0.03, 'right':0.99,
'bottom':0.05, 'top':0.92,
'wspace':None, 'hspace':0.4}
# Axes
gs1 = gridspec.GridSpec(6, 3, **dmargin)
if nD == 1:
axp = fig.add_subplot(gs1[:,:-1], fc='w')
else:
pos = list(gs1[5,:-1].get_position(fig).bounds)
pos[-1] = pos[-1]/2.
cax = fig.add_axes(pos, fc='w')
axp = fig.add_subplot(gs1[:5,:-1], fc='w')
axH = fig.add_subplot(gs1[0:2,2], fc='w')
axC = fig.add_subplot(gs1[2:,2], fc='w')
axC.set_aspect('equal', adjustable='datalim')
axH.set_aspect('equal', adjustable='datalim')
Ytxt = axp.get_position().bounds[1] + axp.get_position().bounds[3]
DY = 0.02
Xtxt = axp.get_position().bounds[0]
DX = axp.get_position().bounds[2]
axtxtch = fig.add_axes([Xtxt, Ytxt, DX, DY], fc='w')
xtxt, Ytxt, dx, DY = 0.01, 0.98, 0.15, 0.02
axtxtg = fig.add_axes([xtxt, Ytxt, dx, DY], fc='None')
# Dict
dax = {'X':[axp],
'cross':[axC],
'hor':[axH],
'txtg':[axtxtg],
'txtch':[axtxtch]}
if nD == 2:
dax['colorbar'] = [cax]
# Formatting
for kk in dax.keys():
for ii in range(0,len(dax[kk])):
dax[kk][ii].tick_params(labelsize=fontsize)
if 'txt' in kk:
dax[kk][ii].patch.set_alpha(0.)
for ss in ['left','right','bottom','top']:
dax[kk][ii].spines[ss].set_visible(False)
dax[kk][ii].set_xticks([]), dax[kk][ii].set_yticks([])
dax[kk][ii].set_xlim(0,1), dax[kk][ii].set_ylim(0,1)
return dax
def _Cam12D_plottouch(cam, key=None, ind=None, quant='lengths', nchMax=_nchMax,
Bck=True, lcch=_lcch, cbck=_cbck, Lplot=None,
incch=[1,5], ms=4, plotmethod='imshow',
cmap=None, vmin=None, vmax=None,
fmt_ch='01.0f', invert=True, Dlab=None,
fontsize=_fontsize, labelpad=_labelpad,
fs=None, dmargin=None, wintit=_wintit, tit=None,
draw=True, connect=True, nD=1):
assert plotmethod == 'imshow', "plotmethod %s not coded yet !"%plotmethod
#########
# Prepare
#########
fldict = dict(fontsize=fontsize, labelpad=labelpad)
# ---------
# Check nch and X
nch = cam.nRays
nan2 = np.full((2,1),np.nan)
if nD == 1:
Xlab = r"index"
Xtype = 'x'
DX = [-1., nch]
else:
x1, x2, indr, extent = cam.get_X12plot('imshow')
if Bck:
indbck = np.r_[indr[0,0], indr[0,-1], indr[-1,0], indr[-1,-1]]
idx12 = id((x1,x2))
n12 = [x1.size, x2.size]
Xtype = 'x'
X = np.arange(0,nch)
idX = id(X)
# dchans
if key is None:
dchans = np.arange(0,nch)
else:
dchans = cam.dchans(key)
idchans = id(dchans)
# ---------
# Check colors
dElt = cam.get_touch_dict(ind=ind, out=int)
# ---------
# Check data
# data
if type(quant) is str:
if quant == 'lengths':
if cam._isLOS():
Dlab = r'LOS length'+r'$m$'
data = cam.kOut-cam.kIn
data[np.isinf(data)] = np.nan
else:
Dlab = r'VOS volume'+r'$m^3$'
data = None
raise Exception("Not coded yet !")
elif quant == 'indices':
Dlab = r'index' + r' ($a.u.$)'
data = np.arange(0,cam.nRays)
elif quant == 'angles':
Dlab = r'angle of incidence (rad.)'
data = np.arccos(-np.sum(cam.u*cam.dgeom['vperp'], axis=0))
assert np.all(data >= 0.) and np.all(data <= np.pi/2.)
else:
data = getattr(cam, quant)
Dlab = quant
Dlab += r' ($m^2/sr$)' if quant == 'Etendues' else r' ($m^2$)'
else:
data = quant
Dlab = '' if Dlab is None else Dlab
iddata = id(data)
vmin = np.nanmin(data) if vmin is None else vmin
vmax = np.nanmax(data) if vmax is None else vmax
if nD == 1:
Dlim = [min(0.,vmin), max(0.,vmax)]
Dd = [Dlim[0]-0.05*np.diff(Dlim), Dlim[1]+0.05*np.diff(Dlim)]
else:
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
if cmap == 'touch':
cols = cam.get_touch_colors(dElt=dElt)
else:
cols = np.tile(mpl.colors.to_rgba(cmap), (cam.nRays, 1)).T
cols[-1,:] = 1.-norm(data)
cols = np.swapaxes(cols[:,indr.T], 0,2)
#########
# Plot
#########
# Format axes
dax = _Cam12D_plot_touch_init(fs=fs, wintit=wintit, nchMax=nchMax,
dmargin=dmargin, fontsize=fontsize, nD=nD)
fig = dax['X'][0].figure
if tit is None:
tit = r"%s - %s - %s"%(cam.Id.Exp, cam.Id.Diag, cam.Id.Name)
if tit != False:
fig.suptitle(tit)
# -----------------
# Plot conf and bck
if cam.config is not None:
out = cam.config.plot(lax=[dax['cross'][0], dax['hor'][0]],
element='P', tit=False, wintit=False, dLeg=None, draw=False)
dax['cross'][0], dax['hor'][0] = out
if cam._isLOS():
lCross = cam._get_plotL(Lplot=Lplot, proj='cross',
return_pts=True, multi=True)
lHor = cam._get_plotL(Lplot=Lplot, proj='hor',
return_pts=True, multi=True)
if Bck and nD == 2:
crossbck = [lCross[indbck[0]],nan2,lCross[indbck[1]],nan2,
lCross[indbck[2]],nan2,lCross[indbck[3]]]
crossbck = np.concatenate(crossbck,axis=1)
horbck = [lHor[indbck[0]],nan2,lHor[indbck[1]],nan2,
lHor[indbck[2]],nan2,lHor[indbck[3]]]
horbck = np.concatenate(horbck,axis=1)
dax['cross'][0].plot(crossbck[0,:], crossbck[1,:],
c=cbck, ls='-', lw=1.)
dax['hor'][0].plot(horbck[0,:], horbck[1,:],
c=cbck, ls='-', lw=1.)
elif nD == 1:
for kn, v in dElt.items():
if np.any(v['indok']):
crok = [np.concatenate((lCross[ii],nan2), axis=1)
for ii in v['indok']]
crok = np.concatenate(crok, axis=1)
dax['cross'][0].plot(crok[0,:], crok[1,:], c=v['col'], lw=1.)
crok = [np.concatenate((lHor[ii],nan2), axis=1)
for ii in v['indok']]
crok = np.concatenate(crok, axis=1)
dax['hor'][0].plot(crok[0,:], crok[1,:], c=v['col'], lw=1.)
if np.any(v['indout']):
crout = [np.concatenate((lCross[ii],nan2), axis=1)
for ii in v['indout']]
crout = np.concatenate(crout, axis=1)
dax['cross'][0].plot(crout[0,:], crout[1,:], c=cbck, lw=1.)
crout = [np.concatenate((lHor[ii],nan2), axis=1)
for ii in v['indout']]
crout = np.concatenate(crout, axis=1)
dax['hor'][0].plot(crout[0,:], crout[1,:], c=cbck, lw=1.)
lHor = np.stack(lHor)
idlCross = id(lCross)
idlHor = id(lHor)
else:
lCross, lHor = None, None
# data, TBF
if nD == 1:
for kn,v in dElt.items():
dax['X'][0].plot(X[v['indok']], data[v['indok']],
marker='o', ms=ms, mfc='None',
c=v['col'], ls='-', lw=1.)
dax['X'][0].plot(X[v['indout']], data[v['indout']],
marker='o', ms=ms, mfc='None',
c=cbck, ls='-', lw=1.)
elif nD == 2:
dax['X'][0].imshow(cols, extent=extent, aspect='equal',
interpolation='nearest', origin='lower', zorder=-1)
cmapdef = plt.cm.gray if cmap == 'touch' else cmap
cb = mpl.colorbar.ColorbarBase(dax['colorbar'][0],
cmap=cmapdef, norm=norm,
orientation='horizontal')
cb.set_label(Dlab)
# Define datanorm because colorbar => xlim in (0,1)
if dax['colorbar'][0].get_xlim() == (0.,1.):
datanorm = np.asarray(norm(data))
else:
datanorm = data
    iddatanorm = id(datanorm)
# ---------------
# Lims and labels
if nD == 1:
dax['X'][0].set_xlim(DX)
dax['X'][0].set_xlabel(Xlab, **fldict)
else:
dax['X'][0].set_xlim(extent[:2])
dax['X'][0].set_ylim(extent[2:])
if invert:
dax['X'][0].invert_xaxis()
dax['X'][0].invert_yaxis()
##################
# Interactivity dict
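    # Rough roles of the dicts built below (inferred from how they are used):
    #   dgroup: interactive groups (here only 'channel'), with the max number
    #           of simultaneous selections and the key toggling the group
    #   dref  : reference vectors (channel index X) the selections run over
    #   ddat  : data arrays keyed by id(), each tied to its reference ids
    #   dobj  : matplotlib artists with the update rule to apply on selection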
dgroup = {'channel': {'nMax':nchMax, 'key':'f1',
'defid':idX, 'defax':dax['X'][0]}}
# Group info (make dynamic in later versions ?)
msg = ' '.join(['%s: %s'%(v['key'],k) for k, v in dgroup.items()])
l0 = dax['txtg'][0].text(0., 0., msg,
color='k', fontweight='bold',
fontsize=6., ha='left', va='center')
# dref
dref = {idX:{'group':'channel', 'val':X, 'inc':incch}}
if nD == 2:
dref[idX]['2d'] = (x1,x2)
# ddata
ddat = {iddata:{'val':data, 'refids':[idX]}}
ddat[idchans] = {'val':dchans, 'refids':[idX]}
if lCross is not None:
ddat[idlCross] = {'val':lCross, 'refids':[idX]}
ddat[idlHor] = {'val':lHor, 'refids':[idX]}
if nD == 2:
ddat[idx12] = {'val':(x1,x2), 'refids':[idX]}
if iddatanorm not in ddat.keys():
ddat[iddatanorm] = {'val':datanorm, 'refids':[idX]}
# dax
lax_fix = [dax['cross'][0], dax['hor'][0],
dax['txtg'][0], dax['txtch'][0]]
dax2 = {}
if nD == 1:
dax2[dax['X'][0]] = {'ref':{idX:'x'}}
else:
dax2[dax['X'][0]] = {'ref':{idX:'2d'},'invert':invert}
dobj = {}
##################
# Populating dobj
# -------------
# One-shot channels
for jj in range(0,nchMax):
# Channel text
l0 = dax['txtch'][0].text((0.5+jj)/nchMax, 0., r'',
color=lcch[jj], fontweight='bold',
fontsize=6., ha='center', va='bottom')
dobj[l0] = {'dupdate':{'txt':{'id':idchans, 'lrid':[idX],
'bstr':'{0:%s}'%fmt_ch}},
'drefid':{idX:jj}}
# los
if cam._isLOS():
l, = dax['cross'][0].plot([np.nan,np.nan], [np.nan,np.nan],
c=lcch[jj], ls='-', lw=2.)
dobj[l] = {'dupdate':{'data':{'id':idlCross, 'lrid':[idX]}},
'drefid':{idX:jj}}
l, = dax['hor'][0].plot([np.nan,np.nan], [np.nan,np.nan],
c=lcch[jj], ls='-', lw=2.)
dobj[l] = {'dupdate':{'data':{'id':idlHor, 'lrid':[idX]}},
'drefid':{idX:jj}}
# -------------
# Data-specific
# Channel
for jj in range(0,nchMax):
# Channel vlines or pixels
if nD == 1:
l0 = dax['X'][0].axvline(np.nan, c=lcch[jj], ls='-', lw=1.)
dobj[l0] = {'dupdate':{'xdata':{'id':idX, 'lrid':[idX]}},
'drefid':{idX:jj}}
else:
l0, = dax['X'][0].plot([np.nan],[np.nan],
mec=lcch[jj], ls='None', marker='s', mew=2.,
ms=ms, mfc='None', zorder=10)
dobj[l0] = {'dupdate':{'data':{'id':idx12, 'lrid':[idX]}},
'drefid':{idX:jj}}
# Channel colorbar indicators
l0 = dax['colorbar'][0].axvline([np.nan], ls='-', c=lcch[jj])
dobj[l0] = {'dupdate':{'xdata':{'id':iddatanorm, 'lrid':[idX]}},
'drefid':{idX:jj}}
##################
    # Instantiate KeyHandler
can = fig.canvas
can.draw()
kh = utils.KeyHandler_mpl(can=can,
dgroup=dgroup, dref=dref, ddata=ddat,
dobj=dobj, dax=dax2, lax_fix=lax_fix,
groupinit='channel', follow=True)
if connect:
kh.disconnect_old()
kh.connect()
if draw:
can.draw()
return kh
| mit |
cytomine/Cytomine-python-datamining | cytomine-applications/ldm_model_builder/SeparateTrees.py | 1 | 2817 | # -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2017. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Vandaele Rémy <[email protected]>"
__contributors__ = ["Marée Raphaël <[email protected]>"]
__copyright__ = "Copyright 2010-2017 University of Liège, Belgium, http://www.cytomine.be/"
import numpy as np
from sklearn.tree import ExtraTreeClassifier
from multiprocessing import Pool
def build_separate_tree(X,y,max_features,max_depth,min_samples_split):
clf = ExtraTreeClassifier(max_features=max_features,max_depth=max_depth,min_samples_split=min_samples_split)
clf = clf.fit(X,y)
return clf
def separatetree_training_mp_helper(jobargs):
return build_separate_tree(*jobargs)
def separatetree_test_mp_helper(jobargs):
return test_separate_tree(*jobargs)
def test_separate_tree(tree,X):
return tree.predict_proba(X)
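# SeparateTrees: a small ensemble in which the feature vector is split into
# n_estimators contiguous blocks of equal width and one ExtraTreeClassifier is
# fit per block (in parallel); predict_proba averages the per-block class
# probabilities and predict takes the argmax of that average.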
class SeparateTrees:
def __init__(self,n_estimators=10,max_features='auto',max_depth=None,min_samples_split=2,n_jobs=1):
self.n_estimators = n_estimators
self.max_features = max_features
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.n_jobs = n_jobs
def fit(self,X,y):
self.trees = []
self.n_classes = np.max(y)+1
(h,w) = X.shape
n_features = w/self.n_estimators
p = Pool(self.n_jobs)
jobargs = [(X[:,i*n_features:(i+1)*n_features],y,self.max_features,self.max_depth,self.min_samples_split) for i in range(self.n_estimators)]
self.trees = p.map(separatetree_training_mp_helper,jobargs)
p.close()
p.join()
return self
def predict_proba(self,X):
(h,w) = X.shape
n_features = w/self.n_estimators
p = Pool(self.n_jobs)
jobargs = [(self.trees[i],X[:,i*n_features:(i+1)*n_features]) for i in range(self.n_estimators)]
probas = p.map(separatetree_test_mp_helper,jobargs)
p.close()
p.join()
return np.sum(probas,axis=0)/float(self.n_estimators)
def predict(self,X):
probas = self.predict_proba(X)
return np.argmax(probas,axis=1)
if __name__ == "__main__":
clf = SeparateTrees(n_estimators=32,max_features=2,n_jobs=4)
clf.fit(np.random.ranf((10000,3200)),np.random.randint(0,2,10000))
print clf.predict_proba(np.random.ranf((100,3200)))
print clf.predict(np.random.ranf((100,3200)))
| apache-2.0 |
JediKoder/coursera-ML | ex7/ex7_pca.py | 2 | 8115 | ## Machine Learning Online Class
# Exercise 7 | Principle Component Analysis and K-Means Clustering
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# pca.m
#  projectData.m
# recoverData.m
# computeCentroids.m
# findClosestCentroids.m
# kMeansInitCentroids.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
from matplotlib import use
use('TkAgg')
import numpy as np
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from featureNormalize import featureNormalize
from pca import pca
from projectData import projectData
from recoverData import recoverData
from kMeansInitCentroids import kMeansInitCentroids
from runkMeans import runkMeans
from plotDataPoints import plotDataPoints
from ex3.displayData import displayData
from show import show
## ================== Part 1: Load Example Dataset ===================
# We start this exercise by using a small dataset that is easy to
# visualize
print 'Visualizing example dataset for PCA.'
# The following command loads the dataset. You should now have the
# variable X in your environment
data = scipy.io.loadmat('ex7data1.mat')
X = data['X']
# Visualize the example dataset
plt.scatter(X[:, 0], X[:, 1], marker='o', color='b', facecolors='none', lw=1.0)
plt.axis([0.5, 6.5, 2, 8])
plt.axis('equal')
show()
raw_input('Program paused. Press Enter to continue...')
## =============== Part 2: Principal Component Analysis ===============
# You should now implement PCA, a dimension reduction technique. You
# should complete the code in pca.m
#
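# For reference, a minimal sketch of what pca.py is typically expected to
# compute (assuming X is already feature-normalized, shape m x n). The helper
# name below is illustrative only and is not called by this script.
def _pca_sketch(X_normalized):
    m = X_normalized.shape[0]
    # covariance matrix Sigma = (1/m) * X^T X; its SVD gives the principal
    # directions U and the singular values S
    Sigma = X_normalized.T.dot(X_normalized) / m
    U, S, V = np.linalg.svd(Sigma)
    return U, S, V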
print 'Running PCA on example dataset.'
# Before running PCA, it is important to first normalize X
X_norm, mu, sigma = featureNormalize(X)
# Run PCA
U, S, V = pca(X_norm)
# Compute mu, the mean of each feature
# Draw the eigenvectors centered at mean of data. These lines show the
# directions of maximum variations in the dataset.
mu2 = mu + 1.5 * S.dot(U.T)
plt.plot([mu[0], mu2[0, 0]], [mu[1], mu2[0, 1]], '-k', lw=2)
plt.plot([mu[0], mu2[1, 0]], [mu[1], mu2[1, 1]], '-k', lw=2)
show()
print 'Top eigenvector: '
print ' U(:,1) = %f %f ' % (U[0,0], U[1,0])
print '(you should expect to see -0.707107 -0.707107)'
raw_input('Program paused. Press Enter to continue...')
## =================== Part 3: Dimension Reduction ===================
# You should now implement the projection step to map the data onto the
# first k eigenvectors. The code will then plot the data in this reduced
# dimensional space. This will show you what the data looks like when
# using only the corresponding eigenvectors to reconstruct it.
#
# You should complete the code in projectData.m
#
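# A minimal sketch of the projection / recovery steps implemented in
# projectData.py and recoverData.py (the helper names here are illustrative
# only and are not called by this script):
def _project_data_sketch(X_normalized, U, K):
    # Z = X * U(:, 1:K): coordinates in the reduced K-dimensional space
    return X_normalized.dot(U[:, :K])
def _recover_data_sketch(Z, U, K):
    # approximate reconstruction back into the original feature space
    return Z.dot(U[:, :K].T)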
print 'Dimension reduction on example dataset.'
# Plot the normalized dataset (returned from pca)
plt.figure()
plt.scatter(X_norm[:, 0], X_norm[:, 1], marker='o', color='b', facecolors='none', lw=1.0)
plt.axis([-4, 3, -4, 3]) #axis square
plt.axis('equal')
show()
# Project the data onto K = 1 dimension
K = 1
Z = projectData(X_norm, U, K)
print 'Projection of the first example: %f' % Z[0]
print '(this value should be about 1.481274)'
X_rec = recoverData(Z, U, K)
print 'Approximation of the first example: %f %f'% (X_rec[0, 0], X_rec[0, 1])
print '(this value should be about -1.047419 -1.047419)'
# Draw lines connecting the projected points to the original points
plt.scatter(X_rec[:, 0], X_rec[:, 1], marker='o', color='r', facecolor='none', lw=1.0)
for i in range(len(X_norm)):
plt.plot([X_norm[i, 0], X_rec[i, 0]], [X_norm[i, 1], X_rec[i, 1]], '--k')
show()
raw_input('Program paused. Press Enter to continue...')
## =============== Part 4: Loading and Visualizing Face Data =============
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment
#
print 'Loading face dataset.'
# Load Face dataset
data = scipy.io.loadmat('ex7faces.mat')
X = data['X']
# Display the first 100 faces in the dataset
displayData(X[0:100, :])
raw_input('Program paused. Press Enter to continue...')
## =========== Part 5: PCA on Face Data: Eigenfaces ===================
# Run PCA and visualize the eigenvectors which are in this case eigenfaces
# We display the first 36 eigenfaces.
#
print 'Running PCA on face dataset.\n(this might take a minute or two ...)\n\n'
# Before running PCA, it is important to first normalize X by subtracting
# the mean value from each feature
X_norm, mu, sigma = featureNormalize(X)
# Run PCA
U, S, V = pca(X_norm)
# Visualize the top 36 eigenvectors found
displayData(U[:, :36].T)
raw_input('Program paused. Press Enter to continue...')
## ============= Part 6: Dimension Reduction for Faces =================
# Project images to the eigen space using the top k eigenvectors
# If you are applying a machine learning algorithm
print 'Dimension reduction for face dataset.'
K = 100
Z = projectData(X_norm, U, K)
print 'The projected data Z has a size of: '
print '%d %d'% Z.shape
raw_input('Program paused. Press Enter to continue...')
## ==== Part 7: Visualization of Faces after PCA Dimension Reduction ====
# Project images to the eigen space using the top K eigen vectors and
# visualize only using those K dimensions
# Compare to the original input, which is also displayed
print 'Visualizing the projected (reduced dimension) faces.'
K = 100
X_rec = recoverData(Z, U, K)
# Display normalized data
plt.subplot(1, 2, 1)
displayData(X_norm[:100,:])
plt.title('Original faces')
plt.axis('equal')
# Display reconstructed data from only k eigenfaces
plt.subplot(1, 2, 2)
displayData(X_rec[:100,:])
plt.title('Recovered faces')
plt.axis('equal')
show()
raw_input('Program paused. Press Enter to continue...')
## === Part 8(a): Optional (ungraded) Exercise: PCA for Visualization ===
# One useful application of PCA is to use it to visualize high-dimensional
# data. In the last K-Means exercise you ran K-Means on 3-dimensional
# pixel colors of an image. We first visualize this output in 3D, and then
# apply PCA to obtain a visualization in 2D.
# Re-load the image from the previous exercise and run K-Means on it
# For this to work, you need to complete the K-Means assignment first
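# A minimal sketch of the usual kMeansInitCentroids strategy (pick K distinct
# random examples as the initial centroids); the helper name is illustrative
# only and is not called here:
def _kmeans_init_centroids_sketch(X, K):
    idx = np.random.permutation(X.shape[0])[:K]
    return X[idx]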
A = scipy.misc.imread('bird_small.png')
# If imread does not work for you, you can try instead
# load ('bird_small.mat')
A = A / 255.0
img_size = A.shape
X = A.reshape(img_size[0] * img_size[1], 3)
K = 16
max_iters = 10
initial_centroids = kMeansInitCentroids(X, K)
centroids, idx = runkMeans(X, initial_centroids, max_iters)
# Sample 1000 random indexes (since working with all the data is
# too expensive). If you have a fast computer, you may increase this.
sel = np.floor(np.random.random(1000) * len(X)).astype(int)
# Setup Color Palette
# Visualize the data and centroid memberships in 3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
Xs = np.array([X[s] for s in sel])
xs = Xs[:, 0]
ys = Xs[:, 1]
zs = Xs[:, 2]
cmap = plt.get_cmap("jet")
idxn = sel.astype('float')/max(sel.astype('float'))
colors = cmap(idxn)
# ax = Axes3D(fig)
ax.scatter3D(xs, ys, zs=zs, edgecolors=colors, marker='o', facecolors='none', lw=0.4, s=10)
plt.title('Pixel dataset plotted in 3D. Color shows centroid memberships')
show()
raw_input('Program paused. Press Enter to continue...')
## === Part 8(b): Optional (ungraded) Exercise: PCA for Visualization ===
# Use PCA to project this cloud to 2D for visualization
# Subtract the mean to use PCA
X_norm, mu, sigma = featureNormalize(X)
# PCA and project the data to 2D
U, S, V = pca(X_norm)
Z = projectData(X_norm, U, 2)
# Plot in 2D
plt.figure()
zs = np.array([Z[s] for s in sel])
idxs = np.array([idx[s] for s in sel])
# plt.scatter(zs[:,0], zs[:,1])
plotDataPoints(zs, idxs)
plt.title('Pixel dataset plotted in 2D, using PCA for dimensionality reduction')
show()
raw_input('Program paused. Press Enter to continue...')
| mit |
massmutual/scikit-learn | sklearn/utils/fixes.py | 39 | 13318 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
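        # since 1 / (1 + exp(-x)) = exp(x/2) / (exp(x/2) + exp(-x/2))
        #                         = (1 + tanh(x/2)) / 2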
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
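        # Reduce X.data with `ufunc` over each non-empty major-axis slice
        # (row for CSR, column for CSC); slices are delimited by X.indptr.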
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
| bsd-3-clause |
OpenFAST/r-test | modules/aerodyn/ad_Kite_OLAF/CreateMotion.py | 1 | 1779 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# --- Rot Motion
freq1= 0.8
freq2= 0.4
tMax = 10
dt = 0.1
omega1 = 2*np.pi*freq1
T1 = 1/freq1
omega2 = 2*np.pi*freq2
T2 = 1/freq2
time = np.arange(0,tMax+dt/2,dt)
pos = np.zeros((len(time), 6)) # positions: x,y,z, theta_x, theta_y, theta_z
vel = np.zeros((len(time), 6)) # velocities: xdot, ydot, zdot, and omega
acc = np.zeros((len(time), 6)) # accelerations: xddot, omegadot
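# Each segment below prescribes a harmonic motion; vel and acc are filled with
# the analytic first and second time derivatives of pos.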
# -- First period we do a vertical motion
I1 = time <= T1
pos[I1,2] = 2*np.sin(omega1*time[I1])
vel[I1,2] = omega1 *2*np.cos(omega1*time[I1])
acc[I1,2] =-omega1**2*2*np.sin(omega1*time[I1])
# -- Second period we do nothing
I2 = time > T1
# -- Third period we do x rotations
I3 = time > 2*T1
pos[I3,3] = 0.10 *np.sin(omega2*time[I3])
vel[I3,3] = omega2 * 0.10 *np.cos(omega2*time[I3])
acc[I3,3] =-omega2**2* 0.10 *np.sin(omega2*time[I3])
cols = ['time_[s]', 'x_[m]', 'y_[m]', 'z_[m]' , 'theta_x_[rad]', 'theta_y_[rad]', 'theta_z_[rad]']
cols +=['xdot_[m/s]', 'ydot_[m/s]', 'zdot_[m/s]', 'omega_x_g_[rad/s]', 'omega_y_g_[rad/s]', 'omega_z_g_[rad/s]']
cols +=['xddot_[m^2/s]', 'yddot_[m^2/s]' , 'zddot_[m^2/s]', 'alpha_x_g_[rad/s]', 'alpha_y_g_[rad/s]', 'alpha_z_g_[rad/s]']
data=np.column_stack((time, pos, vel, acc))
df = pd.DataFrame( data=data, columns=cols)
df.to_csv('KiteMotionSimple.csv', index=False, sep=',', float_format='%10.6f')
print(df.shape)
# Time_[s] , x_[m], y_[m], z_[m] , theta_x_[rad], theta_y_[rad], theta_z_[rad-], xdot_[m/s], ydot_[m/s], zdot_[m/s], omega_x_g_[rad/s], omega_y_g_[rad/s], omega_z_g_[rad/s], xddot_[m^2/s], yddot_[m^2/s] , zddot_[m^2/s], alpha_x_g_[rad/s], alpha_y_g_[rad/s], alpha_z_g_[rad/s]
if __name__ == '__main__':
pass
| apache-2.0 |
LABSN/expyfun | examples/stimuli/texture_stimuli.py | 4 | 1542 | # -*- coding: utf-8 -*-
"""
========================
Generate texture stimuli
========================
This shows how to generate texture coherence stimuli.
"""
import numpy as np
import matplotlib.pyplot as plt
from expyfun.stimuli import texture_ERB, play_sound
fs = 24414
n_freqs = 20
n_coh = 18 # very coherent example
# let's make a textured stimulus and play it
sig = texture_ERB(n_freqs, n_coh, fs=fs, seq=('inc', 'nb', 'sam'))
play_sound(sig, fs, norm=True, wait=True)
###############################################################################
# Let's look at the time course
t = np.arange(len(sig)) / float(fs)
fig, ax = plt.subplots(1)
ax.plot(t, sig.T, color='k')
ax.set(xlabel='Time (sec)', ylabel='Amplitude (normalized)', xlim=t[[0, -1]])
fig.tight_layout()
###############################################################################
# And now the spectrogram:
fig, ax = plt.subplots(1, figsize=(8, 2))
img = ax.specgram(sig, NFFT=1024, Fs=fs, noverlap=800)[3]
img.set_clim([img.get_clim()[1] - 50, img.get_clim()[1]])
ax.set(xlim=t[[0, -1]], ylim=[0, 10000], xlabel='Time (sec)',
ylabel='Freq (Hz)')
fig.tight_layout()
###############################################################################
# And the long-term spectrum:
fig, ax = plt.subplots(1)
ax.psd(sig, NFFT=16384, Fs=fs, color='k')
xticks = [250, 500, 1000, 2000, 4000, 8000]
ax.set(xlabel='Frequency (Hz)', ylabel='Power (dB)', xlim=[100, 10000],
xscale='log')
ax.set(xticks=xticks)
ax.set(xticklabels=xticks)
fig.tight_layout()
| bsd-3-clause |
sserrot/champion_relationships | venv/share/doc/networkx-2.4/examples/drawing/plot_directed.py | 1 | 1145 | #! /usr/bin/env python
"""
==============
Directed Graph
==============
Draw a graph with directed edges using a colormap and different node sizes.
Edges have different colors and alphas (opacity). Drawn using matplotlib.
"""
# Author: Rodrigo Dorantes-Gilardi ([email protected])
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
G = nx.generators.directed.random_k_out_graph(10, 3, 0.5)
pos = nx.layout.spring_layout(G)
node_sizes = [3 + 10 * i for i in range(len(G))]
M = G.number_of_edges()
edge_colors = range(2, M + 2)
edge_alphas = [(5 + i) / (M + 4) for i in range(M)]
nodes = nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='blue')
edges = nx.draw_networkx_edges(G, pos, node_size=node_sizes, arrowstyle='->',
arrowsize=10, edge_color=edge_colors,
edge_cmap=plt.cm.Blues, width=2)
# set alpha value for each edge
for i in range(M):
edges[i].set_alpha(edge_alphas[i])
pc = mpl.collections.PatchCollection(edges, cmap=plt.cm.Blues)
pc.set_array(edge_colors)
plt.colorbar(pc)
ax = plt.gca()
ax.set_axis_off()
plt.show()
| mit |
robbymeals/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v1 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
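# Illustrative sketch, not part of the original test: when documents come from
# a pandas column with missing values, the NaNs can be replaced before
# vectorizing (the file and column names below are hypothetical):
#
#   docs = pd.read_csv('corpus.csv')['text'].fillna('').tolist()
#   HashingVectorizer().fit_transform(docs)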
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
waterponey/scikit-learn | sklearn/exceptions.py | 14 | 4945 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior.
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
.. versionadded:: 0.18
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
.. versionchanged:: 0.18
Moved from sklearn.cross_validation.
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation, extends EfficiencyWarning.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
| bsd-3-clause |
buchbend/astrolyze | build/lib.linux-x86_64-2.7/astrolyze/functions/plot_limits.py | 3 | 6923 | # Copyright (C) 2009-2012 Christof Buchbender
r"""
Functions to help treat upper/lower limits of measured values correctly when
they are plotted on either axis of a plot.
Limits are treated as follows:
When noted in the original measurement or in text:
Lower Limit = '<'
Upper Limit = '>'
In a plot they are, on the Y axis:
Lower Limit = 'v'
Upper Limit = '^'
On the X axis the convention is:
Lower Limit = '<'
Upper Limit = '>'
In case it is not clear whether the limit is upper or lower, the string
``"^v"`` is used for both.
"""
def changeLimitXY(limits, direction='yToX'):
r"""
The changeLimitXY function changes values marked accrodingly to one axis
to the other notation and also to and fro text notation.
Parameters
----------
limits: list
        A list of limits of arbitrary length. The symbols used are:
* ``o``: No upper/lower limit, i.e. a clear detected value.
            * ``>``: A lower limit on the X Axis or in text notation.
            * ``<``: An upper limit on the X Axis or in text notation.
            * ``^``: An upper limit on the Y Axis.
              (Y Axis is default for plots)
            * ``v``: A lower limit on the Y Axis.
direction: string
The direction of the conversion of the limits.
Can be:
* ``yToX`` or ``toText``: Conversion from limits for the
y-Axis to corresponding limits on the x-Axis or for text
notation.
* ``xToY`` or ``toPlot``: Conversion from limits for the x-Axis
              or for text notation to the corresponding limits for the y-Axis.
Returns
-------
limits: list
The converted limits.
"""
if direction == 'yToX' or direction == 'toText':
for i in range(len(limits)):
if limits[i] == 'o':
limits[i] = 'o'
if limits[i] == '^':
limits[i] = '>'
if limits[i] == 'v':
limits[i] = '<'
if limits[i] == '^v':
limits[i] = '<>'
if direction == 'xToY' or direction == 'toPlot':
for i in range(len(limits)):
if limits[i] == 'o':
limits[i] = 'o'
if limits[i] == '>':
limits[i] = '^'
if limits[i] == '<':
limits[i] = 'v'
if limits[i] == '<>':
limits[i] = '^v'
return limits
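# Minimal usage sketch (illustration only, not part of the original module),
# based on the mappings implemented above:
#
#   changeLimitXY(['o', '^', 'v'], direction='toText')   # -> ['o', '>', '<']
#   changeLimitXY(['o', '>', '<'], direction='toPlot')   # -> ['o', '^', 'v']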
def addSubLimits(first, second):
r"""
TODO: Remember the purpose of this function. Give better names and
documentation in the future.
"""
limits = []
for i in range(len(first)):
if first[i] == 'v' and second[i] == 'v':
limits += ['v']
if first[i] == '^' and second[i] == '^':
limits += ['^']
if first[i] == '^' and second[i] == 'v':
limits += ['^v']
if first[i] == 'v' and second[i] == '^':
limits += ['^v']
if first[i] == '^' and second[i] == 'o':
limits += ['^']
if first[i] == 'o' and second[i] == '^':
limits += ['^']
if first[i] == 'v' and second[i] == 'o':
limits += ['v']
if first[i] == 'o' and second[i] == 'v':
limits += ['v']
if first[i] == 'o' and second[i] == 'o':
limits += ['o']
return limits
def ratioLimits(numerator, denominator, mode='yaxis'):
r"""
Evaluation of upper/lower limits when building the ratio
of two values with limits.
Parameters
----------
numerator: list
        List of limits, of arbitrary length, for the values in the numerator
        of the ratio.
denominator: list
        List of limits, of arbitrary length, for the values in the denominator
        of the ratio. *MUST* be the same length as numerator, though.
mode: string
        May be ``yaxis`` (default) or ``text`` to control the type of notation.
See module description.
"""
limits = []
for i in range(len(numerator)):
# In "Y-Axis" notation
if mode == 'yaxis':
if numerator[i] == '^v' or denominator[i] == '^v':
limits += ['^v']
if numerator[i] == 'v' and denominator[i] == 'v':
limits += ['^v']
if numerator[i] == '^' and denominator[i] == '^':
limits += ['^v']
if numerator[i] == '^' and denominator[i] == 'v':
limits += ['^']
if numerator[i] == 'v' and denominator[i] == '^':
limits += ['v']
if numerator[i] == '^' and denominator[i] == 'o':
limits += ['^']
if numerator[i] == 'o' and denominator[i] == '^':
limits += ['v']
if numerator[i] == 'v' and denominator[i] == 'o':
limits += ['v']
if numerator[i] == 'o' and denominator[i] == 'v':
limits += ['^']
if numerator[i] == 'o' and denominator[i] == 'o':
limits += ['o']
if mode == 'text':
if numerator[i] == '^v' or denominator[i] == '^v':
limits += ['^v']
if numerator[i] == '<' and denominator[i] == '<':
limits += ['^v']
if numerator[i] == '>' and denominator[i] == '>':
limits += ['^v']
if numerator[i] == '>' and denominator[i] == '<':
limits += ['>']
if numerator[i] == '<' and denominator[i] == '>':
limits += ['<']
if numerator[i] == '>' and denominator[i] == 'o':
limits += ['>']
if numerator[i] == 'o' and denominator[i] == '>':
limits += ['<']
if numerator[i] == '<' and denominator[i] == 'o':
limits += ['<']
if numerator[i] == 'o' and denominator[i] == '<':
limits += ['>']
if numerator[i] == 'o' and denominator[i] == 'o':
limits += ['o']
return limits
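# Minimal usage sketch (illustration only, not part of the original module),
# following the y-axis rules implemented above:
#
#   ratioLimits(['o', '^', 'v'], ['v', 'o', '^'], mode='yaxis')
#   # -> ['^', '^', 'v']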
def limCheck(xAxisLimits, yAxisLimits):
r""" This function determines which upper/lower limits have to be set on
the x and y Axis. This is helpful when working with matplotlib.
TODO: Extend documentation.
    It returns a list of four boolean entries,
    limitChecker = [xAxisLow, xAxisUp, yAxisLow, yAxisUp],
    where every entry defaults to False.
"""
    limitChecker = [False, False, False, False]
if yAxisLimits == 'v':
limitChecker[2] = True
if yAxisLimits == '^':
limitChecker[3] = True
if yAxisLimits == '^v':
limitChecker[2] = True
limitChecker[3] = True
if xAxisLimits == '<':
limitChecker[0] = True
if xAxisLimits == '>':
limitChecker[1] = True
if xAxisLimits == '<>':
limitChecker[0] = True
limitChecker[1] = True
return limitChecker
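# Minimal usage sketch (illustration only, not part of the original module):
#
#   limCheck('<', 'v')   # -> [True, False, True, False]
#                        #    i.e. xAxisLow and yAxisLow are switched on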
| bsd-3-clause |
lukebarnard1/bokeh | bokeh/charts/builder/boxplot_builder.py | 2 | 11836 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the BoxPlot class which lets you build your BoxPlot plots just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
import pandas as pd
from ..utils import make_scatter, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Rect, Segment
from ...properties import Bool, String
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def BoxPlot(values, marker="circle", outliers=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kw):
""" Create a BoxPlot chart using :class:`BoxPlotBuilder <bokeh.charts.builder.boxplot_builder.BoxPlotBuilder>`
to render the geometry from values, marker and outliers arguments.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
marker (int or string, optional): if outliers=True, the marker type to use
e.g., `circle`.
outliers (bool, optional): Whether or not to plot outliers.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import BoxPlot, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames of arrays are valid inputs)
medals = dict([
('bronze', np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0])),
('silver', np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.])),
('gold', np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.]))
])
boxplot = BoxPlot(medals, marker="circle", outliers=True, title="boxplot",
xlabel="medal type", ylabel="medal count")
output_file('boxplot.html')
show(boxplot)
"""
return create_and_build(
BoxPlotBuilder, values, marker=marker, outliers=outliers,
xscale=xscale, yscale=yscale, xgrid=xgrid, ygrid=ygrid, **kw
)
class BoxPlotBuilder(Builder):
"""This is the BoxPlot class and it is in charge of plotting
    box plots in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (rects, lines and markers)
taking the references from the source.
"""
# TODO: (bev) should be an enumeration
marker = String(help="""
The marker type to use (e.g., ``circle``) if outliers=True.
""")
outliers = Bool(help="""
Whether to display markers for any outliers.
""")
def _process_data(self):
"""Take the BoxPlot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad, segments and markers glyphs inside the ``_yield_renderers`` method.
Args:
cat (list): categories as a list of strings.
marker (int or string, optional): if outliers=True, the marker type to use
e.g., ``circle``.
outliers (bool, optional): Whether to plot outliers.
values (dict or pd obj): the values to be plotted as bars.
"""
self._data_segment = dict()
self._attr_segment = []
self._data_rect = dict()
self._attr_rect = []
self._data_scatter = dict()
self._attr_scatter = []
self._data_legend = dict()
if isinstance(self._values, pd.DataFrame):
self._groups = self._values.columns
else:
self._groups = list(self._values.keys())
# add group to the self._data_segment dict
self._data_segment["groups"] = self._groups
        # add group and width to the self._data_rect dict
self._data_rect["groups"] = self._groups
self._data_rect["width"] = [0.8] * len(self._groups)
# self._data_scatter does not need references to groups now,
# they will be added later.
# add group to the self._data_legend dict
self._data_legend["groups"] = self._groups
        # all the lists we are going to use to save calculated values
q0_points = []
q2_points = []
iqr_centers = []
iqr_lengths = []
lower_points = []
upper_points = []
upper_center_boxes = []
upper_height_boxes = []
lower_center_boxes = []
lower_height_boxes = []
out_x, out_y, out_color = ([], [], [])
colors = cycle_colors(self._groups, self.palette)
for i, (level, values) in enumerate(self._values.items()):
# Compute quantiles, center points, heights, IQR, etc.
# quantiles
q = np.percentile(values, [25, 50, 75])
q0_points.append(q[0])
q2_points.append(q[2])
# IQR related stuff...
iqr_centers.append((q[2] + q[0]) / 2)
iqr = q[2] - q[0]
iqr_lengths.append(iqr)
lower = q[0] - 1.5 * iqr
upper = q[2] + 1.5 * iqr
lower_points.append(lower)
upper_points.append(upper)
# rect center points and heights
upper_center_boxes.append((q[2] + q[1]) / 2)
upper_height_boxes.append(q[2] - q[1])
lower_center_boxes.append((q[1] + q[0]) / 2)
lower_height_boxes.append(q[1] - q[0])
# Store indices of outliers as list
outliers = np.where(
(values > upper) | (values < lower)
)[0]
for out in outliers:
o = values[out]
out_x.append(level)
out_y.append(o)
out_color.append(colors[i])
# Store
self.set_and_get(self._data_scatter, self._attr_scatter, "out_x", out_x)
self.set_and_get(self._data_scatter, self._attr_scatter, "out_y", out_y)
self.set_and_get(self._data_scatter, self._attr_scatter, "colors", out_color)
self.set_and_get(self._data_segment, self._attr_segment, "q0", q0_points)
self.set_and_get(self._data_segment, self._attr_segment, "lower", lower_points)
self.set_and_get(self._data_segment, self._attr_segment, "q2", q2_points)
self.set_and_get(self._data_segment, self._attr_segment, "upper", upper_points)
self.set_and_get(self._data_rect, self._attr_rect, "iqr_centers", iqr_centers)
self.set_and_get(self._data_rect, self._attr_rect, "iqr_lengths", iqr_lengths)
self.set_and_get(self._data_rect, self._attr_rect, "upper_center_boxes", upper_center_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "upper_height_boxes", upper_height_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "lower_center_boxes", lower_center_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "lower_height_boxes", lower_height_boxes)
self.set_and_get(self._data_rect, self._attr_rect, "colors", colors)
def _set_sources(self):
"Push the BoxPlot data into the ColumnDataSource and calculate the proper ranges."
self._source_segment = ColumnDataSource(self._data_segment)
self._source_scatter = ColumnDataSource(self._data_scatter)
self._source_rect = ColumnDataSource(self._data_rect)
self._source_legend = ColumnDataSource(self._data_legend)
self.x_range = FactorRange(factors=self._source_segment.data["groups"])
start_y = min(self._data_segment[self._attr_segment[1]])
end_y = max(self._data_segment[self._attr_segment[3]])
## Expand min/max to encompass outliers
if self.outliers:
start_out_y = min(self._data_scatter[self._attr_scatter[1]])
end_out_y = max(self._data_scatter[self._attr_scatter[1]])
            # there may be no outliers on some sides...
start_y = min(start_y, start_out_y)
end_y = max(end_y, end_out_y)
self.y_range = Range1d(start=start_y - 0.1 * (end_y - start_y),
end=end_y + 0.1 * (end_y - start_y))
def _yield_renderers(self):
"""Use the several glyphs to display the Boxplot.
It uses the selected marker glyph to display the points, segments to
        display the IQR and rects to display the boxes, taking as reference
        points the data loaded at the ColumnDataSource.
"""
ats = self._attr_segment
glyph = Segment(
x0="groups", y0=ats[1], x1="groups", y1=ats[0],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
glyph = Segment(
x0="groups", y0=ats[2], x1="groups", y1=ats[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
atr = self._attr_rect
glyph = Rect(
x="groups", y=atr[0], width="width", height=atr[1],
line_color="black", line_width=2, fill_color=None,
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
glyph = Rect(
x="groups", y=atr[2], width="width", height=atr[3],
line_color="black", fill_color=atr[6],
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
glyph = Rect(
x="groups", y=atr[4], width="width", height=atr[5],
line_color="black", fill_color=atr[6],
)
yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
if self.outliers:
yield make_scatter(self._source_scatter, self._attr_scatter[0],
self._attr_scatter[1], self.marker,
self._attr_scatter[2])
# Some helper methods
def set_and_get(self, data, attr, val, content):
"""Set a new attr and then get it to fill the self._data dict.
Keep track of the attributes created.
Args:
data (dict): where to store the new attribute content
attr (list): where to store the new attribute names
val (string): name of the new attribute
content (obj): content of the new attribute
"""
self._set_and_get(data, "", attr, val, content)
| bsd-3-clause |
google-research/google-research | graph_embedding/dmon/train_gcn_randomfeatures.py | 1 | 3521 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TODO(tsitsulin): add headers, tests, and improve style."""
from absl import app
from absl import flags
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.models.multilayer_gcn import multilayer_gcn
from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph
from graph_embedding.dmon.synthetic_data.overlapping_gaussians import overlapping_gaussians
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0)
flags.DEFINE_integer(
'n_clusters',
2,
'Number of clusters for the synthetic graph.',
lower_bound=0)
flags.DEFINE_float(
'train_size', 0.2, 'Training data proportion.', lower_bound=0)
flags.DEFINE_integer(
'n_epochs', 200, 'Number of epochs to train.', lower_bound=0)
flags.DEFINE_integer(
'n_random_features', 64, 'Number of random features.', lower_bound=0)
flags.DEFINE_float(
'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
print('Bröther may i have some self-lööps')
n_nodes = FLAGS.n_nodes
n_clusters = FLAGS.n_clusters
n_random_features = FLAGS.n_random_features
train_size = FLAGS.train_size
data_clean, data_dirty, labels = overlapping_gaussians(n_nodes, n_clusters)
data_random = np.random.normal(size=(n_nodes, n_random_features))
graph_clean = construct_knn_graph(data_clean).todense().A1.reshape(
n_nodes, n_nodes)
train_mask = np.zeros(n_nodes, dtype=np.bool)
train_mask[np.random.choice(
np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True
test_mask = ~train_mask
print(f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}')
print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}')
input_features = tf.keras.layers.Input(shape=(n_random_features,))
input_graph = tf.keras.layers.Input((n_nodes,))
model = multilayer_gcn([input_features, input_graph], [64, 32, n_clusters])
model.compile(
optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
for epoch in range(FLAGS.n_epochs):
model.fit([data_random, graph_clean],
labels,
n_nodes,
shuffle=False,
sample_weight=train_mask)
clusters = model([data_random, graph_clean]).numpy().argmax(axis=1)[test_mask]
print(
'NMI:',
normalized_mutual_info_score(
labels[test_mask], clusters, average_method='arithmetic'))
print('Accuracy:', accuracy_score(labels[test_mask], clusters))
if __name__ == '__main__':
app.run(main)
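# Example invocation (hypothetical flag values, for illustration only):
#
#   python train_gcn_randomfeatures.py --n_nodes=2000 --n_clusters=4 \
#       --n_random_features=128 --train_size=0.3 --n_epochs=100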
| apache-2.0 |
kdebrab/pandas | pandas/tests/tseries/offsets/test_offsets.py | 1 | 131030 | from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
import pytest
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.series import Series
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.frequencies import (get_freq_code, get_freq_str,
INVALID_FREQ_ERR_MSG)
from pandas.tseries.frequencies import _offset_map, get_offset
from pandas.core.indexes.datetimes import (
_to_m8, DatetimeIndex, _daterange_cache)
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import WeekDay, CacheableOffset
from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd,
BusinessHour, WeekOfMonth, CBMonthEnd,
CustomBusinessHour,
CBMonthBegin, BYearEnd, MonthEnd,
MonthBegin, SemiMonthBegin, SemiMonthEnd,
BYearBegin, QuarterBegin, BQuarterBegin,
BMonthBegin, DateOffset, Week, YearBegin,
YearEnd, Day,
QuarterEnd, BusinessMonthEnd, FY5253,
Nano, Easter, FY5253Quarter,
LastWeekOfMonth, Tick)
from pandas.core.tools.datetimes import format, ole2datetime
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas._libs.tslibs import timezones
from pandas._libs.tslib import NaT, Timestamp
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.tseries.holiday import USFederalHolidayCalendar
from .common import assert_offset_equal, assert_onOffset
####
# Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
with pytest.raises(ValueError):
ole2datetime(60)
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
assert isinstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
# DateOffset Tests
#####
class Base(object):
_offset = None
d = Timestamp(datetime(2008, 1, 2))
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253:
klass = klass(n=value, startingMonth=1, weekday=1,
variation='last', normalize=normalize)
elif klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except Exception:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self, tz_naive_fixture):
tz = tz_naive_fixture
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
try:
if self._offset in (BusinessHour, CustomBusinessHour):
# Using 10000 in BusinessHour fails in tz check because of DST
# difference
offset = self._get_offset(self._offset, value=100000)
else:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
assert isinstance(result, datetime)
assert result.tzinfo is None
# Check tz is preserved
t = Timestamp('20080101', tz=tz)
result = t + offset
assert isinstance(result, datetime)
assert t.tzinfo == result.tzinfo
except tslib.OutOfBoundsDatetime:
raise
except (ValueError, KeyError):
# we are creating an invalid offset
# so ignore
pass
def test_offsets_compare_equal(self):
# root cause of GH#456: __ne__ was not implemented
if self._offset is None:
return
offset1 = self._offset()
offset2 = self._offset()
assert not offset1 != offset2
assert offset1 == offset2
def test_rsub(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def test_radd(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
assert self.d + self.offset2 == self.offset2 + self.d
def test_sub(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-2)
assert self.d - self.offset2 == self.d - (2 * off - off)
def testMult1(self):
if self._offset is None or not hasattr(self, "offset1"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset1 attr
return
assert self.d + 10 * self.offset1 == self.d + self._offset(10)
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
if self._offset is None:
return
assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
class TestCommon(Base):
    # expected values created by Base._get_offset
    # are applied to 2011/01/01 09:00 (Saturday)
    # and used for .apply and .rollforward
expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'CustomBusinessHour': Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np_datetime64_compat(
'2011-01-01T09:00:00.000000001Z'))}
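    # Illustrative note (not part of the original test data): 2011-01-01 is a
    # Saturday, so applying e.g. BusinessDay rolls forward to the next weekday
    # while keeping the time of day:
    #
    #   Timestamp('2011-01-01 09:00') + BDay()  # -> Timestamp('2011-01-03 09:00')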
def test_immutable(self, offset_types):
# GH#21341 check that __setattr__ raises
offset = self._get_offset(offset_types)
with pytest.raises(AttributeError):
offset.normalize = True
with pytest.raises(AttributeError):
offset.n = 91
def test_return_type(self, offset_types):
offset = self._get_offset(offset_types)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
assert isinstance(result, Timestamp)
# make sure that we are returning NaT
assert NaT + offset is NaT
assert offset + NaT is NaT
assert NaT - offset is NaT
assert (-offset).apply(NaT) is NaT
def test_offset_n(self, offset_types):
offset = self._get_offset(offset_types)
assert offset.n == 1
neg_offset = offset * -1
assert neg_offset.n == -1
mul_offset = offset * 3
assert mul_offset.n == 3
def test_offset_freqstr(self, offset_types):
offset = self._get_offset(offset_types)
freqstr = offset.freqstr
if freqstr not in ('<Easter>',
"<DateOffset: days=1>",
'LWOM-SAT', ):
code = get_offset(freqstr)
assert offset.rule_code == code
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
if normalize and issubclass(offset, Tick):
# normalize=True disallowed for Tick subclasses GH#21427
return
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
assert isinstance(result, Timestamp)
assert result == expected
result = func(Timestamp(dt))
assert isinstance(result, Timestamp)
assert result == expected
# see gh-14101
exp_warning = None
ts = Timestamp(dt) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected + Nano(5)
else:
assert result == expected
if isinstance(dt, np.datetime64):
# test tz when input is datetime or Timestamp
return
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = timezones.maybe_get_tz(tz)
dt_tz = conversion.localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
assert isinstance(result, Timestamp)
assert result == expected_localize
result = func(Timestamp(dt, tz=tz))
assert isinstance(result, Timestamp)
assert result == expected_localize
# see gh-14101
exp_warning = None
ts = Timestamp(dt, tz=tz) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected_localize + Nano(5)
else:
assert result == expected_localize
def test_apply(self, offset_types):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = self.expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset_types, 'apply', dt, expected,
normalize=True)
def test_rollforward(self, offset_types):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin',
'Week', 'Hour', 'Minute', 'Second', 'Milli', 'Micro',
'Nano', 'DateOffset']
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
expecteds['CustomBusinessHour'] = Timestamp('2011-01-03 09:00:00')
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected, normalize=True)
def test_rollback(self, offset_types):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd':
Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin':
Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'SemiMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'CustomBusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin', 'Week',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']:
expecteds[n] = Timestamp('2011/01/01 09:00')
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'SemiMonthBegin': Timestamp('2010-12-15 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected, normalize=True)
def test_onOffset(self, offset_types):
dt = self.expecteds[offset_types.__name__]
offset_s = self._get_offset(offset_types)
assert offset_s.onOffset(dt)
# when normalize=True, onOffset checks time is 00:00:00
if issubclass(offset_types, Tick):
# normalize=True disallowed for Tick subclasses GH#21427
return
offset_n = self._get_offset(offset_types, normalize=True)
assert not offset_n.onOffset(dt)
if offset_types in (BusinessHour, CustomBusinessHour):
# In default BusinessHour (9:00-17:00), normalized time
# cannot be in business hour range
return
date = datetime(dt.year, dt.month, dt.day)
assert offset_n.onOffset(date)
def test_add(self, offset_types, tz_naive_fixture):
tz = tz_naive_fixture
dt = datetime(2011, 1, 1, 9, 0)
offset_s = self._get_offset(offset_types)
expected = self.expecteds[offset_types.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
# normalize=True, disallowed for Tick subclasses GH#21427
if issubclass(offset_types, Tick):
return
offset_s = self._get_offset(offset_types, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
def test_pickle_v0_15_2(self, datapath):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = datapath('tseries', 'offsets', 'data',
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
class TestDateOffset(Base):
def setup_method(self, method):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert ((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert ((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert ((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert ((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert (DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
assert offset1 != offset2
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<BusinessDay>'
assert repr(self.offset2) == '<2 * BusinessDays>'
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (BDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (BDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, d, expected in tests:
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((BDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
pytest.raises(TypeError, BDay().apply, BMonthEnd())
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30),
end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
BusinessHour(start='AAA')
with pytest.raises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
assert repr(self.offset2) == '<3 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset3) == '<-1 * BusinessHour: BH=09:00-17:00>'
assert repr(self.offset4) == '<-4 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset5) == '<BusinessHour: BH=11:00-14:30>'
assert repr(self.offset6) == '<BusinessHour: BH=20:00-05:00>'
assert repr(self.offset7) == '<-2 * BusinessHours: BH=21:30-06:30>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
assert BusinessHour() != BusinessHour(-1)
assert BusinessHour(start='09:00') == BusinessHour()
assert BusinessHour(start='09:00') != BusinessHour(start='09:01')
assert (BusinessHour(start='09:00', end='17:00') !=
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
def test_sub(self):
        # we have to override test_sub here because self.offset2 is not
        # defined as self._offset(2)
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert (self.offset5.rollforward(self.d) ==
datetime(2014, 7, 1, 11, 0))
assert (self.offset6.rollforward(self.d) ==
datetime(2014, 7, 1, 20, 0))
assert (self.offset7.rollforward(self.d) ==
datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((BusinessHour(normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((BusinessHour(-1, normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((BusinessHour(1, normalize=True, start='17:00',
end='04:00'), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('case', normalize_cases)
def test_normalize(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
on_offset_cases = []
on_offset_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='10:00', end='15:00'), {
datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
opening_time_cases = []
    # opening time should be affected by the sign of n, not by n's value or
    # by the end time
opening_time_cases.append(([BusinessHour(), BusinessHour(n=2),
BusinessHour(n=4), BusinessHour(end='10:00'),
BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
            # if the timestamp falls exactly on the opening time, the next
            # opening time is that same timestamp
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9),
datetime(2014, 7, 2, 9)),
        # 2014-07-05 is a Saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9),
datetime(2014, 7, 7, 9))}))
opening_time_cases.append(([BusinessHour(start='11:15'),
BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15',
end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15))}))
opening_time_cases.append(([BusinessHour(-1), BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'),
BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9))}))
opening_time_cases.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17),
datetime(2014, 7, 7, 17)), }))
opening_time_cases.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17))}))
@pytest.mark.parametrize('case', opening_time_cases)
def test_opening_time(self, case):
_offsets, cases = case
for offset in _offsets:
for dt, (exp_next, exp_prev) in compat.iteritems(cases):
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
apply_cases = []
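    # apply() adds n business hours to a timestamp: out-of-hours starting
    # points are first rolled to the next opening (or previous closing for
    # n < 0), and any remaining hours spill over into the next business day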
apply_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((BusinessHour(4), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
apply_cases.append((BusinessHour(-1), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
apply_cases.append((BusinessHour(-4), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
apply_cases.append((BusinessHour(start='13:00', end='16:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
apply_cases.append((BusinessHour(n=2, start='13:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='13:00', end='16:00'), {
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
apply_cases.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
apply_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
apply_large_n_cases = []
    # one week later: 40 business hours == 5 business days of 8 hours each
apply_large_n_cases.append((BusinessHour(40), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
    # 3 business days and 1 hour earlier: -25 hours == -(3 * 8 + 1)
apply_large_n_cases.append((BusinessHour(-25), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
    # 5 business days and 3 hours later: 28 hours at 5 hours/day (21:00-02:00)
apply_large_n_cases.append((BusinessHour(28, start='21:00', end='02:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
@pytest.mark.parametrize('case', apply_large_n_cases)
def test_apply_large_n(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_nanoseconds(self):
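        # nanosecond components on the base Timestamp should survive
        # BusinessHour arithmetic unchanged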
tests = []
tests.append((BusinessHour(),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 16:00') + Nano(5),
Timestamp('2014-07-04 16:00') + Nano(5): Timestamp(
'2014-07-07 09:00') + Nano(5),
Timestamp('2014-07-04 16:00') - Nano(5): Timestamp(
'2014-07-04 17:00') - Nano(5)}))
tests.append((BusinessHour(-1),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 14:00') + Nano(5),
Timestamp('2014-07-04 10:00') + Nano(5): Timestamp(
'2014-07-04 09:00') + Nano(5),
Timestamp('2014-07-04 10:00') - Nano(5): Timestamp(
'2014-07-03 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00',
'2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45',
'2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45',
'2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45',
'2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45',
'2014-07-08 10:45'],
freq='BH')
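        # NOTE: 'expected' is overridden just below, so this block only checks
        # that idx2 and idx3 match idx1, not the explicit index built above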
expected = idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
class TestCustomBusinessHour(Base):
_offset = CustomBusinessHour
holidays = ['2014-06-27', datetime(2014, 6, 30),
np.datetime64('2014-07-02')]
def setup_method(self, method):
# 2014 Calendar to check custom holidays
# Sun Mon Tue Wed Thu Fri Sat
# 6/22 23 24 25 26 27 28
# 29 30 7/1 2 3 4 5
# 6 7 8 9 10 11 12
self.d = datetime(2014, 7, 1, 10, 00)
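        # offset1 drops Mondays via its weekmask; offset2 skips the explicit
        # holidays defined on the class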
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
self.offset2 = CustomBusinessHour(holidays=self.holidays)
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
CustomBusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
CustomBusinessHour(start='AAA')
with pytest.raises(ValueError):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
assert repr(self.offset2) == '<CustomBusinessHour: CBH=09:00-17:00>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert (CustomBusinessHour(start='09:00') ==
CustomBusinessHour())
assert (CustomBusinessHour(start='09:00') !=
CustomBusinessHour(start='09:01'))
assert (CustomBusinessHour(start='09:00', end='17:00') !=
CustomBusinessHour(start='17:00', end='09:01'))
assert (CustomBusinessHour(weekmask='Tue Wed Thu Fri') !=
CustomBusinessHour(weekmask='Mon Tue Wed Thu Fri'))
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
def test_sub(self):
# override the Base.test_sub implementation because self.offset2 is
# defined differently in this class than the test expects
pass
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
d = datetime(2014, 7, 1, 0)
        # 2014/07/01 is a Tuesday; 06/30 (Monday) is not a business day under
        # offset1's 'Tue Wed Thu Fri' weekmask
assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
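    # with normalize=True, the result of apply() is truncated to midnight of
    # the business day it lands on, skipping the custom holidays defined above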
normalize_cases.append((
CustomBusinessHour(normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((
CustomBusinessHour(-1, normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((
CustomBusinessHour(1, normalize=True,
start='17:00', end='04:00',
holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('norm_cases', normalize_cases)
def test_normalize(self, norm_cases):
offset, cases = norm_cases
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
tests.append((CustomBusinessHour(start='10:00', end='15:00',
holidays=self.holidays),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
apply_cases = []
apply_cases.append((
CustomBusinessHour(holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((
CustomBusinessHour(4, holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
@pytest.mark.parametrize('apply_case', apply_cases)
def test_apply(self, apply_case):
offset, cases = apply_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
nano_cases = []
nano_cases.append(
(CustomBusinessHour(holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 16:00') + Nano(5),
Timestamp('2014-07-01 16:00') + Nano(5):
Timestamp('2014-07-03 09:00') + Nano(5),
Timestamp('2014-07-01 16:00') - Nano(5):
Timestamp('2014-07-01 17:00') - Nano(5)}))
nano_cases.append(
(CustomBusinessHour(-1, holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 14:00') + Nano(5),
Timestamp('2014-07-01 10:00') + Nano(5):
Timestamp('2014-07-01 09:00') + Nano(5),
Timestamp('2014-07-01 10:00') - Nano(5):
Timestamp('2014-06-26 17:00') - Nano(5)}))
@pytest.mark.parametrize('nano_case', nano_cases)
def test_apply_nanoseconds(self, nano_case):
offset, cases = nano_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestCustomBusinessDay(Base):
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
self.offset1 = self.offset
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (CDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (CDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((CDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
pytest.raises(Exception, CDay().apply, BMonthEnd())
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
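        # CDay accepts the same weekmask formats as numpy.busdaycalendar:
        # day-name strings, seven-character '0'/'1' strings, or seven-element
        # flag sequences ordered Mon..Sun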
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self, datapath):
        hdays = [datetime(2013, 1, 1) for _ in range(4)]
pth = datapath('tseries', 'offsets', 'data', 'cday-0.14.1.pickle')
cday0_14_1 = read_pickle(pth)
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
class CustomBusinessMonthBase(object):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask='Mon Wed Fri')
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
apply_cases.append((2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
apply_cases.append((-CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
apply_cases.append((-2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}))
apply_cases.append((CBMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=freq).tolist()[0] == datetime(2012, 1, 31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
apply_cases = []
apply_cases.append((CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
apply_cases.append((2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
apply_cases.append((-CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
apply_cases.append((-2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
apply_cases.append((CBMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=cbmb).tolist()[0] == datetime(2012, 1, 3))
class TestWeek(Base):
_offset = Week
d = Timestamp(datetime(2008, 1, 2))
offset1 = _offset()
offset2 = _offset(2)
def test_repr(self):
assert repr(Week(weekday=0)) == "<Week: weekday=0>"
assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
with pytest.raises(ValueError):
Week(weekday=7)
with pytest.raises(ValueError, match="Day must be"):
Week(weekday=-1)
def test_isAnchored(self):
assert Week(weekday=0).isAnchored()
assert not Week().isAnchored()
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
offset_cases = []
# not business week
offset_cases.append((Week(), {
datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# Mon
offset_cases.append((Week(weekday=0), {
datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# n=0 -> roll forward. Mon
offset_cases.append((Week(0, weekday=0), {
datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
    # n=-2, anchored to Tuesday (weekday=1)
offset_cases.append((Week(-2, weekday=1), {
datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('weekday', range(7))
def test_onOffset(self, weekday):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
if day % 7 == weekday:
expected = True
else:
expected = False
assert_onOffset(offset, date, expected)
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
offset1 = _offset()
offset2 = _offset(2)
def test_constructor(self):
with pytest.raises(ValueError, match="^Week"):
WeekOfMonth(n=1, week=4, weekday=0)
with pytest.raises(ValueError, match="^Week"):
WeekOfMonth(n=1, week=-1, weekday=0)
with pytest.raises(ValueError, match="^Day"):
WeekOfMonth(n=1, week=0, weekday=-1)
with pytest.raises(ValueError, match="^Day"):
WeekOfMonth(n=1, week=0, weekday=-7)
def test_repr(self):
assert (repr(WeekOfMonth(weekday=1, week=2)) ==
"<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
        # each tuple below is (n, week, weekday, dt, expected); see the loop
        # that unpacks test_cases at the end of this method
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(0, 0, 1, date1, datetime(2011, 1, 4)),
(0, 0, 1, date2, datetime(2011, 2, 1)),
(0, 0, 1, date3, datetime(2011, 2, 1)),
(0, 0, 1, date4, datetime(2011, 2, 1)),
(0, 1, 1, date1, datetime(2011, 1, 11)),
(0, 1, 1, date2, datetime(2011, 1, 11)),
(0, 1, 1, date3, datetime(2011, 2, 8)),
(0, 1, 1, date4, datetime(2011, 2, 8)),
(0, 0, 1, date1, datetime(2011, 1, 4)),
(0, 1, 1, date2, datetime(2011, 1, 11)),
(0, 2, 1, date3, datetime(2011, 1, 18)),
(0, 3, 1, date4, datetime(2011, 1, 25)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15))]
for n, week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assert_offset_equal(offset, dt, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
assert result == datetime(2011, 1, 12)
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
assert result == datetime(2011, 2, 2)
on_offset_cases = [(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
week, weekday, dt, expected = case
offset = WeekOfMonth(week=week, weekday=weekday)
assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
offset1 = _offset()
offset2 = _offset(2)
def test_constructor(self):
with pytest.raises(ValueError, match="^N cannot be 0"):
LastWeekOfMonth(n=0, weekday=1)
with pytest.raises(ValueError, match="^Day"):
LastWeekOfMonth(n=1, weekday=-1)
with pytest.raises(ValueError, match="^Day"):
LastWeekOfMonth(n=1, weekday=7)
def test_offset(self):
# Saturday
last_sat = datetime(2013, 8, 31)
next_sat = datetime(2013, 9, 28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
assert one_day_before + offset_sat == last_sat
one_day_after = (last_sat + timedelta(days=+1))
assert one_day_after + offset_sat == next_sat
# Test On that day
assert last_sat + offset_sat == next_sat
# Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013, 1, 31)
next_thurs = datetime(2013, 2, 28)
one_day_before = last_thurs + timedelta(days=-1)
assert one_day_before + offset_thur == last_thurs
one_day_after = last_thurs + timedelta(days=+1)
assert one_day_after + offset_thur == next_thurs
# Test on that day
assert last_thurs + offset_thur == next_thurs
three_before = last_thurs + timedelta(days=-3)
assert three_before + offset_thur == last_thurs
two_after = last_thurs + timedelta(days=+2)
assert two_after + offset_thur == next_thurs
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
on_offset_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
(WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
(WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
weekday, dt, expected = case
offset = LastWeekOfMonth(weekday=weekday)
assert offset.onOffset(dt) == expected
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthEnd().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SM')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15)}))
offset_cases.append((SemiMonthEnd(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16)}))
offset_cases.append((SemiMonthEnd(2), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30)}))
offset_cases.append((SemiMonthEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_vectorized_offset_addition(self, klass, assert_func):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthBegin().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SMS')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(2), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1)}))
offset_cases.append((SemiMonthBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
offset_cases.append((SemiMonthBegin(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4)}))
offset_cases.append((SemiMonthBegin(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_vectorized_offset_addition(self, klass, assert_func):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
Timestamp('2000-03-01', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assert_offset_equal(-Easter(2),
datetime(2011, 1, 1),
datetime(2009, 4, 12))
assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assert_offset_equal(-Easter(2),
datetime(2010, 4, 4),
datetime(2008, 3, 23))
class TestOffsetNames(object):
def test_get_offset_name(self):
assert BDay().freqstr == 'B'
assert BDay(2).freqstr == '2B'
assert BMonthEnd().freqstr == 'BM'
assert Week(weekday=0).freqstr == 'W-MON'
assert Week(weekday=1).freqstr == 'W-TUE'
assert Week(weekday=2).freqstr == 'W-WED'
assert Week(weekday=3).freqstr == 'W-THU'
assert Week(weekday=4).freqstr == 'W-FRI'
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
def test_get_offset():
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset('gibberish')
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset('QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4))]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset(name)
class TestOffsetAliases(object):
def setup_method(self, method):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
assert k == v.copy()
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
assert k == get_offset(k).rule_code
# should be cached - this is kind of an internals test...
assert k in _offset_map
assert k == (get_offset(k) * 3).rule_code
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
assert isinstance(code, int)
assert stride == 3
assert k == get_freq_str(code)
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
oset.freqstr
assert (not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert (off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert (off.freqstr == 'B-30Min')
def get_all_subclasses(cls):
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
        ret = ret | get_all_subclasses(this_subclass)
return ret
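# Illustrative example: for a class hierarchy A -> B -> C (C subclasses B,
# B subclasses A), get_all_subclasses(A) returns {B, C}; indirect subclasses
# are accumulated through the recursive call.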
class TestCaching(object):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setup_method(self, method):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
assert not inst1._should_cache(), cls
return
assert inst1._should_cache(), cls
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=inst1, normalize=True)
assert cls() in _daterange_cache, cls
def test_should_cache_month_end(self):
assert not MonthEnd()._should_cache()
def test_should_cache_bmonth_end(self):
assert not BusinessMonthEnd()._should_cache()
def test_should_cache_week_month(self):
assert not WeekOfMonth(weekday=1, week=2)._should_cache()
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
if subclass.__name__[0] == "_" \
or subclass in TestCaching.no_simple_ctr:
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=MonthEnd(), normalize=True)
assert not MonthEnd() in _daterange_cache
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=BusinessMonthEnd(), normalize=True)
assert not BusinessMonthEnd() in _daterange_cache
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
assert inst2 not in _daterange_cache
class TestReprNames(object):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month
for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day
for week in ('1', '2', '3', '4') for day in days]
_offset_map.clear()
for name in names:
offset = get_offset(name)
assert offset.freqstr == name
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
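# Illustrative example: a US/Eastern timestamp during standard time (UTC-5)
# yields -5.0, while the same wall-clock time under daylight saving yields -4.0.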
class TestDST(object):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(utc_offset_daylight=-4,
utc_offset_standard=-5, ),
'dateutil/US/Pacific': dict(utc_offset_daylight=-7,
utc_offset_standard=-8, )
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
assert t.date() == timedelta(days=7 * offset.kwds[
'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
assert (t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name == 'days':
# dates should match
assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
assert (t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
assert (t == (tstart.tz_convert('UTC') + offset)
.tz_convert('US/Pacific'))
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset)
else:
offset_string = '-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
# test moving from daylight savings to standard time
import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed, but skip the test
continue
def test_springforward_plural(self):
# test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_springfwd,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_fallback_singular(self):
# in the case of singular offsets, we don't necessarily know which utc
# offset the new Timestamp will wind up in (the tz for 1 month may be
# different from 1 second) so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_fallback, hrs_pre, tz), expected_utc_offset=None)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None)
offset_classes = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
SemiMonthBegin: ['11/2/2012', '11/15/2012'],
SemiMonthEnd: ['11/2/2012', '11/15/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']}.items()
@pytest.mark.parametrize('tup', offset_classes)
def test_all_offset_classes(self, tup):
offset, test_values = tup
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
assert first == second
# ---------------------------------------------------------------------
def test_get_offset_day_error():
# subclass of _BaseOffset must override _day_opt attribute, or we should
# get a NotImplementedError
with pytest.raises(NotImplementedError):
DateOffset()._get_offset_day(datetime.now())
def test_valid_default_arguments(offset_types):
# GH#19142 check that the calling the constructors without passing
# any keyword arguments produce valid offsets
cls = offset_types
cls()
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_month_attributes(kwd, month_classes):
# GH#18226
cls = month_classes
# check that we cannot create e.g. MonthEnd(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
cls = tick_classes
# check that we cannot create e.g. Hour(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
def test_validate_n_error():
with pytest.raises(TypeError):
DateOffset(n='Doh!')
with pytest.raises(TypeError):
MonthBegin(n=timedelta(1))
with pytest.raises(TypeError):
BDay(n=np.array([1, 2], dtype=np.int64))
def test_require_integers(offset_types):
cls = offset_types
with pytest.raises(ValueError):
cls(n=1.5)
def test_tick_normalize_raises(tick_classes):
# check that trying to create a Tick object with normalize=True raises
# GH#21427
cls = tick_classes
with pytest.raises(ValueError):
cls(n=3, normalize=True)
def test_weeks_onoffset():
# GH#18510 Week with weekday = None, normalize = False should always
# be onOffset
offset = Week(n=2, weekday=None)
ts = Timestamp('1862-01-13 09:03:34.873477378+0210', tz='Africa/Lusaka')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = Week(n=2, weekday=None)
ts = Timestamp('1856-10-24 16:18:36.556360110-0717', tz='Pacific/Easter')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_weekofmonth_onoffset():
# GH#18864
# Make sure that nanoseconds don't trip up onOffset (and with it apply)
offset = WeekOfMonth(n=2, week=2, weekday=0)
ts = Timestamp('1916-05-15 01:14:49.583410462+0422', tz='Asia/Qyzylorda')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = WeekOfMonth(n=-3, week=1, weekday=0)
ts = Timestamp('1980-12-08 03:38:52.878321185+0500', tz='Asia/Oral')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_last_week_of_month_on_offset():
# GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth
offset = LastWeekOfMonth(n=4, weekday=6)
ts = Timestamp('1917-05-27 20:55:27.084284178+0200',
tz='Europe/Warsaw')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
# negative n
offset = LastWeekOfMonth(n=-4, weekday=5)
ts = Timestamp('2005-08-27 05:01:42.799392561-0500',
tz='America/Rainy_River')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
| bsd-3-clause |
cseed/hail | hail/python/test/hail/stats/test_linear_mixed_model.py | 3 | 17882 | import unittest
import numpy as np
import hail as hl
import hail.utils as utils
from hail.stats import LinearMixedModel
from hail.linalg import BlockMatrix
from ..helpers import *
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
@staticmethod
def _filter_and_standardize_cols(a):
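        # Mean-centre each column, drop columns whose centred norm is zero,
        # and rescale the remaining columns to unit Euclidean norm.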
a = a.copy()
col_means = np.mean(a, axis=0, keepdims=True)
a -= col_means
col_lengths = np.linalg.norm(a, axis=0, keepdims=True)
col_filter = col_lengths > 0
return np.copy(a[:, np.squeeze(col_filter)] / col_lengths[col_filter])
@skip_unless_spark_backend()
def test_linear_mixed_model_fastlmm(self):
# FastLMM Test data is from all.bed, all.bim, all.fam, cov.txt, pheno_10_causals.txt:
# https://github.com/MicrosoftGenomics/FaST-LMM/tree/master/tests/datasets/synth
#
# Data is filtered to chromosome 1,3 and samples 0-124,375-499 (2000 variants and 250 samples)
#
# Results are computed with single_snp (with LOCO) as in:
# https://github.com/MicrosoftGenomics/FaST-LMM/blob/master/doc/ipynb/FaST-LMM.ipynb
n, m = 250, 1000 # per chromosome
x_table = hl.import_table(resource('fastlmmCov.txt'), no_header=True, impute=True).key_by('f1')
y_table = hl.import_table(resource('fastlmmPheno.txt'), no_header=True, impute=True, delimiter=' ').key_by('f1')
mt = hl.import_plink(bed=resource('fastlmmTest.bed'),
bim=resource('fastlmmTest.bim'),
fam=resource('fastlmmTest.fam'),
reference_genome=None)
mt = mt.annotate_cols(x=x_table[mt.col_key].f2)
mt = mt.annotate_cols(y=y_table[mt.col_key].f2).cache()
x = np.array([np.ones(n), mt.key_cols_by()['x'].collect()]).T
y = np.array(mt.key_cols_by()['y'].collect())
mt_chr1 = mt.filter_rows(mt.locus.contig == '1')
mt_chr3 = mt.filter_rows(mt.locus.contig == '3')
# testing chrom 1 for h2, betas, p-values
h2_fastlmm = 0.14276125
beta_fastlmm = [0.012202061, 0.037718282, -0.033572693, 0.29171541, -0.045644170]
# FastLMM p-values do not agree to high precision because FastLMM regresses
# out x from each SNP first and does an F(1, dof)-test on (beta / se)^2
# (t-test), whereas Hail does likelihood ratio test.
# We verify below that Hail's p-values remain fixed going forward.
# fastlmm = [0.84650294, 0.57865098, 0.59050998, 1.6649473e-06, 0.46892059]
pval_hail = [0.84543084, 0.57596760, 0.58788517, 1.4057279e-06, 0.46578204]
gamma_fastlmm = h2_fastlmm / (1 - h2_fastlmm)
g = BlockMatrix.from_entry_expr(mt_chr1.GT.n_alt_alleles()).to_numpy().T
g_std = self._filter_and_standardize_cols(g)
# full rank
k = (g_std @ g_std.T) * (n / m)
s, u = np.linalg.eigh(k)
p = u.T
model = LinearMixedModel(p @ y, p @ x, s)
model.fit()
assert np.isclose(model.h_sq, h2_fastlmm)
h2_std_error = 0.13770773 # hard coded having checked against plot
assert np.isclose(model.h_sq_standard_error, h2_std_error, 1e-03)
h_sq_norm_lkhd = model.h_sq_normalized_lkhd()[1:-1]
argmax = int(100 * h2_fastlmm)
assert argmax <= np.argmax(h_sq_norm_lkhd) + 1 <= argmax + 1
assert np.isclose(np.sum(h_sq_norm_lkhd), 1.0)
mt3_chr3_5var = mt_chr3.filter_rows(mt_chr3.locus.position < 2005) # first 5
a = BlockMatrix.from_entry_expr(mt3_chr3_5var.GT.n_alt_alleles()).to_numpy().T
# FastLMM standardizes each variant to have mean 0 and variance 1.
a = self._filter_and_standardize_cols(a) * np.sqrt(n)
pa = p @ a
model.fit(log_gamma=np.log(gamma_fastlmm))
res = model.fit_alternatives_numpy(pa, return_pandas=True)
assert np.allclose(res['beta'], beta_fastlmm)
assert np.allclose(res['p_value'], pval_hail)
pa_t_path = utils.new_temp_file(extension='bm')
BlockMatrix.from_numpy(pa.T).write(pa_t_path, force_row_major=True)
res = model.fit_alternatives(pa_t_path).to_pandas()
assert np.allclose(res['beta'], beta_fastlmm)
assert np.allclose(res['p_value'], pval_hail)
# low rank
ld = g_std.T @ g_std
sl, v = np.linalg.eigh(ld)
n_eigenvectors = int(np.sum(sl > 1e-10))
assert n_eigenvectors < n
sl = sl[-n_eigenvectors:]
v = v[:, -n_eigenvectors:]
s = sl * (n / m)
p = (g_std @ (v / np.sqrt(sl))).T
model = LinearMixedModel(p @ y, p @ x, s, y, x)
model.fit()
assert np.isclose(model.h_sq, h2_fastlmm)
assert np.isclose(model.h_sq_standard_error, h2_std_error)
model.fit(log_gamma=np.log(gamma_fastlmm))
pa = p @ a
res = model.fit_alternatives_numpy(pa, a, return_pandas=True)
assert np.allclose(res['beta'], beta_fastlmm)
assert np.allclose(res['p_value'], pval_hail)
a_t_path = utils.new_temp_file(extension='bm')
BlockMatrix.from_numpy(a.T).write(a_t_path, force_row_major=True)
pa_t_path = utils.new_temp_file(extension='bm')
BlockMatrix.from_numpy(pa.T).write(pa_t_path, force_row_major=True)
res = model.fit_alternatives(pa_t_path, a_t_path).to_pandas()
assert np.allclose(res['beta'], beta_fastlmm)
assert np.allclose(res['p_value'], pval_hail)
# testing chrom 3 for h2
h2_fastlmm = 0.36733240
g = BlockMatrix.from_entry_expr(mt_chr3.GT.n_alt_alleles()).to_numpy().T
g_std = self._filter_and_standardize_cols(g)
# full rank
k = (g_std @ g_std.T) * (n / m)
s, u = np.linalg.eigh(k)
p = u.T
model = LinearMixedModel(p @ y, p @ x, s)
model.fit()
assert np.isclose(model.h_sq, h2_fastlmm)
h2_std_error = 0.17409641 # hard coded having checked against plot
assert np.isclose(model.h_sq_standard_error, h2_std_error)
h_sq_norm_lkhd = model.h_sq_normalized_lkhd()[1:-1]
argmax = int(100 * h2_fastlmm)
assert argmax <= np.argmax(h_sq_norm_lkhd) + 1 <= argmax + 1
assert np.isclose(np.sum(h_sq_norm_lkhd), 1.0)
# low rank
l = g_std.T @ g_std
sl, v = np.linalg.eigh(l)
n_eigenvectors = int(np.sum(sl > 1e-10))
assert n_eigenvectors < n
sl = sl[-n_eigenvectors:]
v = v[:, -n_eigenvectors:]
s = sl * (n / m)
p = (g_std @ (v / np.sqrt(sl))).T
model = LinearMixedModel(p @ y, p @ x, s, y, x)
model.fit()
assert np.isclose(model.h_sq, h2_fastlmm)
assert np.isclose(model.h_sq_standard_error, h2_std_error)
@skip_unless_spark_backend()
def test_linear_mixed_model_math(self):
gamma = 2.0 # testing at fixed value of gamma
n, f, m = 4, 2, 3
y = np.array([0.0, 1.0, 8.0, 9.0])
x = np.array([[1.0, 0.0],
[1.0, 2.0],
[1.0, 1.0],
[1.0, 4.0]])
z = np.array([[0.0, 0.0, 1.0],
[0.0, 1.0, 2.0],
[1.0, 2.0, 4.0],
[2.0, 4.0, 8.0]])
k = z @ z.T
v = k + np.eye(4) / gamma
v_inv = np.linalg.inv(v)
beta = np.linalg.solve(x.T @ v_inv @ x, x.T @ v_inv @ y)
residual = y - x @ beta
sigma_sq = 1 / (n - f) * (residual @ v_inv @ residual)
sv = sigma_sq * v
neg_log_lkhd = 0.5 * (np.linalg.slogdet(sv)[1] + np.linalg.slogdet(x.T @ np.linalg.inv(sv) @ x)[1]) # plus C
x_star = np.array([1.0, 0.0, 1.0, 0.0])
a = x_star.reshape(n, 1)
x1 = np.hstack([a, x])
beta1 = np.linalg.solve(x1.T @ v_inv @ x1, x1.T @ v_inv @ y)
residual1 = y - x1 @ beta1
chi_sq = n * np.log((residual @ v_inv @ residual) / (residual1 @ v_inv @ residual1))
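        # chi_sq is the likelihood-ratio statistic comparing the fits without
        # and with the tested covariate x_star.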
# test from_kinship, full-rank fit
model, p = LinearMixedModel.from_kinship(y, x, k)
s0, u0 = np.linalg.eigh(k)
s0 = np.flip(s0, axis=0)
p0 = np.fliplr(u0).T
self.assertTrue(model._same(LinearMixedModel(p0 @ y, p0 @ x, s0)))
model.fit(np.log(gamma))
self.assertTrue(np.allclose(model.beta, beta))
self.assertAlmostEqual(model.sigma_sq, sigma_sq)
self.assertAlmostEqual(model.compute_neg_log_reml(np.log(gamma)), neg_log_lkhd)
# test full-rank alternative
pa = p @ a
stats = model.fit_alternatives_numpy(pa).collect()[0]
self.assertAlmostEqual(stats.beta, beta1[0])
self.assertAlmostEqual(stats.chi_sq, chi_sq)
pa_t_path = utils.new_temp_file()
BlockMatrix.from_numpy(pa.T).write(pa_t_path, force_row_major=True)
stats = model.fit_alternatives(pa_t_path).collect()[0]
self.assertAlmostEqual(stats.beta, beta1[0])
self.assertAlmostEqual(stats.chi_sq, chi_sq)
# test from_random_effects, low-rank fit
s0, p0 = s0[:m], p0[:m, :]
# test BlockMatrix path
temp_path = utils.new_temp_file()
model, _ = LinearMixedModel.from_random_effects(y, x,
BlockMatrix.from_numpy(z),
p_path=temp_path,
complexity_bound=0)
lmm = LinearMixedModel(p0 @ y, p0 @ x, s0, y, x, p_path=temp_path)
self.assertTrue(model._same(lmm))
# test ndarray path
model, p = LinearMixedModel.from_random_effects(y, x, z)
lmm = LinearMixedModel(p0 @ y, p0 @ x, s0, y, x)
self.assertTrue(model._same(lmm))
model.fit(np.log(gamma))
self.assertTrue(np.allclose(model.beta, beta))
self.assertAlmostEqual(model.sigma_sq, sigma_sq)
self.assertAlmostEqual(model.compute_neg_log_reml(np.log(gamma)), neg_log_lkhd)
# test low_rank alternative
pa = p @ a
stats = model.fit_alternatives_numpy(pa, a).collect()[0]
self.assertAlmostEqual(stats.beta, beta1[0])
self.assertAlmostEqual(stats.chi_sq, chi_sq)
a_t_path = utils.new_temp_file()
BlockMatrix.from_numpy(a.T).write(a_t_path, force_row_major=True)
pa_t_path = utils.new_temp_file()
BlockMatrix.from_numpy(pa.T).write(pa_t_path, force_row_major=True)
stats = model.fit_alternatives(pa_t_path, a_t_path).collect()[0]
self.assertAlmostEqual(stats.beta, beta1[0])
self.assertAlmostEqual(stats.chi_sq, chi_sq)
@skip_unless_spark_backend()
def test_linear_mixed_model_function(self):
n, f, m = 4, 2, 3
y = np.array([0.0, 1.0, 8.0, 9.0])
x = np.array([[1.0, 0.0],
[1.0, 2.0],
[1.0, 1.0],
[1.0, 4.0]])
z = np.array([[0.0, 0.0, 1.0],
[0.0, 1.0, 2.0],
[1.0, 2.0, 0.0],
[2.0, 0.0, 1.0]])
p_path = utils.new_temp_file()
def make_call(gt):
if gt == 0.0:
return hl.Call([0, 0])
if gt == 1.0:
return hl.Call([0, 1])
if gt == 2.0:
return hl.Call([1, 1])
data = [{'v': j, 's': i, 'y': y[i], 'x1': x[i, 1], 'zt': make_call(z[i, j])}
for i in range(n) for j in range(m)]
ht = hl.Table.parallelize(data, hl.dtype('struct{v: int32, s: int32, y: float64, x1: float64, zt: tcall}'))
mt = ht.to_matrix_table(row_key=['v'], col_key=['s'], col_fields=['x1', 'y'])
colsort = np.argsort(mt.key_cols_by().s.collect()).tolist()
mt = mt.choose_cols(colsort)
rrm = hl.realized_relationship_matrix(mt.zt).to_numpy()
# kinship path agrees with from_kinship
model, p = hl.linear_mixed_model(mt.y, [1, mt.x1], k=rrm, p_path=p_path, overwrite=True)
model0, p0 = LinearMixedModel.from_kinship(y, x, rrm, p_path, overwrite=True)
assert model0._same(model)
assert np.allclose(p0, p)
# random effects path with standardize=True agrees with low-rank rrm
s0, u0 = np.linalg.eigh(rrm)
s0 = np.flip(s0, axis=0)[:m]
p0 = np.fliplr(u0).T[:m, :]
model, p = hl.linear_mixed_model(mt.y, [1, mt.x1], z_t=mt.zt.n_alt_alleles(), p_path=p_path, overwrite=True)
model0 = LinearMixedModel(p0 @ y, p0 @ x, s0, y, x, p_path=p_path)
assert model0._same(model)
# random effects path with standardize=False agrees with from_random_effects
model0, p0 = LinearMixedModel.from_random_effects(y, x, z, p_path, overwrite=True)
model, p = hl.linear_mixed_model(mt.y, [1, mt.x1], z_t=mt.zt.n_alt_alleles(), p_path=p_path, overwrite=True, standardize=False)
assert model0._same(model)
assert np.allclose(p0, p.to_numpy())
@skip_unless_spark_backend()
def test_linear_mixed_regression_full_rank(self):
x_table = hl.import_table(resource('fastlmmCov.txt'), no_header=True, impute=True).key_by('f1')
y_table = hl.import_table(resource('fastlmmPheno.txt'), no_header=True, impute=True, delimiter=' ').key_by('f1')
mt = hl.import_plink(bed=resource('fastlmmTest.bed'),
bim=resource('fastlmmTest.bim'),
fam=resource('fastlmmTest.fam'),
reference_genome=None)
mt = mt.annotate_cols(x=x_table[mt.col_key].f2)
mt = mt.annotate_cols(y=y_table[mt.col_key].f2).cache()
p_path = utils.new_temp_file()
h2_fastlmm = 0.142761
h2_places = 6
beta_fastlmm = [0.012202061, 0.037718282, -0.033572693, 0.29171541, -0.045644170]
pval_hail = [0.84543084, 0.57596760, 0.58788517, 1.4057279e-06, 0.46578204]
mt_chr1 = mt.filter_rows(mt.locus.contig == '1')
model, _ = hl.linear_mixed_model(y=mt_chr1.y, x=[1, mt_chr1.x], z_t=mt_chr1.GT.n_alt_alleles(), p_path=p_path)
model.fit()
self.assertAlmostEqual(model.h_sq, h2_fastlmm, places=h2_places)
mt_chr3 = mt.filter_rows((mt.locus.contig == '3') & (mt.locus.position < 2005))
mt_chr3 = mt_chr3.annotate_rows(stats=hl.agg.stats(mt_chr3.GT.n_alt_alleles()))
ht = hl.linear_mixed_regression_rows((mt_chr3.GT.n_alt_alleles() - mt_chr3.stats.mean) / mt_chr3.stats.stdev,
model)
assert np.allclose(ht.beta.collect(), beta_fastlmm)
assert np.allclose(ht.p_value.collect(), pval_hail)
@skip_unless_spark_backend()
def test_linear_mixed_regression_low_rank(self):
x_table = hl.import_table(resource('fastlmmCov.txt'), no_header=True, impute=True).key_by('f1')
y_table = hl.import_table(resource('fastlmmPheno.txt'), no_header=True, impute=True, delimiter=' ').key_by('f1')
mt = hl.import_plink(bed=resource('fastlmmTest.bed'),
bim=resource('fastlmmTest.bim'),
fam=resource('fastlmmTest.fam'),
reference_genome=None)
mt = mt.annotate_cols(x=x_table[mt.col_key].f2)
mt = mt.annotate_cols(y=y_table[mt.col_key].f2).cache()
p_path = utils.new_temp_file()
h2_hail = 0.10001626
beta_hail = [0.0073201542, 0.039969148, -0.036727875, 0.29852363, -0.049212500]
pval_hail = [0.90685162, 0.54839177, 0.55001054, 9.85247263e-07, 0.42796507]
mt_chr1 = mt.filter_rows((mt.locus.contig == '1') & (mt.locus.position < 200))
model, _ = hl.linear_mixed_model(y=mt_chr1.y, x=[1, mt_chr1.x], z_t=mt_chr1.GT.n_alt_alleles(), p_path=p_path)
model.fit()
self.assertTrue(model.low_rank)
self.assertAlmostEqual(model.h_sq, h2_hail)
mt_chr3 = mt.filter_rows((mt.locus.contig == '3') & (mt.locus.position < 2005))
mt_chr3 = mt_chr3.annotate_rows(stats=hl.agg.stats(mt_chr3.GT.n_alt_alleles()))
ht = hl.linear_mixed_regression_rows((mt_chr3.GT.n_alt_alleles() - mt_chr3.stats.mean) / mt_chr3.stats.stdev,
model)
assert np.allclose(ht.beta.collect(), beta_hail)
assert np.allclose(ht.p_value.collect(), pval_hail)
@skip_unless_spark_backend()
def test_linear_mixed_regression_pass_through(self):
x_table = hl.import_table(resource('fastlmmCov.txt'), no_header=True, impute=True).key_by('f1')
y_table = hl.import_table(resource('fastlmmPheno.txt'), no_header=True, impute=True, delimiter=' ').key_by('f1')
mt = hl.import_plink(bed=resource('fastlmmTest.bed'),
bim=resource('fastlmmTest.bim'),
fam=resource('fastlmmTest.fam'),
reference_genome=None)
mt = mt.annotate_cols(x=x_table[mt.col_key].f2)
mt = mt.annotate_cols(y=y_table[mt.col_key].f2).cache()
p_path = utils.new_temp_file()
mt_chr1 = mt.filter_rows((mt.locus.contig == '1') & (mt.locus.position < 200))
model, _ = hl.linear_mixed_model(y=mt_chr1.y, x=[1, mt_chr1.x], z_t=mt_chr1.GT.n_alt_alleles(), p_path=p_path)
model.fit(log_gamma=0)
mt_chr3 = mt.filter_rows((mt.locus.contig == '3') & (mt.locus.position < 2005))
mt_chr3 = mt_chr3.annotate_rows(stats=hl.agg.stats(mt_chr3.GT.n_alt_alleles()), foo=hl.struct(bar=hl.rand_norm(0, 1)))
ht = hl.linear_mixed_regression_rows((mt_chr3.GT.n_alt_alleles() - mt_chr3.stats.mean) / mt_chr3.stats.stdev,
model, pass_through=['stats', mt_chr3.foo.bar, mt_chr3.cm_position])
assert mt_chr3.aggregate_rows(hl.agg.all(mt_chr3.foo.bar == ht[mt_chr3.row_key].bar))
| mit |
datapythonista/pandas | pandas/tests/indexes/period/methods/test_astype.py | 2 | 6704 | import numpy as np
import pytest
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
NaT,
Period,
PeriodIndex,
Timedelta,
UInt64Index,
period_range,
)
import pandas._testing as tm
class TestPeriodIndexAsType:
@pytest.mark.parametrize("dtype", [float, "timedelta64", "timedelta64[ns]"])
def test_astype_raises(self, dtype):
# GH#13149, GH#13209
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq="D")
msg = "Cannot cast PeriodArray to dtype"
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_conversion(self):
# GH#13149, GH#13209
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq="D", name="idx")
result = idx.astype(object)
expected = Index(
[Period("2016-05-16", freq="D")] + [Period(NaT, freq="D")] * 3,
dtype="object",
name="idx",
)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = idx.astype(np.int64)
expected = Int64Index(
[16937] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index([str(x) for x in idx], name="idx")
tm.assert_index_equal(result, expected)
idx = period_range("1990", "2009", freq="A", name="idx")
with tm.assert_produces_warning(FutureWarning):
result = idx.astype("i8")
tm.assert_index_equal(result, Index(idx.asi8, name="idx"))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_uint(self):
arr = period_range("2000", periods=2, name="idx")
expected = UInt64Index(np.array([10957, 10958], dtype="uint64"), name="idx")
with tm.assert_produces_warning(FutureWarning):
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_object(self):
idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = PeriodIndex(["2011-01", NaT], freq="M")
exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object)
idx = PeriodIndex(["2011-01-01", NaT], freq="D")
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
# TODO: de-duplicate this version (from test_ops) with the one above
# (from test_period)
def test_astype_object2(self):
idx = period_range(start="2013-01-01", periods=4, freq="M", name="idx")
expected_list = [
Period("2013-01-31", freq="M"),
Period("2013-02-28", freq="M"),
Period("2013-03-31", freq="M"),
Period("2013-04-30", freq="M"),
]
expected = Index(expected_list, dtype=object, name="idx")
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = PeriodIndex(
["2013-01-01", "2013-01-02", "NaT", "2013-01-04"], freq="D", name="idx"
)
expected_list = [
Period("2013-01-01", freq="D"),
Period("2013-01-02", freq="D"),
Period("NaT", freq="D"),
Period("2013-01-04", freq="D"),
]
expected = Index(expected_list, dtype=object, name="idx")
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
assert result[i] == expected[i]
assert result[2] is NaT
assert result.name == expected.name
result_list = idx.tolist()
for i in [0, 1, 3]:
assert result_list[i] == expected_list[i]
assert result_list[2] is NaT
def test_astype_category(self):
obj = period_range("2000", periods=2, name="idx")
result = obj.astype("category")
expected = CategoricalIndex(
[Period("2000-01-01", freq="D"), Period("2000-01-02", freq="D")], name="idx"
)
tm.assert_index_equal(result, expected)
result = obj._data.astype("category")
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = period_range("2000", periods=2, name="idx")
result = obj.astype(bool)
expected = Index(np.array([True, True]), name="idx")
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
def test_period_astype_to_timestamp(self):
pi = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M")
exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], freq="MS")
with tm.assert_produces_warning(FutureWarning):
# how keyword deprecated GH#37982
res = pi.astype("datetime64[ns]", how="start")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"])
exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
with tm.assert_produces_warning(FutureWarning):
# how keyword deprecated GH#37982
res = pi.astype("datetime64[ns]", how="end")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern")
res = pi.astype("datetime64[ns, US/Eastern]")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"], tz="US/Eastern")
exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
with tm.assert_produces_warning(FutureWarning):
# how keyword deprecated GH#37982
res = pi.astype("datetime64[ns, US/Eastern]", how="end")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
| bsd-3-clause |
boland1992/seissuite_iran | seissuite/azimuth/NZ_density.py | 8 | 7985 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 08:44:50 2015
@author: boland
"""
#------------------------------------------------------------------------------
# MODULES
#------------------------------------------------------------------------------
import os
import fiona
import pickle
import pyproj
import datetime
import itertools
import shapefile
import numpy as np
import datetime as dt
import pointshape as ps
from shapely import geometry
import multiprocessing as mp
import matplotlib.pyplot as plt
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from info_dataless import locs_from_dataless
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
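# NOTE: InShape, InPoly, Geodesic, Coordinates, Density and shape_path are used
# below but are neither defined nor imported in this file; they are assumed to
# be provided elsewhere in the seissuite package.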
#------------------------------------------------------------------------------
# VARIABLES
#------------------------------------------------------------------------------
verbose = False
#Enter path to boundary shape file.
shape_boundary = False
#shape_path = "/home/boland/Dropbox/University/UniMelb\
#/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
dataless = True
# Enter path to dataless file
#dataless_path = 'ALL_AUSTRALIA.870093.dataless'
#dataless_path = 'UOM.dataless'
# Enter number new stations desired.
N = 3
# Enter km spacing between path density points.
km_points = 20.0
# Reference elipsoid to calculate distance.
wgs84 = pyproj.Geod(ellps='WGS84')
# Enter number of bins for 2D Histogram density calculation.
nbins = 220
# Enter estimated average shear wave velocity. 3kms-1 is the default!
velocity = 3.0
# Define your ambient noise period range OR individual period in seconds.
global period_range
period_range = [1,40]
dataless_path = 'east-timor/timor.dataless'
dataless_path = '/storage/ANT/spectral_density/USARRAY/full_USARRAY.dataless'
coords = locs_from_dataless(dataless_path)
t0 = dt.datetime.now()
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
print type(UNIQUE_SHAPE)
# Generate InPoly class
INPOLY = InPoly(shape_path)
# Create matplotlib Path object from imported shapefile
#outer_shape = UNIQUE_SHAPE.buffer(1.,resolution=1)
#inner_shape = UNIQUE_SHAPE.buffer(-8,resolution=1)
#outer_poly = INPOLY.poly_from_shape(shape=outer_shape)
#inner_poly = INPOLY.poly_from_shape(shape=inner_shape)
#many_points = INPOLY.rand_poly(poly=outer_poly, N=1e4)
# Scale smaller shape to fit inside larger shape.
#SMALL_SHAPE = scale(UNIQUE_SHAPE, xfact=0.3, yfact=0.3)
#points_in_small_shape = INPOLY.rand_shape(shape=SMALL_SHAPE, IN=False)
# Generate matplotlib Path object for the small scalled polygon
#small_poly = INPOLY.node_poly(SHAPE.external_coords(shape=SMALL_SHAPE))
# Remove points that are outside the buffered_poly
#outer_poly_points = INPOLY.points_in(many_points, poly=outer_poly)
# Remove points that are inside the small_poly
#inner_poly_points = np.asarray(INPOLY.points_in(outer_poly_points,
# poly=inner_poly,
# IN=False))
#cluster_points = np.asarray(kmeans(inner_poly_points, 130)[0])
#plt.figure()
#plt.scatter(inner_poly_points[:,0], inner_poly_points[:,1], c='b')
#plt.scatter(cluster_points[:,0], cluster_points[:,1], c='orange', s=35)
#plt.show()
#-----------------------------------------------------------------------------
# INITIALISE CLASS STATES
#-----------------------------------------------------------------------------
GEODESIC = Geodesic()
COORDS = Coordinates()
INPOLY = InPoly(shape_path)
POLY_NODES = INPOLY.poly_nodes()
#-----------------------------------------------------------------------------
# GENERATE SECOND SET OF VARIABLES AND STATES
#-----------------------------------------------------------------------------
ideal_path = 'ideal_coordinates.pickle'
#if no paths have been done before, start afresh!
#if dataless:
# coords = locs_from_dataless(dataless_path)
# original_coords = coords
#elif os.path.exists(ideal_path):
# f = open(name=ideal_path, mode='rb')
# coords = pickle.load(f)
# f.close()
# decluster the points to desired specifications.
coords = COORDS.decluster(inputs=coords, degree_dist=0.5)
lonmin, lonmax = np.floor(min(coords[:,0])), np.ceil(max(coords[:,0]))
latmin, latmax = np.floor(min(coords[:,1])), np.ceil(max(coords[:,1]))
print lonmin,lonmax,latmin,latmax
plt.figure()
plt.scatter(coords[:,0], coords[:,1])
plt.show()
kappa = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
def spread_paths(coord_list):
return GEODESIC.fast_paths(coord_list)
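# spread_paths is mapped over the per-station coordinate blocks with a
# multiprocessing Pool below to generate the inter-station paths in parallel.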
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, kappa)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
counter, counter2 = 0, 0
#cd Desktop/Link\ to\ SIMULATIONS/Network_Tracks/smarter_model/
grad_ideal, grad_check1, grad_check2, H_avg1, H_avg2 = 0, 0, 0, 0, 0
SHAPE = (1,1)
perc_high = 0.01
low_counter = 0
random_counter = 0
#new_coord = 0
infinite_counter = 0
find_it = []
check_coord = None
use_old_path = False
searches_per_point = 3
factor = 0.05
cluster = False
N_cluster_points = False
while infinite_counter <= 1:
t0 = datetime.datetime.now()
#----------------------------------------------------------------------
# Generate N new point coordinates
#----------------------------------------------------------------------
if cluster:
new_coords = N_cluster_points
else:
new_coords = ps.points_in_shape(shape_path, N)
coords = np.append(coords, new_coords, axis=0)
coord_set = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, coord_set)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print "time to generate new paths", t1-t0
# Append new set of paths now that old set has been deleted.
#create a flattened numpy array of size 2xN from the paths created!
paths1 = GEODESIC.combine_paths(paths)
paths = list(paths)
paths1 = GEODESIC.remove_zeros(paths1)
DENSITY = Density(paths=paths1)
H, xedges, yedges = DENSITY.hist2d(paths=paths1)
grad = DENSITY.hgrad(H=H)
H_avg1 = np.average(H)
grad_check1 = np.std(grad)
H_masked = DENSITY.transform_h(H=H)
grad = DENSITY.transform_grad(grad=grad)
search = np.where(H<0.1*np.average(H))
Hmaxx, Hmaxy = search[1], search[0]
Hmaxx = (lonmax-lonmin)/(nbins) * Hmaxx + lonmin
Hmaxy = (latmax-latmin)/(nbins) * Hmaxy + latmin
# Make sure all low density coordinates ARE within shapefile!
low_density_coords = ps.paths_in_shape(np.column_stack((Hmaxx, Hmaxy)))
#N_cluster_points = kmeans(low_density_coords, N)[0]
density_coords = DENSITY.select_points()
# make sure that your density coords are within the boundary shape
density_coords = INPOLY.points_in(density_coords)
#cluster = True
if counter == 0:
grad_ideal = 1e6
avg_ideal = 0
if grad_check1 < grad_ideal and avg_ideal < H_avg1:
with open(u'ideal_coordinates.pickle', 'wb') as f:
print "\nExporting new ideal coordinates."
pickle.dump(coords, f, protocol=2)
DENSITY.plot_field()#nodes=POLY_NODES)#SHAPE=UNIQUE_SHAPE)
grad_ideal = grad_check1
avg_ideal = H_avg1
coords = COORDS.del_N(N=N, inputs=coords)
paths = COORDS.del_N(N=N, inputs=paths)
paths=list(paths)
counter+=1
t1 = datetime.datetime.now()
print "That loop took: ", t1-t0 | gpl-3.0 |
vortex-ape/scikit-learn | examples/svm/plot_svm_nonlinear.py | 62 | 1119 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linetypes='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
ycaihua/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 33 | 7918 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
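    # Reference implementation: compute the full pairwise distance matrix and
    # keep the k nearest neighbours of each query point.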
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
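    # Naive reference implementation of the kernel density sums, used to
    # validate KDTree.kernel_density in the tests below.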
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
"""Compare gaussian KDE results to scipy.stats.gaussian_kde"""
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
iamaris/pystock | revemp.py | 1 | 1586 | import numpy as np
import matplotlib.pyplot as plt
N = 17
data = np.random.random((N, 4))
#labels = ["ZNGA","GLUU","EA","ATVI","TTWO","COOL","FB","GOOG","YHOO","IGT","KNM","GA","ROVI","GAME","CYOU","PWRD","KONG"]
#NEmp = np.array([2034.,547.,9000.,6790.,2530.,62.,6820.,49830.,12200.,5000.,5540.,1500.,1220.,3130.,6110.,4800.,1120.])/1000.
#Earn = np.array([168.02,44.58,1123.00,1110,195.21,3.24,2502.00,15420.,1132.73,512.8,604.16,92.02,142.45,158.736,180.75,142.5712,48.95])
labels = ["KING","ZNGA","GLUU","EA","ATVI","TTWO","COOL","IGT","KNM","GA","ROVI","GAME","CYOU","PWRD"]
NEmp = np.array([665,2034.,547.,9000.,6790.,2530.,62.,5000.,5540.,1500.,1220.,3130.,6110.,4800.])
Earn = np.array([592.05, 168.02,44.58,1123.00,1110,195.21,3.24,512.8,604.16,92.02,142.45,158.736,180.75,142.5712])
ratio = Earn/NEmp
ratioS = (Earn/NEmp)*500
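# NEmp holds head counts and Earn holds Q1 2014 revenue in millions of dollars
# (see the axis labels below); ratioS scales each marker by revenue per employee.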
#plt.subplots_adjust(bottom = 0.1)
plt.plot([0,10000],[0,800],"--")
plt.scatter(
# NEmp, Earn, marker = 'o', c = data[:, 2], s = ratio*10000,
NEmp, Earn, marker = 'o', c = Earn,s = ratioS,
cmap = plt.get_cmap('Spectral'))
for label, x, y in zip(labels, NEmp, Earn):
plt.annotate(
label,
xy = (x, y), xytext = (10,4),
textcoords = 'offset points', ha = 'right', va = 'bottom')
#bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
#arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
#plt.scatter(NEmp,Earn)
plt.xlim(xmax=10000)
plt.xlim(xmin=-500)
plt.ylim(ymin=0)
plt.xlabel('Number of Employees')
plt.ylabel('Q1 2014 Revenue (Million Dollar)')
plt.show()
| apache-2.0 |
mshakya/PyPiReT | piret/checks/fasta.py | 1 | 2909 | #! /usr/bin/env python
"""Check fasta."""
import Bio.SeqIO
import re
import pandas as pd
import sys
class CheckFasta():
"""Check different instances of fasta."""
def __init__(self):
"""Initialize."""
# self.design_file = design_file
def confirm_fasta(self, fasta_file):
"""Check if the given file is a fasta."""
with open(fasta_file, 'r') as f_in:
# read the first line
fasta_header = f_in.readline()
# check if there is a >
if fasta_header[:1] != ">":
raise TypeError
            elif len(fasta_header.split(">")[1].strip()) == 0:
raise TypeError
try:
second_line = next(f_in)
                for x in set(second_line.strip()):
if x in ['A', 'T', 'G', 'C', 'N']:
pass
else:
# TODO: check this for all sequences
raise TypeError
except StopIteration:
raise TypeError
return True
def duplicate(self, fasta_file):
"""Check if a fasta file has duplicate sequences (based on name)."""
        seqs = Bio.SeqIO.parse(fasta_file, "fasta")
seq_header = []
for seq in seqs:
seq_header.append(seq.description)
if len(seq_header) != len(set(seq_header)):
raise ValueError
def remove_special_chars(self, fasta_file, out_fasta):
"""Remove special characters from sequence name."""
        seqs = list(Bio.SeqIO.parse(fasta_file, "fasta"))
        for seq in seqs:
            seq.id = re.sub('[|]', '_', seq.id)
        Bio.SeqIO.write(seqs, out_fasta, "fasta")
def gff_fasta(fasta_file, gff_file):
"""Check if gff file and fasta file are compatible."""
    seqs = Bio.SeqIO.parse(fasta_file, "fasta")
    df = pd.read_csv(gff_file, sep="\t", comment='#', header=None)
    seq_header = []
    for seq in seqs:
        seq_header.append(seq.id)
    fa_id = set(seq_header)
    gff_id = set(df[0].tolist())
if fa_id == gff_id:
pass
else:
sys.exit('GFF file and Reference fasta do not have same ids!')
def match_gff_fasta(fasta_file, gff_file, out_fasta):
"""Rename the sequence baesd on the GFF file."""
seqs = Bio.SeqIO.parse(fasta_file)
df = pd.read_csv(gff_file, sep="\t", comment='#', header=None)
acc_ids = set(df[0].tolist())
for seq in seqs:
for acc in acc_ids:
if acc in seq.id:
seq.description = acc + " " + seq.description
Bio.SeqIO.write(seq, out_fasta, "fasta")
#TODO: implement these checks
def check_quotations(gff_file):
"""Check if there are quotations in gff file."""
with open(gff_file) as g:
content = g.readlines()
for l in content:
if '"' in l:
raise TypeError("There is a quotation in above line, remove them first")
| bsd-3-clause |
hande-qmc/hande | tools/pyhande/tests/test_extract.py | 1 | 32230 | """Test extract.py."""
import unittest
import copy
import warnings
import pandas as pd
import numpy as np
import pyhande.extract as extract
class ExtendedTestSetUp():
"""Helper for the following classes, initialises common data."""
def __init__(self):
self.filename1 = "tests/hande_files/ueg.out"
self.filename2 = "tests/hande_files/ueg2.out"
self.columns = [
'iterations', 'Shift', r'\sum H_0j N_j', 'N_0', '# H psips',
'# states', '# spawn_events', 'R_spawn', 'time'
]
self.exp_data1 = pd.DataFrame([
[10, 0.00000000e+00, -3.78518016e-01, 2.70000000e+00,
1.42000000e+02, 79, 50, 4.87900000e-01, 4.00000000e-04],
[20, -1.10017479e-01, -8.20941670e-01, 3.00000000e+00,
1.28200000e+03, 782, 463, 7.62000000e-01, 8.0000000e-04]
], columns=self.columns)
self.exp_data2 = pd.DataFrame([
[10, 0., -0.08311986, 2., 8.11834868, 7, 3, 0.2765, 0.],
[20, 0., -0.5709596, 2., 73.67254134, 66, 33, 0.3288, 0.],
[30, -1.40493959e-01, -9.03119184e-01, 2.00000000e+00,
1.71159164e+02, 162, 104, 3.77400000e-01, 0.0]
], columns=self.columns)
self.exp_metadata = {
'system': {
'nbasis': 38, 'nel': 14, 'nvirt': 24, 'Ms': 0, 'nalpha': 7,
'nbeta': 7, 'nvirt_alpha': 12, 'nvirt_beta': 12, 'nsym': 19,
'sym0': 1, 'sym_max': 19, 'nsym_tot': 19, 'sym0_tot': 1,
'sym_max_tot': 19, 'symmetry': 1, 'tot_sym': False,
'aufbau_sym': True, 'max_number_excitations': 14,
'ueg': {
'r_s': 1.0, 'ecutoff': 1.0, 'k_fermi': 1.91915829,
'E_fermi': 1.84158428, 'ktwist': [0.0, 0.0, 0.0],
'L': [3.88512994, 3.88512994, 3.88512994]
}
},
'qmc': {
'rng_seed': 1472, 'real_amplitudes': False,
'real_amplitude_force_32': False, 'spawn_cutoff': 0.01,
'excit_gen': 'renorm', 'pattempt_update': False,
'pattempt_zero_accum_data': False, 'pattempt_single': 0.0,
'pattempt_double': 1.0, 'pattempt_parallel': 0.0, 'tau': 0.1,
'tau_search': False, 'vary_shift_from': 0.0,
'vary_shift_from_proje': False, 'initial_shift': 0.0,
'shift_damping': 0.05, 'walker_length': 50000,
'spawned_walker_length': 5000, 'D0_population': 2.0,
'target_particles': 60.0, 'target_reference': False,
'initiator_approx': False, 'initiator_pop': 3.0, 'ncycles': 10,
'nreport': 3, 'power_pitzer_min_weight': 0.01,
'quasi_newton': False, 'quasi_newton_threshold': 1e-05,
'quasi_newton_value': 1.0, 'use_mpi_barriers': False
},
'fciqmc': {
'select_ref_det_every_nreports': 2147483647,
'init_spin_inv_D0': False, 'ref_det_factor': 1.5,
'non_blocking_comm': False, 'doing_load_balancing': False,
'trial_function': 'single_basis', 'guiding_function': 'none',
'quadrature_initiator': True, 'replica_tricks': False
},
'semi_stoch': {
'start_iter': 1, 'shift_iter': -1, 'space_type': 'none',
'target_size': 0, 'write_determ_space': False,
'projection_mode': 'separate', 'read_id': 2147483647,
'write_id': 2147483647, 'ci_space': {'ex_level': -1}
},
'restart': {
'read_restart': False, 'read_id': 2147483647,
'write_restart': False, 'write_id': 2147483647,
'write_freq': 2147483647, 'write_restart_shift': False,
'write_shift_id': 2147483647, 'restart_rng': True
},
'blocking': {
'blocking_on_the_fly': False, 'start_save_frequency': -1,
'start_point_number': -1, 'filename': 'BLOCKING',
'start_point': -1, 'error_limit': 0.0,
'blocks_used': 2147483647, 'min_blocks_used': 10,
'auto_shift_damping': False, 'shift_damping_precision': 2.0,
'force_shift_damping_opt': False
},
'load balancing': {
'nslots': 1, 'pop': 1000, 'percent': 0.05, 'max_attempts': 2,
'write_info': False
},
'reference': {
'det': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
'det_ms': 0, 'det_symmetry': 1, 'H00': 13.60355734,
'F0': 15.69278015, 'hilbert_space_det': [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
], 'hilbert_space_det_ms': 0,
'hilbert_space_det_symmetry': 1, 'ex_level': 14
},
'logging_in': {
'calc': 0, 'calc_file': 'CALC', 'spawn': 0,
'spawn_file': 'SPAWN', 'death': 0, 'death_file': 'DEATH',
'start_iter': 0, 'end_iter': 9223372036854775807
},
'logging': {
'write_highlevel_values': False, 'calc_unit': 2147483647,
'write_successful_spawn': False, 'write_failed_spawn': False,
'spawn_unit': 2147483647, 'write_successful_death': False,
'write_failed_death': False, 'death_unit': 2147483647
},
'calc_type': 'FCIQMC', 'nblooms': 6253.0, 'max_bloom': 9.0,
'mean_bloom': 3.93, 'input': [
'', '-- Create output with:',
'-- $[HANDE DIR]/bin/hande.x ueg.lua > ueg.out 2> ueg.err',
'-- Note that these settings are just for testing...',
'sys = ueg {', 'dim = 3,', 'nel = 14,', 'ms = 0,',
'cutoff = 1,', '}', '', 'fciqmc {', 'sys = sys,',
'qmc = {', 'tau = 0.1,', 'rng_seed = 1472,', 'init_pop = 2,',
'mc_cycles = 10,', 'nreports = 3,', 'target_population = 60,',
'state_size = 50000,', 'spawned_state_size = 5000,', '},', '}',
''
], 'UUID': 'c04c1500-cfea-4cc5-a90b-f525b4b36ec5',
'wall_time': 0.07, 'cpu_time': 0.07, 'calculation_time': 0.07
}
# metadata2 is similar:
self.exp_metadata2 = copy.deepcopy(self.exp_metadata)
self.exp_metadata2['qmc']['rng_seed'] = 389
self.exp_metadata2['qmc']['real_amplitudes'] = True
self.exp_metadata2['qmc']['tau'] = 0.03
self.exp_metadata2['UUID'] = '0a5946bd-e0f6-4971-9d3a-6ae056ea9ca4'
self.exp_metadata2['wall_time'] = 0.0
self.exp_metadata2['cpu_time'] = 0.0
self.exp_metadata2['calculation_time'] = 0.0
self.exp_metadata2['input'][2] = \
'-- $[HANDE DIR]/bin/hande.x ueg2.lua > ueg2.out 2> ueg2.err'
self.exp_metadata2['input'][8] = 'cutoff = 1.0,'
self.exp_metadata2['input'][14] = 'tau = 0.03,'
self.exp_metadata2['input'][15] = 'rng_seed = 389,'
self.exp_metadata2['input'][22] = 'real_amplitudes = true,'
self.exp_metadata2['input'][23] = '},'
self.exp_metadata2['input'][24] = '}'
self.exp_metadata2['input'].append('')
del self.exp_metadata2['nblooms']
del self.exp_metadata2['max_bloom']
del self.exp_metadata2['mean_bloom']
class TestExtractDataSets(unittest.TestCase, ExtendedTestSetUp):
"""Test extract.extract_data_sets().
    Don't test "private" functions (i.e. functions whose names start with
    "_") at the moment. [todo] possibly add tests for them.
"""
def setUp(self):
ExtendedTestSetUp.__init__(self)
def test_basic_input(self):
"""Test basic input."""
data = extract.extract_data_sets([self.filename1, self.filename2])
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
pd.testing.assert_frame_equal(data[1][1], self.exp_data2)
self.assertDictEqual(data[0][0], self.exp_metadata)
self.assertDictEqual(data[1][0], self.exp_metadata2)
def test_only_one(self):
"""Only pass one file (in list)."""
data = extract.extract_data_sets([self.filename1])
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_compressed(self):
"""Pass a list of a file, differently compressed."""
data = extract.extract_data_sets([
self.filename1+".bz2", self.filename1+".gz", self.filename1+".xz"
])
for i in range(3):
pd.testing.assert_frame_equal(data[i][1], self.exp_data1)
self.assertDictEqual(data[i][0], self.exp_metadata)
def test_unchanged_mutable(self):
"""Check that mutable objects, such as pd DataFrames, don't
change when they shouldn't.
"""
list_filename = copy.copy([self.filename1])
_ = extract.extract_data_sets([self.filename1])
self.assertListEqual(list_filename, [self.filename1])
class TestExtractData(unittest.TestCase, ExtendedTestSetUp):
"""Test extract.extract_data()."""
def setUp(self):
ExtendedTestSetUp.__init__(self)
def test_basic_fciqmc_input(self):
"""Test basic input."""
data = extract.extract_data(self.filename1)
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_bz2_fciqmc(self):
"""Extract a compressed file - .bz2."""
data = extract.extract_data(self.filename1+".bz2")
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_gz_fciqmc(self):
"""Extract a compressed file - .gz."""
data = extract.extract_data(self.filename1+".gz")
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_xz_fciqmc(self):
"""Extract a compressed file - .xz."""
data = extract.extract_data(self.filename1+".xz")
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_multiple_fciqmc(self):
"""Have two calculations in this output."""
data = extract.extract_data("tests/hande_files/multi_ueg.out")
# First calculation.
# Modify some previous data for comparison
self.exp_data1.at[0, 'time'] = 0.0
self.exp_data1.at[1, 'time'] = 0.0004
self.exp_metadata['input'][2] = (
'-- $[HANDE DIR]/bin/hande.x multi.ueg.lua > multi.ueg.out 2> '
'multi.ueg.err'
)
self.exp_metadata['input'].append('')
self.exp_metadata['input'][12:] = self.exp_metadata['input'][11:-1]
self.exp_metadata['input'][11] = 'for i=1,2 do'
self.exp_metadata['input'][-1] = 'end'
self.exp_metadata['input'].append('')
self.exp_metadata['UUID'] = 'f11d432f-2eec-45e6-801b-56e458b8f3c0'
self.exp_metadata['wall_time'] = 0.12
self.exp_metadata['cpu_time'] = 0.12
self.exp_metadata['calculation_time'] = 0.06
# Test.
pd.testing.assert_frame_equal(data[0][1], self.exp_data1)
self.assertDictEqual(data[0][0], self.exp_metadata)
# Second calculation.
# Modify some previous data for comparison
self.exp_data1.at[0, 'time'] = 0.0004
# [todo] This needs to be investigated!!! Why not 0 as before?
self.exp_metadata['qmc']['pattempt_parallel'] = 5.28562263e+180
# Test.
pd.testing.assert_frame_equal(data[1][1], self.exp_data1)
self.assertDictEqual(data[1][0], self.exp_metadata)
def test_replica_fciqmc(self):
"""Test replica tricks extraction."""
data = extract.extract_data("tests/hande_files/replica_ueg.out")
exp_data = pd.DataFrame([
[10, 0.00000000e+00, -6.72647815e-01, 2.00000000e+00,
3.36000000e+02, 0.00000000e+00, -3.13519972e-01, 2.00000000e+00,
1.98000000e+02, 302, 193, 7.69100000e-01, 0.00000000e+00],
[20, -1.13434177e-01, -5.16707132e-01, 2.00000000e+00,
3.24800000e+03, -1.11353877e-01, -4.05281916e-01, 2.00000000e+00,
1.83600000e+03, 3081, 1768, 1.35440000e+00, 2.00000000e-03]
], columns=['iterations', 'Shift_1', r'\sum H_0j N_j_1', 'N_0_1',
'# H psips_1', 'Shift_2', r'\sum H_0j N_j_2', 'N_0_2',
'# H psips_2', '# states', '# spawn_events', 'R_spawn',
'time'])
keys_to_drop = ['input', 'wall_time', 'cpu_time', 'calculation_time',
'UUID', 'nblooms', 'max_bloom', 'mean_bloom']
for key_to_drop in keys_to_drop:
# Drop some keys as they'll differ.
del data[0][0][key_to_drop]
del self.exp_metadata[key_to_drop]
self.exp_metadata['fciqmc']['replica_tricks'] = True
pd.testing.assert_frame_equal(data[0][1], exp_data)
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_basic_ccmc_input(self):
"""Test CCMC."""
data = extract.extract_data("tests/hande_files/ccmc_ueg.out")
self.columns.append(self.columns[-1])
self.columns[-3:-1] = ['# attempts', 'R_spawn']
exp_data = pd.DataFrame([
[10, 0., -0.59349668, 1.86285714, 27., 12, 10, 18, 0.5119, 0.],
[20, 0., -4.40680439e-01, 2.02475127e+00, 3.90000000e+01, 26,
19, 42, 4.79900000e-01, 4.00000000e-04],
[30, -1.53365134e-02, -5.83660200e-01, 2.05614830e+00,
5.30000000e+01, 34, 12, 55, 3.91400000e-01, 0.0]
], columns=self.columns)
pd.testing.assert_frame_equal(data[0][1], exp_data)
self.exp_metadata['UUID'] = 'acc004f5-bbc6-4b5c-a831-406b90239e98'
self.exp_metadata['calc_type'] = 'CCMC'
self.exp_metadata['calculation_time'] = 0.0
self.exp_metadata['cpu_time'] = 0.01
self.exp_metadata['wall_time'] = 0.01
self.exp_metadata['reference']['ex_level'] = 3
self.exp_metadata['qmc']['target_particles'] = 30.0
del self.exp_metadata['fciqmc']
del self.exp_metadata['load balancing']
self.exp_metadata['ccmc'] = {
'cluster_multispawn_threshold': 1.79769313e+308,
'density_matrices': False,
'density_matrix_file': 'RDM', 'even_selection': False,
'full_nc': False, 'linked': False, 'move_freq': 5,
'multiref': False, 'vary_shift_reference': False
}
self.exp_metadata['input'] = [
'', '-- Create output with:',
'-- $[HANDE DIR]/bin/hande.x ccmc_ueg.lua > ccmc_ueg.out 2> '
'ccmc_ueg.err',
'-- Note that these settings are just for testing...',
'sys = ueg {', 'dim = 3,', 'nel = 14,', 'ms = 0,', 'cutoff = 1,',
'}', '', 'ccmc {', 'sys = sys,', 'qmc = {', 'tau = 0.1,',
'rng_seed = 1472,', 'init_pop = 2,', 'mc_cycles = 10,',
'nreports = 3,', 'target_population = 30,', 'state_size = 50000,',
'spawned_state_size = 5000,', '},', 'reference = {',
'ex_level = 3,', '},', '}', ''
]
self.exp_metadata['max_bloom'] = 457.0
self.exp_metadata['mean_bloom'] = 20.79
self.exp_metadata['nblooms'] = 227.0
self.assertDictEqual(data[0][0], self.exp_metadata)
def test_basic_dmqmc_input(self):
"""Test DMQMC."""
data = extract.extract_data("tests/hande_files/dmqmc_ueg.out")
exp_data = pd.DataFrame([
[0, 3.41598425e-01, 0.00000000e+00, 2.00000000e+02, 100, 48,
2.49500000e-01, 6.00000000e-03],
[2, 6.10389961e-01, 0.00000000e+00, 1.01000000e+02, 58, 33,
2.59800000e-01, 0.00000000e+00],
[4, 7.80323874e-01, 0.00000000e+00, 5.90000000e+01, 41, 26,
2.74300000e-01, 0.00000000e+00],
[0, 3.41598425e-01, 0.00000000e+00, 2.00000000e+02, 97, 45,
2.51500000e-01, 6.00000000e-03],
[2, 3.77511292e-01, 0.00000000e+00, 1.01000000e+02, 93, 46,
2.41400000e-01, 0.00000000e+00],
[4, 4.64434757e-01, 0.00000000e+00, 9.40000000e+01, 79, 36,
2.50600000e-01, 0.00000000e+00]
], columns=[
'iterations', 'Shift', 'Trace', r'# H psips',
r'# states', r'# spawn_events', 'R_spawn', 'time'
])
exp_metadata = {
'system': {
'nbasis': 38, 'nel': 14, 'nvirt': 24, 'Ms': 0, 'nalpha': 7,
'nbeta': 7, 'nvirt_alpha': 12, 'nvirt_beta': 12, 'nsym': 19,
'sym0': 1, 'sym_max': 19, 'nsym_tot': 19, 'sym0_tot': 1,
'sym_max_tot': 19, 'symmetry': 1, 'tot_sym': False,
'aufbau_sym': True, 'max_number_excitations': 14, 'ueg': {
'r_s': 1.0, 'ecutoff': 1.0, 'k_fermi': 1.91915829,
'E_fermi': 1.84158428, 'ktwist': [0.0, 0.0, 0.0],
'L': [3.88512994, 3.88512994, 3.88512994]
}},
'qmc': {
'rng_seed': 1472, 'real_amplitudes': False,
'real_amplitude_force_32': False, 'spawn_cutoff': 0.01,
'excit_gen': 'renorm', 'pattempt_update': False,
'pattempt_zero_accum_data': False, 'pattempt_single': 0.0,
'pattempt_double': 1.0, 'pattempt_parallel': 0.0, 'tau': 0.05,
'tau_search': False, 'vary_shift_from': 0.0,
'vary_shift_from_proje': False, 'initial_shift': 0.0,
'shift_damping': 0.05, 'walker_length': 50000,
'spawned_walker_length': 5000, 'D0_population': 200.0,
'target_particles': 100.0, 'target_reference': False,
'initiator_approx': False, 'initiator_pop': 3.0, 'ncycles': 2,
'nreport': 2, 'power_pitzer_min_weight': 0.01,
'quasi_newton': False, 'quasi_newton_threshold': 1e-05,
'quasi_newton_value': 1.0, 'use_mpi_barriers': False
},
'dmqmc': {
'beta_loops': 2, 'replica_tricks': False, 'start_av_rdm': 0,
'weighted_sampling': False, 'vary_weights': False,
'find_weights': False, 'find_weights_start': 0,
'calc_excit_dist': False, 'all_sym_sectors': False,
'all_spin_sectors': False, 'initiator_level': -1,
'sampling_probs': '[]', 'finish_varying_weights': 0,
'fermi_temperature': False, 'target_beta': 1.0,
'mom_dist_kmax': 0.0, 'struc_fac_qmax': 0.0
},
'ipdmqmc': {
'ipdmqmc': False, 'initial_matrix': 'hartree_fock',
'grand_canonical_initialisation': False, 'symmetric': True,
'chem_pot': 0.0, 'metropolis_attempts': 0
},
'rdm': {
'nrdms': 0, 'spawned_length': 0, 'doing_rdm': False,
'calc_ground_rdm': False, 'calc_inst_rdm': False,
'doing_concurrence': False, 'doing_vn_entropy': False,
'output_rdm': False
},
'operators': {
'energy': False, 'energy_squared': False,
'kinetic_energy': False, 'potential_energy': False,
'H0_energy': False, 'HI_energy': False,
'correlation_fn': False, 'staggered_mad_ind': False,
'rdm_r2': False, 'full_r2': False, 'mom_dist': False
},
'restart': {
'read_restart': False, 'read_id': 2147483647,
'write_restart': False, 'write_id': 2147483647,
'write_freq': 2147483647, 'write_restart_shift': False,
'write_shift_id': 2147483647, 'restart_rng': True
},
'load balancing': {
'nslots': 1, 'pop': 1000, 'percent': 0.05, 'max_attempts': 2,
'write_info': False
},
'reference': {
'det': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
'det_ms': 0, 'det_symmetry': 1, 'H00': 13.60355734,
'F0': 15.69278015, 'hilbert_space_det': [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
], 'hilbert_space_det_ms': 0,
'hilbert_space_det_symmetry': 1, 'ex_level': 14
},
'calc_type': 'DMQMC', 'input': [
'', '-- Create output with:', '-- $[HANDE DIR]/bin/hande.x '
'dmqmc_ueg.lua > dmqmc_ueg.out 2> dmqmc_ueg.err',
'-- Note that these settings are just for testing...',
'sys = ueg {', 'dim = 3,', 'nel = 14,', 'ms = 0,',
'cutoff = 1,', '}', '', 'dmqmc {', 'sys = sys,', 'qmc = {',
'tau = 0.05,', 'rng_seed = 1472,', 'init_pop = 200,',
'mc_cycles = 2,', 'nreports = 2,', 'target_population = 100,',
'state_size = 50000,', 'spawned_state_size = 5000,', '},',
'dmqmc = {', 'beta_loops = 2,', '}', '}', ''
],
'UUID': '8226e3f0-ee64-4128-9569-225da1f8b913', 'wall_time': 0.03,
'cpu_time': 0.03, 'calculation_time': 0.03
}
pd.testing.assert_frame_equal(data[0][1], exp_data)
self.assertDictEqual(data[0][0], exp_metadata)
def test_basic_fci_input(self):
"""Test FCI."""
data = extract.extract_data("tests/hande_files/fci_ueg.out")
exp_data = pd.Series([
-1.78882976e-02, 9.45177589e+00, 9.45177589e+00, 9.52511643e+00,
9.52511643e+00, 9.52511643e+00, 9.88957877e+00, 1.90174844e+01,
1.90174844e+01, 1.90174844e+01, 1.90320038e+01, 1.90320038e+01,
1.90827874e+01, 1.90827874e+01, 1.90827874e+01, 1.92329356e+01,
1.92329356e+01, 1.92329356e+01, 1.97091785e+01
], index=list(range(1, 20)), name='FCI (LAPACK)')
exp_data.index.name = 'Eigenvalue'
exp_metadata = {
'system': {
'nbasis': 38, 'nel': 2, 'nvirt': 36, 'Ms': 0, 'nalpha': 1,
'nbeta': 1, 'nvirt_alpha': 18, 'nvirt_beta': 18, 'nsym': 19,
'sym0': 1, 'sym_max': 19, 'nsym_tot': 19, 'sym0_tot': 1,
'sym_max_tot': 19, 'symmetry': 1, 'tot_sym': False,
'aufbau_sym': True, 'max_number_excitations': 2, 'ueg': {
'r_s': 1.0, 'ecutoff': 1.0, 'k_fermi': 1.91915829,
'E_fermi': 1.84158428, 'ktwist': [0.0, 0.0, 0.0],
'L': [2.0309826, 2.0309826, 2.0309826]
}},
'reference': {'ex_level': 2}, 'calc_type': 'FCI', 'input': [
'', '-- Create output with:', '-- $[HANDE DIR]/bin/hande.x '
'fci_ueg.lua > fci_ueg.out 2> fci_ueg.err',
'-- Note that these settings are just for testing...',
'sys = ueg {', 'dim = 3,', 'nel = 2,', 'ms = 0,',
'cutoff = 1,', '}', '', 'fci {', 'sys = sys,', '}', ''
],
'UUID': '7f1b774f-975b-4ec0-a626-185879d0e7a7', 'wall_time': 0.0,
'cpu_time': 0.0, 'calculation_time': 0.0, 'fci_in': {
'analyse_fci_wfn': 0, 'block_size': 64,
'determinant_file': 'DETS', 'direct_lanczos': False,
'hamiltonian_file': 'HAMIL', 'lanczos_string_len': 40,
'nlanczos_eigv': 5, 'print_fci_wfn': 0,
'print_fci_wfn_file': 'FCI_WFN', 'write_determinants': False,
'write_hamiltonian': False
}
}
pd.testing.assert_series_equal(data[0][1], exp_data)
self.assertDictEqual(data[0][0], exp_metadata)
def test_basic_simple_fciqmc_input(self):
"""Test Simple FCIQMC."""
data = extract.extract_data("tests/hande_files/simple_fciqmc_ueg.out")
exp_data = pd.DataFrame([
[10, 0., -4.70181114e-02, 8.00000000e+00, 9.00000000e+00, 0, 0,
5.00000000e-02, 0.0],
[20, -8.78004297e-03, -2.29343899e-01, 8.00000000e+00,
1.00000000e+01, 0, 0, 6.67000000e-02, 0.00000000e+00],
[30, -0.04794701, -0.33774677, 8., 16., 0, 0, 0.08, 0.]
], columns=self.columns)
del_keys = [
'load balancing', 'blocking', 'fciqmc', 'logging', 'logging_in',
'max_bloom', 'mean_bloom', 'nblooms', 'semi_stoch'
]
for key in del_keys:
del self.exp_metadata[key]
exp_metadata = {
'system': {
'nbasis': 38, 'nel': 2, 'nvirt': 36, 'Ms': 0, 'nalpha': 1,
'nbeta': 1, 'nvirt_alpha': 18, 'nvirt_beta': 18, 'nsym': 19,
'sym0': 1, 'sym_max': 19, 'nsym_tot': 19, 'sym0_tot': 1,
'sym_max_tot': 19, 'symmetry': 1, 'tot_sym': False,
'aufbau_sym': True, 'max_number_excitations': 2,
'ueg': {
'r_s': 1.0, 'ecutoff': 1.0, 'k_fermi': 1.91915829,
'E_fermi': 1.84158428, 'ktwist': [0.0, 0.0, 0.0],
'L': [2.0309826, 2.0309826, 2.0309826]
}},
'qmc': {
'rng_seed': 1472, 'real_amplitudes': False,
'real_amplitude_force_32': False, 'spawn_cutoff': 0.01,
'excit_gen': 'renorm', 'pattempt_update': False,
'pattempt_zero_accum_data': False, 'pattempt_single': -1.0,
'pattempt_double': -1.0, 'pattempt_parallel': -1.0,
'tau': 0.06, 'tau_search': False, 'vary_shift_from': 0.0,
'vary_shift_from_proje': False, 'initial_shift': 0.0,
'shift_damping': 1.79769313e+308, 'walker_length': 0,
'spawned_walker_length': 0, 'D0_population': 8.0,
'target_particles': 8.0, 'target_reference': False,
'initiator_approx': False, 'initiator_pop': 3.0, 'ncycles': 10,
'nreport': 3, 'power_pitzer_min_weight': 0.01,
'quasi_newton': False, 'quasi_newton_threshold': 1e-05,
'quasi_newton_value': 1.0, 'use_mpi_barriers': False
},
'restart': {
'read_restart': False, 'read_id': 2147483647,
'write_restart': False, 'write_id': 2147483647,
'write_freq': 2147483647, 'write_restart_shift': False,
'write_shift_id': 2147483647, 'restart_rng': True
},
'reference': {
'det': [4, 13], 'det_ms': 0, 'det_symmetry': 1, 'H00': 9.57078,
'F0': 0.0, 'ex_level': 2
},
'sparse_hamil': True, 'calc_type': 'Simple FCIQMC',
'input': [
'', '-- Create output with:', '-- $[HANDE DIR]/bin/hande.x '
'simple_fciqmc_ueg.lua > simple_fciqmc_ueg.out 2> '
'simple_fciqmc_ueg.err', '-- Note that these settings are '
'just for testing...', 'sys = ueg {', 'dim = 3,', 'nel = 2,',
'ms = 0,', 'cutoff = 1,', '}', '', 'simple_fciqmc {',
'sys = sys,', 'sparse = true,', 'qmc = {', 'tau = 0.06,',
'rng_seed = 1472,', 'init_pop = 8,', 'mc_cycles = 10,',
'nreports = 3,', 'target_population = 8,',
'state_size = 50000,', 'spawned_state_size = 5000,', '},', '}',
''
], 'UUID': 'c8132a6b-bc15-4586-adf7-80d7c3119e3c',
'wall_time': 0.0, 'cpu_time': 0.0, 'calculation_time': 0.0
}
pd.testing.assert_frame_equal(data[0][1], exp_data)
self.assertDictEqual(data[0][0], exp_metadata)
def test_hilbert(self):
"""Test MC Hilbert space size estimation extraction."""
data = extract.extract_data("tests/hande_files/hilbert_ueg.out")
exp_data = pd.DataFrame([
[1, 15233700.0, 15233700.0, np.nan],
[2, 20311600.0, 17772650.0, 2538951.0],
[3, 10155800.0, 15233700.0, 2931728.0]
], columns=['iterations', 'space size', 'mean', 'std. err.'])
exp_metadata = {
'system': {
'nbasis': 38, 'nel': 14, 'nvirt': 24, 'Ms': 0, 'nalpha': 7,
'nbeta': 7, 'nvirt_alpha': 12, 'nvirt_beta': 12, 'nsym': 19,
'sym0': 1, 'sym_max': 19, 'nsym_tot': 19, 'sym0_tot': 1,
'sym_max_tot': 19, 'symmetry': 2147483647, 'tot_sym': False,
'aufbau_sym': True, 'max_number_excitations': 14, 'ueg': {
'r_s': 1.0, 'ecutoff': 1.0, 'k_fermi': 1.91915829,
'E_fermi': 1.84158428, 'ktwist': [0.0, 0.0, 0.0],
'L': [3.88512994, 3.88512994, 3.88512994]
}},
'ex_level': 14, 'nattempts': 1000, 'ncycles': 3, 'occ_list': [],
'rng_seed': -563090706, 'calc_type': 'Hilbert space',
'input': ['', '-- Create output with:',
'-- $[HANDE DIR]/bin/hande.x hilbert_ueg.lua > '
'hilbert_ueg.out 2> hilbert_ueg.err',
'-- Note that these settings are just for testing...',
'sys = ueg {', 'dim = 3,', 'nel = 14,', 'ms = 0,',
'cutoff = 1,', '}', '', 'hilbert_space {', 'sys = sys,',
'hilbert = {', 'rng_seed = -563090706,',
'nattempts = 1000,', 'ncycles = 3,', '},', '}', ''
],
'UUID': '6c228dba-68c9-4051-b47a-5704fe261ad8', 'wall_time': 0.0,
'cpu_time': 0.0, 'calculation_time': 0.0
}
pd.testing.assert_frame_equal(data[0][1], exp_data)
self.assertDictEqual(data[0][0], exp_metadata)
def test_canonical_estimates(self):
"""Test canonical estimates extraction."""
data = extract.extract_data("tests/hande_files/cano_ueg.out")
exp_data = pd.DataFrame([
[1, 2.7943370015E+01, -1.5740506968E+00, 2.5170736424E+01,
-1.4249954801E+00, 9.0253160272E-01, 3.4800000000E-02],
[2, 2.7726172803E+01, -1.5817153286E+00, 2.5010826838E+01,
-1.4341367788E+00, 9.0391912264E-01, 3.4700000000E-02]
], columns=[
'iterations', '<T>_0', '<V>_0', r'Tr(T\rho_HF)',
r'Tr(V\rho_HF)', r'Tr(\rho_HF)', 'N_ACC/N_ATT'
])
exp_metadata = {
'system': {
'nbasis': 38, 'nel': 14, 'nvirt': 24, 'Ms': 0, 'nalpha': 7,
'nbeta': 7, 'nvirt_alpha': 12, 'nvirt_beta': 12, 'nsym': 19,
'sym0': 1, 'sym_max': 19, 'nsym_tot': 19, 'sym0_tot': 1,
'sym_max_tot': 19, 'symmetry': 2147483647, 'tot_sym': False,
'aufbau_sym': True, 'max_number_excitations': 14, 'ueg': {
'r_s': 1.0, 'ecutoff': 1.0, 'k_fermi': 1.91915829,
'E_fermi': 1.84158428, 'ktwist': [0.0, 0.0, 0.0],
'L': [3.88512994, 3.88512994, 3.88512994]
}}, 'all_spin_sectors': False, 'beta': 0.2,
'fermi_temperature': False, 'nattempts': 10000,
'free_energy_corr': -96.66701818, 'ncycles': 2,
'chem_pot': -0.64446167, 'rng_seed': 748,
'calc_type': 'Canonical energy',
'input': [
'', '-- Create output with:',
'-- $[HANDE DIR]/bin/hande.x cano_ueg.lua > cano_ueg.out 2> '
'cano_ueg.err', '-- Note that these settings are just for '
'testing...', 'sys = ueg {', 'dim = 3,', 'nel = 14,',
'ms = 0,', 'cutoff = 1,', '}', '', 'canonical_estimates {',
'sys = sys,', 'canonical_estimates = {', 'ncycles = 2,',
'nattempts = 10000,', 'beta = 0.2,', 'rng_seed = 748', '},',
'}', ''
], 'UUID': '98db19fc-8ab2-4f5a-82d0-5c4c10340a1c',
'wall_time': 0.0, 'cpu_time': 0.0, 'calculation_time': 0.0
}
pd.testing.assert_frame_equal(data[0][1], exp_data)
self.assertDictEqual(data[0][0], exp_metadata)
| lgpl-2.1 |
manulera/ModellingCourse | ReAct/Python/Example_PredatorPray_2.py | 1 | 1378 | import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import random
from ColorLine import *
init=np.array([80,40])
dt=0.001
t=np.arange(0,10,dt)
# Very nice values to see the oscillations, extinction happens
from Gilles import *  # numpy and matplotlib.pyplot were already imported above
# Initial conditions
user_input = ['Pred', 400.0,
'Prey', 1600.0]
# Rate constants (optional: the values could also be written directly in the reaction tuple)
k=[5,0.009,0.001,1]
# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)
# dx = A * x - B * x * y
# dy = C * x * y - D * y
reactions = (
(-1,'Prey'),(1,'Prey'),k[0], # Birth of a prey, would be interesting to see if we change the stoch to 2 for A
(-1,'Pred',1,'Prey'),(),k[1], # Prey is hunted, analogy is a catalizer for degradation
(-1,'Pred',-1,'Prey'),(1,'Pred'),k[2], # Predators nourrish on prey
(1,'Pred'),(),k[3] # Predators die
)
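# Added note: for the Lotka-Volterra comments above (dx = A*x - B*x*y,
# dy = C*x*y - D*y with A, B, C, D = k[0..3]), the non-trivial fixed point is
# Prey* = D/C = 1/0.001 = 1000 and Pred* = A/B = 5/0.009 ~ 556, so the runs
# should oscillate around roughly those populations.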
# dt is used for the deterministic calculation, and t is the time grid passed to ReAct
dt=0.0001
t = np.arange(0, 30, dt)
(solution,(tgill, valsgill, _, _),rows,mode)=ReAct(user_input,reactions,t)
Gillesplot(solution,t,tgill, valsgill,rows,mode)
f, ax = plt.subplots()
colorline(valsgill[0][0,:], valsgill[0][1,:], None, cmap=plt.get_cmap('jet'), linewidth=2)
ax.plot(solution[:,0],solution[:,1])
plt.show()
| gpl-3.0 |
vibhorag/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
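
# Added cross-check (not part of the original example): the micro-averaged
# area can also be computed directly; it should agree with roc_auc["micro"].
from sklearn.metrics import roc_auc_score
roc_auc_micro_direct = roc_auc_score(y_test, y_score, average="micro")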
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
nejctrdin/DFD | dexi.py | 1 | 12417 | import matplotlib
matplotlib.use("Agg")
import numpy as np
from scipy.interpolate import RegularGridInterpolator
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from itertools import product
from scipy.misc import derivative
from matplotlib import animation
import random
import string
import os
import content
# different error statuses given back to the user in case of problems
NOT_CORRECT_LINES = "Function should be represented in at least 3 lines!"
NOT_NUMBER_MULTIPLICITIES_ARG = ("Number of function arguments ({0}) and number "
"of multiplicities ({1}) should be equal!")
MULTIPLICITIES_INT = "Multiplicities should be integers! Muliplicity {0} is not!"
INPUT_OUTPUT_NOT_MATCHING = ("The input space size ({0}) does not match output "
"space size ({1})!")
FUNCTION_OUTPUT_FLOAT = "All function outputs should be floats!"
FUNCTION_EVALUATIONS_FLOAT = "All function evaluations should be floats {0}!"
PROBLEM_DERIVATIVES = "There was a problem constructing derivatives!"
NOT_CORRECT_ARGUMENTS_EVAL = "Number of function arguments ({0}) does not match number of supplied evaluation arguments ({1}) - {2}."
def parse_function(f_rep):
# the function that parses the input string
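    # Expected layout of f_rep, inferred from the checks below (illustrative):
    #   line 1: comma-separated function outputs, one per input point
    #   line 2: comma-separated argument names
    #   line 3: comma-separated integer multiplicities, one per argument
    #   lines 4+ (optional): comma-separated evaluation points
    # e.g. two arguments with multiplicities 2,2 (hence 2*2 = 4 outputs):
    #   "0,1,1,2\nx,y\n2,2\n0.5,1.5"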
# removing the \r chars and splitting the function by new lines
f_rep = f_rep.replace("\r", "")
lines = f_rep.split("\n")
split = []
for line in lines:
if line != "":
split.append(line)
if len(split) < 3:
# function must be at least three lines long
return None, None, None, False, NOT_CORRECT_LINES
function_outputs = split[0]
if "," in function_outputs:
# see if function outputs are comma delimited
function_outputs = function_outputs.split(",")
# get the output size, arguments and multiplicities
output_size = len(function_outputs)
arguments = split[1].replace(" ","").split(",")
multip = split[2].replace(" ","").split(",")
    # test that function outputs can be cast to floats
for output in function_outputs:
try:
float(output)
except:
return None, None, None, False, FUNCTION_OUTPUT_FLOAT
# test that length of arguments is the same as the length of the multiplicities
if len(arguments) != len(multip):
return None, None, None, False, NOT_NUMBER_MULTIPLICITIES_ARG.format(len(arguments), len(multip))
# check that multiplicities can be cast to integers
i = 1
for mutiplicity in multip:
try:
int(mutiplicity)
except:
return None, None, None, False, MULTIPLICITIES_INT.format(i)
i+=1
# get the input space from the multiplicities
input_sizes = map(int, multip)
input_size = reduce(lambda x,y: x*y, input_sizes)
# check that input space matches the size of the output space
if input_size != output_size:
return None, None, None, False, INPUT_OUTPUT_NOT_MATCHING.format(input_size, output_size)
# possible evaluations
req_evaluations = []
if len(split) > 3:
# iterate over all evaluations appearing after the third line
for e in split[3:]:
# split them
evaluation = e.replace(" ","").split(",")
# see if the length is the same as the length of arguments
if len(evaluation) != len(arguments):
return None, None, None, False, NOT_CORRECT_ARGUMENTS_EVAL.format(len(arguments), len(evaluation), map(str, evaluation))
for e in evaluation:
# check that the evaluations can be cast to floats
try:
float(e)
except:
return None, None, None, False, FUNCTION_EVALUATIONS_FLOAT.format(map(str,evaluation))
# finally add them to the list
req_evaluations.append(evaluation)
# create the input space
input_space = [xrange(size) for size in input_sizes]
# define the function
function = []
i = 0
for point in product(*input_space):
tmp = [point, float(function_outputs[i])]
function.append(tmp)
i += 1
# return the function, arguments, requested evaluations, status and message
return function, arguments, req_evaluations, True, ""
def _create_function(function):
# input size (number of arguments) and number of points
input_size = len(function[0][0])
# size of the function
defined_points_size = len(function)
# get the multiplicities of the arguments
max_values = [-1] * input_size
for point, output in function:
for i in xrange(len(point)):
max_values[i] = max(max_values[i], point[i])
# create the space for the function inputs
space = [np.array(xrange(max_val + 1)) for max_val in max_values]
fun = []
# create the mesh for function inputs
for point, output in function:
current = fun
for i in xrange(len(point)):
if len(current) - 1 < point[i]:
current.append([])
current = current[point[i]]
current.append(output)
data = np.array(fun)
# create the interpolating spline
interpolating = RegularGridInterpolator(tuple(space),
data,
bounds_error=False,
fill_value=None
)
return interpolating, max_values, input_size
def _create_image(interpolating, max_values, arguments, output_image):
# we create a list of possible characters that form the file names
_possible_chars = string.ascii_letters + string.digits
# we can draw an image if there are 1 or 2 arguments
_image_file_name_len = 10
# create a file name, that is not present in the directory
image_file_name = ""
image_dir = content._DEFAULT_IMAGE_PATH
input_size = len(arguments)
while True:
image_file_name = "".join([image_dir] + [random.choice(_possible_chars) for _ in xrange(_image_file_name_len)] + [".png"])
if not os.path.isfile(image_file_name):
break
if input_size == 1:
# if input size is 1, we have a 2D image
fig = plt.figure()
X = np.arange(-1, max_values[0] + 1.1, 0.1)
ax = fig.add_subplot(111)
ax.set_xlabel(arguments[0])
ax.set_ylabel("Output")
Y = []
for x in X:
Y.append(interpolating([x])[0][0])
plt.plot(X, Y)
if output_image:
plt.savefig(image_file_name)
elif input_size == 2:
# otherwise we have a 3D image
fig = plt.figure()
ax = fig.gca(projection="3d")
X = np.arange(-0.5, max_values[0] + 0.6, 0.1)
Y = np.arange(-0.5, max_values[1] + 0.6, 0.1)
ax.set_xlabel(arguments[0])
ax.set_ylabel(arguments[1])
ax.set_zlabel("Output")
ax.view_init(azim=-160)
X, Y = np.meshgrid(X, Y)
Z = []
for i in xrange(len(X)):
current = []
for j in xrange(len(X[i])):
x = X[i][j]
y = Y[i][j]
current.append(interpolating([x, y])[0][0])
Z.append(current)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
linewidth=0.1, antialiased=True,
shade=True, cmap=cm.jet)
# output image
if output_image:
plt.savefig(image_file_name)
return image_file_name.replace(image_dir, "")
def _create_animation(function, arguments):
if len(arguments) != 2:
return ""
interpolating, max_values, input_size = _create_function(function)
# we create a list of possible characters that form the file names
_possible_chars = string.ascii_letters + string.digits
# we can draw an image if there are 1 or 2 arguments
_image_file_name_len = 10
# create a file name, that is not present in the directory
image_dir = content._DEFAULT_IMAGE_PATH
anim_file_name = ""
# otherwise we have a 3D image
fig = plt.figure()
ax = fig.gca(projection="3d")
X = np.arange(-0.5, max_values[0] + 0.6, 0.1)
Y = np.arange(-0.5, max_values[1] + 0.6, 0.1)
ax.set_xlabel(arguments[0])
ax.set_ylabel(arguments[1])
ax.set_zlabel("Output")
X, Y = np.meshgrid(X, Y)
Z = []
for i in xrange(len(X)):
current = []
for j in xrange(len(X[i])):
x = X[i][j]
y = Y[i][j]
current.append(interpolating([x, y])[0][0])
Z.append(current)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
linewidth=0.1, antialiased=True,
shade=True, cmap=cm.jet)
# prepare the animation
def animate(nFrame):
ax.view_init(azim=-160+15*nFrame)
while True:
anim_file_name = "".join([image_dir] + [random.choice(_possible_chars) for _ in xrange(_image_file_name_len)] + [".gif"])
if not os.path.isfile(anim_file_name):
break
# output animation
anim = animation.FuncAnimation(fig, animate, frames=24)
anim.save(anim_file_name, writer="imagemagick", fps=8)
return anim_file_name.replace(image_dir, "")
def _scipy_derivatives(function, req_evaluations, arguments, output_image=True):
# the function expects a correct form of a function as parsed above and evaluation points
# then it constructs an interpolating function using scipy interpolate utility, parses the
# results and returns them
interpolating, max_values, input_size = _create_function(function)
# an inline function for computing partial derivatives
def partial_derivative(func, var=0, point=[]):
args = point[:]
def wraps(x):
args[var] = x
return func(args)
return derivative(wraps, point[var], dx=1e-6)
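    # Added note: scipy.misc.derivative uses a central finite difference, so the
    # helper above approximates
    #     d f / d x_var  ~  (f(x + h*e_var) - f(x - h*e_var)) / (2*h)   with h = 1e-6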
# list of derivatives
derivatives = []
SUM = 0.0
N = 0
for i in xrange(input_size):
for point, _ in function:
# add the derivatives
d = partial_derivative(interpolating, i, list(point))[0][0]
derivatives.append(_format_number(d))
SUM += d
N+=1
# finally add the average
derivatives.append(_format_number(SUM/N))
SUM = 0.0
N = 0
# fill the required evaluations
evaluations = []
for ev in req_evaluations:
e = interpolating(map(float, ev))[0][0]
evaluations.append((ev, _format_number(e)))
# create the filename for the possible image
image_file_name = ""
if input_size < 3:
image_file_name = _create_image(interpolating,
max_values,
arguments,
output_image)
# return the derivatives, evaluations, and image file name
return derivatives, evaluations, image_file_name
def _format_number(num):
# a function that formats a number as string with two digits after dot
return "{0:.2f}".format(num)
def get_derivatives(function, req_evaluations, arguments, output_image=True):
# the function which calls the interior function of this file
derivatives, evaluations, image_file_name = _scipy_derivatives(function, req_evaluations, arguments, output_image)
return derivatives, evaluations, image_file_name, True, ""
def create_2argument_function(mul_f, mul_s, function):
    # creates a point-wise definition of a function of 2 arguments, given the
    # multiplicities of the first and second argument and a callable that is
    # evaluated on each pair of integer indices
# check if multiplicities are above 0
if mul_f < 1:
raise ValueError("Multiplicity of the first attribute must be more than 0.")
if mul_s < 1:
raise ValueError("Multiplicity of the second attribute must be more than 0.")
# create iterators and apply function
function_values = []
for i in xrange(mul_f):
for j in xrange(mul_s):
function_values.append(str(function(i, j)))
# format the function as per specification
output = []
output.append(",".join(function_values))
output.append("first,second")
output.append(",".join([str(mul_f), str(mul_s)]))
# return the string representation of the function
return " ".join(output)
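
# Added worked example: create_2argument_function(2, 2, lambda i, j: i + j)
# evaluates i + j on the 2x2 grid {0,1}x{0,1} and returns the string
# "0,1,1,2 first,second 2,2" (outputs, argument names, multiplicities).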
| gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/IPython/lib/latextools.py | 8 | 6387 | # -*- coding: utf-8 -*-
"""Tools for handling LaTeX."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO, open
import os
import tempfile
import shutil
import subprocess
from IPython.utils.process import find_cmd, FindCmdError
from traitlets.config import get_config
from traitlets.config.configurable import SingletonConfigurable
from traitlets import List, Bool, Unicode
from IPython.utils.py3compat import cast_unicode, cast_unicode_py2 as u, PY3
try: # Py3
from base64 import encodebytes
except ImportError: # Py2
from base64 import encodestring as encodebytes
class LaTeXTool(SingletonConfigurable):
"""An object to store configuration of the LaTeX tool."""
def _config_default(self):
return get_config()
backends = List(
Unicode(), ["matplotlib", "dvipng"],
help="Preferred backend to draw LaTeX math equations. "
"Backends in the list are checked one by one and the first "
"usable one is used. Note that `matplotlib` backend "
"is usable only for inline style equations. To draw "
"display style equations, `dvipng` backend must be specified. ",
# It is a List instead of Enum, to make configuration more
# flexible. For example, to use matplotlib mainly but dvipng
# for display style, the default ["matplotlib", "dvipng"] can
# be used. To NOT use dvipng so that other repr such as
# unicode pretty printing is used, you can use ["matplotlib"].
).tag(config=True)
use_breqn = Bool(
True,
help="Use breqn.sty to automatically break long equations. "
"This configuration takes effect only for dvipng backend.",
).tag(config=True)
packages = List(
['amsmath', 'amsthm', 'amssymb', 'bm'],
help="A list of packages to use for dvipng backend. "
"'breqn' will be automatically appended when use_breqn=True.",
).tag(config=True)
preamble = Unicode(
help="Additional preamble to use when generating LaTeX source "
"for dvipng backend.",
).tag(config=True)
def latex_to_png(s, encode=False, backend=None, wrap=False):
"""Render a LaTeX string to PNG.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
encode : bool, optional
Should the PNG data base64 encoded to make it JSON'able.
backend : {matplotlib, dvipng}
Backend for producing PNG data.
wrap : bool
If true, Automatically wrap `s` as a LaTeX equation.
None is returned when the backend cannot be used.
"""
s = cast_unicode(s)
allowed_backends = LaTeXTool.instance().backends
if backend is None:
backend = allowed_backends[0]
if backend not in allowed_backends:
return None
if backend == 'matplotlib':
f = latex_to_png_mpl
elif backend == 'dvipng':
f = latex_to_png_dvipng
else:
raise ValueError('No such backend {0}'.format(backend))
bin_data = f(s, wrap)
if encode and bin_data:
bin_data = encodebytes(bin_data)
return bin_data
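
# Illustrative usage (added; not from the original docstring):
#     png_bytes = latex_to_png(r"\frac{1}{2}", wrap=True)               # raw PNG bytes or None
#     b64_bytes = latex_to_png(r"\frac{1}{2}", wrap=True, encode=True)  # base64-encoded bytes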
def latex_to_png_mpl(s, wrap):
try:
from matplotlib import mathtext
from pyparsing import ParseFatalException
except ImportError:
return None
# mpl mathtext doesn't support display math, force inline
s = s.replace('$$', '$')
if wrap:
s = u'${0}$'.format(s)
try:
mt = mathtext.MathTextParser('bitmap')
f = BytesIO()
mt.to_png(f, s, fontsize=12)
return f.getvalue()
except (ValueError, RuntimeError, ParseFatalException):
return None
def latex_to_png_dvipng(s, wrap):
try:
find_cmd('latex')
find_cmd('dvipng')
except FindCmdError:
return None
try:
workdir = tempfile.mkdtemp()
tmpfile = os.path.join(workdir, "tmp.tex")
dvifile = os.path.join(workdir, "tmp.dvi")
outfile = os.path.join(workdir, "tmp.png")
with open(tmpfile, "w", encoding='utf8') as f:
f.writelines(genelatex(s, wrap))
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(
["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
cwd=workdir, stdout=devnull, stderr=devnull)
subprocess.check_call(
["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
"-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
stdout=devnull, stderr=devnull)
with open(outfile, "rb") as f:
return f.read()
except subprocess.CalledProcessError:
return None
finally:
shutil.rmtree(workdir)
def kpsewhich(filename):
"""Invoke kpsewhich command with an argument `filename`."""
try:
find_cmd("kpsewhich")
proc = subprocess.Popen(
["kpsewhich", filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return stdout.strip().decode('utf8', 'replace')
except FindCmdError:
pass
def genelatex(body, wrap):
"""Generate LaTeX document for dvipng backend."""
lt = LaTeXTool.instance()
breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
yield u(r'\documentclass{article}')
packages = lt.packages
if breqn:
packages = packages + ['breqn']
for pack in packages:
yield u(r'\usepackage{{{0}}}'.format(pack))
yield u(r'\pagestyle{empty}')
if lt.preamble:
yield lt.preamble
yield u(r'\begin{document}')
if breqn:
yield u(r'\begin{dmath*}')
yield body
yield u(r'\end{dmath*}')
elif wrap:
yield u'$${0}$$'.format(body)
else:
yield body
    yield u(r'\end{document}')  # raw string, consistent with the other yields above
_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
"""Render LaTeX to HTML with embedded PNG data using data URIs.
Parameters
----------
s : str
The raw string containing valid inline LateX.
alt : str
The alt text to use for the HTML.
"""
base64_data = latex_to_png(s, encode=True).decode('ascii')
if base64_data:
return _data_uri_template_png % (base64_data, alt)
| apache-2.0 |
petebachant/NTNU-HAWT-turbinesFoam | pynhtf/processing.py | 1 | 7298 | #!/usr/bin/env python
"""Processing functions."""
from __future__ import division, print_function
import matplotlib.pyplot as plt
import re
import numpy as np
import os
import glob
import sys
import foampy
from pxl import fdiff
import pandas as pd
# Some constants
D = {"turbine1": 0.944, "turbine2": 0.894, "nominal": 0.9}
R = {turbine: d/2 for turbine, d in D.items()}
A = {turbine: np.pi*r**2 for turbine, r in R.items()}
U = 10.0
U_infty = U
rho = 1.2
def load_u_profile(turbine="turbine2", z_R=0.0):
"""Load data from the sampled mean velocity and return it as a pandas
`DataFrame`.
"""
z_R = float(z_R)
timedirs = os.listdir("postProcessing/sets")
latest_time = max(timedirs)
fname = "{}_{}_UMean.csv".format(turbine, z_R)
data = pd.read_csv(os.path.join("postProcessing", "sets", latest_time,
fname))
df = pd.DataFrame()
df["y_R"] = data["y"]/R[turbine]
df["u"] = data["UMean_0"]
return df
def load_vel_map(turbine="turbine2", component="u"):
"""Load all mean streamwise velocity profiles. Returns a `DataFrame` with
`z_R` as the index and `y_R` as columns.
"""
# Define columns in set raw data file
columns = dict(u=0, v=1, w=2)
sets_dir = os.path.join("postProcessing", "sets")
latest_time = max(os.listdir(sets_dir))
data_dir = os.path.join(sets_dir, latest_time)
flist = os.listdir(data_dir)
z_R = []
for fname in flist:
if "UMean" in fname:
z_R.append(float(fname.split("_")[1]))
z_R.sort()
z_R.reverse()
vel = []
for zi in z_R:
fname = "{}_{}_UMean.csv".format(turbine, zi)
dfi = pd.read_csv(os.path.join(data_dir, fname))
vel.append(dfi["UMean_{}".format(columns[component])].values)
y_R = dfi["y"]/R[turbine]
z_R = np.asarray(z_R)
vel = np.asarray(vel).reshape((len(z_R), len(y_R)))
df = pd.DataFrame(vel, index=z_R, columns=y_R)
return df
def load_k_profile(turbine="turbine2", z_R=0.0):
"""Load data from the sampled `UPrime2Mean` and `kMean` (if available) and
return it as a pandas `DataFrame`.
"""
z_R = float(z_R)
df = pd.DataFrame()
timedirs = os.listdir("postProcessing/sets")
latest_time = max(timedirs)
fname_u = "{}_{}_UPrime2Mean.csv".format(turbine, z_R)
fname_k = "{}_{}_kMean.csv".format(turbine, z_R)
dfi = pd.read_csv(os.path.join("postProcessing", "sets", latest_time,
fname_u))
df["y_R"] = dfi.y/R[turbine]
df["k_resolved"] = 0.5*(dfi.UPrime2Mean_0 + dfi.UPrime2Mean_3
+ dfi.UPrime2Mean_5)
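    # Added note: UPrime2Mean is OpenFOAM's symmetric resolved Reynolds-stress
    # tensor with components ordered (xx, xy, xz, yy, yz, zz), so indices 0, 3
    # and 5 are the normal stresses and k_resolved = 0.5*(u'u' + v'v' + w'w').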
try:
dfi = pd.read_csv(os.path.join("postProcessing", "sets", latest_time,
fname_k))
df["k_modeled"] = dfi.kMean
df["k_total"] = df.k_modeled + df.k_resolved
except FileNotFoundError:
df["k_modeled"] = np.zeros(len(df.y_R))*np.nan
df["k_total"] = df.k_resolved
return df
def load_k_map(amount="total"):
"""Load all TKE profiles. Returns a `DataFrame` with `z_H` as the index and
`y_R` as columns.
"""
sets_dir = os.path.join("postProcessing", "sets")
latest_time = max(os.listdir(sets_dir))
data_dir = os.path.join(sets_dir, latest_time)
flist = os.listdir(data_dir)
z_H = []
for fname in flist:
if "UPrime2Mean" in fname:
z_H.append(float(fname.split("_")[1]))
z_H.sort()
z_H.reverse()
k = []
for z_H_i in z_H:
dfi = load_k_profile(z_H_i)
k.append(dfi["k_" + amount].values)
y_R = dfi.y_R.values
k = np.array(k).reshape((len(z_H), len(y_R)))
df = pd.DataFrame(k, index=z_H, columns=y_R)
return df
def load_upup_profile(turbine="turbine2", z_R=0.0):
"""Load data from the sampled `UPrime2Mean` and `RMeanXX` and
return it as a pandas `DataFrame`.
"""
z_R = float(z_R)
df = pd.DataFrame()
timedirs = os.listdir("postProcessing/sets")
latest_time = max(timedirs)
fname_u = "{}_{}_UPrime2Mean.csv".format(turbine, z_R)
fname_k = "{}_{}_kMean_RMeanXX.csv".format(turbine, z_R)
dfi = pd.read_csv(os.path.join("postProcessing", "sets", latest_time,
fname_u))
df["y_R"] = dfi.y/R[turbine]
df["upup_resolved"] = dfi.UPrime2Mean_0
dfi = pd.read_csv(os.path.join("postProcessing", "sets", latest_time,
fname_k))
df["upup_modeled"] = dfi.RMeanXX
df["upup_total"] = df.upup_modeled + df.upup_resolved
return df
def load_perf(turbine="turbine1", verbose=True):
"""Load turbine performance data."""
df = pd.read_csv("postProcessing/turbines/0/{}.csv".format(turbine))
df = df.drop_duplicates("time", keep="last")
t1 = df.time.iloc[len(df.time)//2]
if verbose:
print("{} performance from {:.1f}--{:.1f} seconds:".format(
turbine, t1, df.time.max()))
print("Mean TSR = {:.2f}".format(df.tsr[df.time >= t1].mean()))
print("Mean C_P = {:.2f}".format(df.cp[df.time >= t1].mean()))
print("Mean C_D = {:.2f}".format(df.cd[df.time >= t1].mean()))
return df
def calc_perf(t1=1.0):
"""Calculate the performance of both turbines. Return NaN if turbine is
not active.
"""
df1 = pd.read_csv("postProcessing/turbines/0/turbine1.csv")
df2 = pd.read_csv("postProcessing/turbines/0/turbine2.csv")
df1 = df1.drop_duplicates("time", keep="last")
df2 = df2.drop_duplicates("time", keep="last")
df1 = df1[df1.time >= t1]
df2 = df2[df2.time >= t1]
return {"tsr_turbine1": df1.tsr.mean(),
"cp_turbine1": df1.cp.mean(),
"cd_turbine1": df1.cd.mean(),
"tsr_turbine2": df2.tsr.mean(),
"cp_turbine2": df2.cp.mean(),
"cd_turbine2": df2.cd.mean()}
def load_exp_perf(turbine="turbine1", quantity="cp"):
"""Load experimental performance data from Pierella et al. (2014)"""
fpath = "processed/Pierella2014/{}_{}.csv".format(turbine, quantity)
df = pd.read_csv(fpath, skipinitialspace=True)
# Correct for the fact experimental results use nominal dimensions
df.tsr *= R[turbine]/R["nominal"]
df[quantity] *= A["nominal"]/A[turbine]
return df
def load_vel_probes():
"""Load data from velocity probes."""
fpath = "postProcessing/probes/0/U"
def func():
with open(fpath) as f:
for line in f.readlines():
yield line.replace(")", "").replace("(", "")
data = np.genfromtxt(func())
ncols = data.shape[1]
df = pd.DataFrame(data=data, columns=["time", "u", "v", "w"])
df = df.set_index("time")
    df["flow_angle"] = np.degrees(np.arctan2(df.v, df.u))  # arctan (not tan) gives the flow angle
df["wind_speed"] = (df.u**2 + df.v**2)**0.5
return df
def rotate_vector(v, rad):
"""Rotate a 2-D vector by rad radians."""
dc, ds = np.cos(rad), np.sin(rad)
x, y = v[0], v[1]
x, y = dc*x - ds*y, ds*x + dc*y
return np.array((x, y))
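
# Added worked example: rotate_vector((1.0, 0.0), np.pi/2) gives approximately
# (0.0, 1.0), i.e. a 90-degree counter-clockwise rotation.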
def load_nacelle_sets():
d = sorted(glob.glob("postProcessing/sets/*"))[-1]
fpath = os.path.join(d, "nacelle_UMean.csv")
df = pd.read_csv(fpath)
df["vel_mag"] = (df.UMean_0**2 + df.UMean_1**2 + df.UMean_2**2)**0.5
df["vel_dir"] = np.degrees(np.arctan2(df.UMean_1, df.UMean_0))
return df.drop_duplicates().reset_index(drop=True)
| mit |
saltastro/saltefficiency | weekly/weekly_summary_plots.py | 1 | 8636 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 10:06:20 2015
@author: jpk
ToDo:
*automate the subsystems check. A query that checks all the subsystems in
case things change in the future should prevent issues with the pie chart
colours.
* Need to add global variables for the password.
* Need to property parse commandline options
"""
import sys
import os
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import report_queries as rq
import numpy as np
import matplotlib.dates as mdates
def priority_breakdown_pie_chart(x, ds, dirname='./logs/'):
'''
make a pie chart from the dataframe
'''
temp = list(x['Priority'])
no_blocks = map(int, list(x['No. Blocks']))
labels = ['P'+str(temp[i])+' - ' + str(no_blocks[i]) for i in range(0,len(temp))]
values = list(x['Tsec'])
# set colours for the priorities
colours = ['b','c','g','m','r']
fig = pl.figure(facecolor='w', figsize=[6, 6])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.7), fontsize=8)
title_txt = 'Weekly Priority Breakdown - ' + str(int(x['No. Blocks'].sum())) + ' Blocks Total' + '\n {}'.format(ds)
ax.set_title(title_txt, fontsize=12)
filename = dirname+'priority_breakdown_pie_chart_' +'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(filename, dpi=100)
# pl.show()
def weekly_total_time_breakdown_pie_chart(x, ds, dirname='./logs/'):
labels = ['Science - {}'.format(x['ScienceTime'][0]),
'Engineering - {}'.format(x['EngineeringTime'][0]),
'Weather - {}'.format(x['TimeLostToWeather'][0]),
'Problems - {}'.format(x['TimeLostToProblems'][0])]
values = [int(x['Science']),
int(x['Engineering']),
int(x['Weather']),
int(x['Problems'])]
colours = ['b','c','g','r']
fig = pl.figure(facecolor='w', figsize=[6, 6])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.8), fontsize=8)
title_txt = 'Weekly Time Breakdown - {} Total\n{}'.format(x['NightLength'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'total_time_breakdown_pie_chart_' + '-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_subsystem_breakdown_pie_chart(x, y, col_dict, ds, dirname='./logs/'):
subsystem = list(x['SaltSubsystem'])
time = list(x['TotalTime'])
labels = [subsystem[i] + ' - ' + time[i] for i in range(0,len(subsystem))]
values = list(x['Time'])
colours = [col_dict[i] for i in subsystem]
fig = pl.figure(facecolor='w', figsize=[6, 6])
ax = fig.add_subplot(111)
ax.set_aspect=0.8
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.9,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'k'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.5), fontsize=8)
title_txt = 'Weekly Problems Breakdown - {}\n{}'.format(y['TotalTime'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'subsystem_breakdown_pie_chart_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_time_breakdown(x, ds, dirname='./logs/'):
'''
produce a bar stacked bar chart plot of the time breakdown per day for the
past week.
'''
fig = pl.figure(figsize=(10,4),facecolor='w')
ax = fig.add_subplot(111)
width = 1.0
ax.grid(which='major', axis='y')
# science time per day
s = ax.bar(x['Date'],
x['Science'],
width,
color = 'b',
edgecolor='none')
# engineering time per day
e = ax.bar(x['Date'],
x['Engineering'],
width,
bottom = x['Science'],
color = 'c',
edgecolor='none')
# weather time per day
w = ax.bar(x['Date'],
x['Weather'],
width,
bottom = x['Science'] + x['Engineering'],
color = 'g',
edgecolor='none')
# problem time per day
p = ax.bar(x['Date'],
x['Problems'],
width,
bottom = x['Science'] + x['Engineering'] + x['Weather'],
color = 'r',
edgecolor='none')
ax.set_ylabel('Hours', fontsize=11)
ax.set_xlabel('Date', fontsize=11)
fig.legend((s[0], e[0], w[0], p[0]),
('Science Time',
'Engineering Time',
'Time lost to Weather',
'Time lost to Problems'),
frameon=False,
fontsize=10,
loc=(0.0,0.70))
title_txt = 'Weekly Time Breakdown - {}'.format(ds)
ax.set_title(title_txt, fontsize=11)
ax.xaxis_date()
date_formatter = mdates.DateFormatter('%a \n %Y-%m-%d')
ax.xaxis.set_major_formatter(date_formatter)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
fig.autofmt_xdate(rotation=0, ha = 'left')
fig.subplots_adjust(left=0.22, bottom=0.20, right=0.96, top=None,
wspace=None, hspace=None)
pl.autoscale()
filename = 'time_breakdown_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
if __name__=='__main__':
# set the colours for all the subsystems:
subsystems_list = ['BMS', 'DOME', 'TC', 'PMAS', 'SCAM', 'TCS', 'STRUCT',
'TPC', 'HRS', 'PFIS','Proposal', 'Operations',
'ELS', 'ESKOM']
cmap = pl.cm.jet
colour_map = cmap(np.linspace(0.0, 1.0, len(subsystems_list)))
col_dict = {}
for i in range(0, len(subsystems_list)):
col_dict[subsystems_list[i]] = colour_map[i]
# open mysql connection to the sdb
mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
port=3306,user=os.environ['SDBUSER'],
passwd=os.environ['SDBPASS'], db='sdb')
obsdate = sys.argv[1]
date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
interval = sys.argv[2]
# use the connection to get the required data: _d
dr_d = rq.date_range(mysql_con, date, interval=interval)
wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
wsbt_d = rq.weekly_subsystem_breakdown_total(mysql_con, date, interval=interval)
date_string = '{} - {}'.format(dr_d['StartDate'][0], dr_d['EndDate'][0])
# testing the pie_chart method
priority_breakdown_pie_chart(wpb_d, date_string,'')
weekly_total_time_breakdown_pie_chart(wttb_d, date_string,'')
weekly_subsystem_breakdown_pie_chart(wsb_d, wsbt_d, col_dict, date_string,'')
weekly_time_breakdown(wtb_d, date_string,'')
mysql_con.close()
| bsd-3-clause |
pprett/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
brodoll/sms-tools | software/models_interface/sineModel_function.py | 21 | 2749 | # function to call the main analysis/synthesis functions in software/models/sineModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sineModel as SM
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
"""
Perform analysis/synthesis using the sinusoidal model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
    M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# analyze the sound with the sinusoidal model
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
# synthesize the output sound from the sinusoidal representation
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
# output sound file name
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sineModel.wav'
# write the synthesized sound obtained from the sinusoidal synthesis
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the sinusoidal frequencies
plt.subplot(3,1,2)
if (tfreq.shape[1] > 0):
numFrames = tfreq.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
tfreq[tfreq<=0] = np.nan
plt.plot(frmTime, tfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of sinusoidal tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
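
# A hedged example of a non-default call (the parameter values below are
# purely illustrative, not recommended settings):
#
#   main(inputFile='../../sounds/bendir.wav', M=1201, N=4096, t=-90,
#        minSineDur=0.05, maxnSines=100)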
| agpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/pylab_examples/transoffset.py | 13 | 1666 | #!/usr/bin/env python
'''
This illustrates the use of transforms.offset_copy to
make a transform that positions a drawing element such as
a text string at a specified offset in screen coordinates
(dots or inches) relative to a location given in any
coordinates.
Every Artist--the mpl class from which classes such as
Text and Line are derived--has a transform that can be
set when the Artist is created, such as by the corresponding
pylab command. By default this is usually the Axes.transData
transform, going from data units to screen dots. We can
use the offset_copy function to make a modified copy of
this transform, where the modification consists of an
offset.
'''
import pylab as P
from matplotlib.transforms import offset_copy
X = P.arange(7)
Y = X**2
fig = P.figure(figsize=(5,10))
ax = P.subplot(2,1,1)
# If we want the same offset for each text instance,
# we only need to make one transform. To get the
# transform argument to offset_copy, we need to make the axes
# first; the subplot command above is one way to do this.
transOffset = offset_copy(ax.transData, fig=fig,
x = 0.05, y=0.10, units='inches')
for x, y in zip(X, Y):
P.plot((x,),(y,), 'ro')
P.text(x, y, '%d, %d' % (int(x),int(y)), transform=transOffset)
# offset_copy works for polar plots also.
ax = P.subplot(2,1,2, polar=True)
transOffset = offset_copy(ax.transData, fig=fig, y = 6, units='dots')
for x, y in zip(X, Y):
P.polar((x,),(y,), 'ro')
P.text(x, y, '%d, %d' % (int(x),int(y)),
transform=transOffset,
horizontalalignment='center',
verticalalignment='bottom')
P.show()
| mit |
bloyl/mne-python | tutorials/intro/40_sensor_locations.py | 4 | 14370 | """
.. _tut-sensor-locations:
Working with sensor locations
=============================
This tutorial describes how to read and plot sensor locations, and how
the physical location of sensors is handled in MNE-Python.
As usual we'll start by importing the modules we need and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, preload=True, verbose=False)
###############################################################################
# About montages and layouts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`Montages <mne.channels.DigMontage>` contain sensor
# positions in 3D (``x``, ``y``, ``z``, in meters), and can be used to set
# the physical positions of sensors. By specifying the location of sensors
# relative to the brain, :class:`Montages <mne.channels.DigMontage>` play an
# important role in computing the forward solution and computing inverse
# estimates.
#
# In contrast, :class:`Layouts <mne.channels.Layout>` are *idealized* 2-D
# representations of sensor positions, and are primarily used for arranging
# individual sensor subplots in a topoplot, or for showing the *approximate*
# relative arrangement of sensors as seen from above.
#
# Working with built-in montages
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The 3D coordinates of MEG sensors are included in the raw recordings from MEG
# systems, and are automatically stored in the ``info`` attribute of the
# :class:`~mne.io.Raw` file upon loading. EEG electrode locations are much more
# variable because of differences in head shape. Idealized montages for many
# EEG systems are included during MNE-Python installation; these files are
# stored in your ``mne-python`` directory, in the
# :file:`mne/channels/data/montages` folder:
montage_dir = os.path.join(os.path.dirname(mne.__file__),
'channels', 'data', 'montages')
print('\nBUILT-IN MONTAGE FILES')
print('======================')
print(sorted(os.listdir(montage_dir)))
###############################################################################
# .. sidebar:: Computing sensor locations
#
# If you are interested in how standard ("idealized") EEG sensor positions
# are computed on a spherical head model, the `eeg_positions`_ repository
# provides code and documentation to this end.
#
# These built-in EEG montages can be loaded via
# :func:`mne.channels.make_standard_montage`. Note that when loading via
# :func:`~mne.channels.make_standard_montage`, provide the filename *without*
# its file extension:
ten_twenty_montage = mne.channels.make_standard_montage('standard_1020')
print(ten_twenty_montage)
###############################################################################
# Once loaded, a montage can be applied to data via one of the instance methods
# such as :meth:`raw.set_montage <mne.io.Raw.set_montage>`. It is also possible
# to skip the loading step by passing the filename string directly to the
# :meth:`~mne.io.Raw.set_montage` method. This won't work with our sample
# data, because its channel names don't match the channel names in the
# standard 10-20 montage, so these commands are not run here:
# these will be equivalent:
# raw_1020 = raw.copy().set_montage(ten_twenty_montage)
# raw_1020 = raw.copy().set_montage('standard_1020')
###############################################################################
# :class:`Montage <mne.channels.DigMontage>` objects have a
# :meth:`~mne.channels.DigMontage.plot` method for visualization of the sensor
# locations in 3D; 2D projections are also possible by passing
# ``kind='topomap'``:
fig = ten_twenty_montage.plot(kind='3d')
fig.gca().view_init(azim=70, elev=15)
ten_twenty_montage.plot(kind='topomap', show_names=False)
###############################################################################
# .. _control-chan-projection:
#
# Controlling channel projection (MNE vs EEGLAB)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Channel positions in 2d space are obtained by projecting their actual 3d
# positions using a sphere as a reference. Because ``'standard_1020'`` montage
# contains realistic, not spherical, channel positions, we will use a different
# montage to demonstrate controlling how channels are projected to 2d space.
biosemi_montage = mne.channels.make_standard_montage('biosemi64')
biosemi_montage.plot(show_names=False)
###############################################################################
# By default a sphere with an origin in ``(0, 0, 0)`` x, y, z coordinates and
# radius of ``0.095`` meters (9.5 cm) is used. You can use a different sphere
# radius by passing a single value to ``sphere`` argument in any function that
# plots channels in 2d (like :meth:`~mne.channels.DigMontage.plot` that we use
# here, but also for example :func:`mne.viz.plot_topomap`):
biosemi_montage.plot(show_names=False, sphere=0.07)
###############################################################################
# To control not only radius, but also the sphere origin, pass a
# ``(x, y, z, radius)`` tuple to ``sphere`` argument:
biosemi_montage.plot(show_names=False, sphere=(0.03, 0.02, 0.01, 0.075))
###############################################################################
# In mne-python the head center and therefore the sphere center are calculated
# using :term:`fiducial points <fiducial>`.
# Because of this the head circle represents head
# circumference at the nasion and ear level, and not where it is commonly
# measured in the 10-20 EEG system: above nasion at T4/T8, T3/T7, Oz, Fz level.
# Notice below that by default T7 and Oz channels are placed within the head
# circle, not on the head outline:
biosemi_montage.plot()
###############################################################################
# If you have previous EEGLAB experience you may prefer its convention to
# represent 10-20 head circumference with the head circle. To get EEGLAB-like
# channel layout you would have to move the sphere origin a few centimeters
# up on the z dimension:
biosemi_montage.plot(sphere=(0, 0, 0.035, 0.094))
###############################################################################
# Instead of approximating the EEGLAB-esque sphere location as above, you can
# calculate the sphere origin from position of Oz, Fpz, T3/T7 or T4/T8
# channels. This is easier once the montage has been applied to the data and
# channel positions are in the head space - see
# :ref:`this example <ex-topomap-eeglab-style>`.
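
# A rough sketch of that idea, computed directly from the montage positions
# (``DigMontage.get_positions`` is assumed to be available in your MNE
# version, and the landmark channels below are only an illustration):
ch_pos = biosemi_montage.get_positions()['ch_pos']
landmarks = np.stack([ch_pos[ch] for ch in ('Oz', 'Fpz', 'T7', 'T8')])
sphere_origin = landmarks.mean(axis=0)          # crude sphere centre estimate
sphere_radius = np.linalg.norm(landmarks - sphere_origin, axis=1).mean()
biosemi_montage.plot(show_names=False,
                     sphere=tuple(sphere_origin) + (sphere_radius,))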
###############################################################################
# .. _reading-dig-montages:
#
# Reading sensor digitization files
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the sample data, setting the digitized EEG montage was done prior to
# saving the :class:`~mne.io.Raw` object to disk, so the sensor positions are
# already incorporated into the ``info`` attribute of the :class:`~mne.io.Raw`
# object (see the documentation of the reading functions and
# :meth:`~mne.io.Raw.set_montage` for details on how that works). Because of
# that, we can plot sensor locations directly from the :class:`~mne.io.Raw`
# object using the :meth:`~mne.io.Raw.plot_sensors` method, which provides
# similar functionality to
# :meth:`montage.plot() <mne.channels.DigMontage.plot>`.
# :meth:`~mne.io.Raw.plot_sensors` also allows channel selection by type, can
# color-code channels in various ways (by default, channels listed in
# ``raw.info['bads']`` will be plotted in red), and allows drawing into an
# existing matplotlib ``axes`` object (so the channel positions can easily be
# made as a subplot in a multi-panel figure):
# sphinx_gallery_thumbnail_number = 8
fig = plt.figure()
ax2d = fig.add_subplot(121)
ax3d = fig.add_subplot(122, projection='3d')
raw.plot_sensors(ch_type='eeg', axes=ax2d)
raw.plot_sensors(ch_type='eeg', axes=ax3d, kind='3d')
ax3d.view_init(azim=70, elev=15)
###############################################################################
# It's probably evident from the 2D topomap above that there is some
# irregularity in the EEG sensor positions in the :ref:`sample dataset
# <sample-dataset>` — this is because the sensor positions in that dataset are
# digitizations of the sensor positions on an actual subject's head, rather
# than idealized sensor positions based on a spherical head model. Depending on
# what system was used to digitize the electrode positions (e.g., a Polhemus
# Fastrak digitizer), you must use different montage reading functions (see
# :ref:`dig-formats`). The resulting :class:`montage <mne.channels.DigMontage>`
# can then be added to :class:`~mne.io.Raw` objects by passing it to the
# :meth:`~mne.io.Raw.set_montage` method (just as we did above with the name of
# the idealized montage ``'standard_1020'``). Once loaded, locations can be
# plotted with :meth:`~mne.channels.DigMontage.plot` and saved with
# :meth:`~mne.channels.DigMontage.save`, like when working with a standard
# montage.
#
# .. note::
#
# When setting a montage with :meth:`~mne.io.Raw.set_montage`
# the measurement info is updated in two places (the ``chs``
# and ``dig`` entries are updated). See :ref:`tut-info-class`.
# ``dig`` may contain HPI, fiducial, or head shape points in
# addition to electrode locations.
#
#
# Rendering sensor position with mayavi
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It is also possible to render an image of a MEG sensor helmet in 3D, using
# mayavi instead of matplotlib, by calling :func:`mne.viz.plot_alignment`
fig = mne.viz.plot_alignment(raw.info, trans=None, dig=False, eeg=False,
surfaces=[], meg=['helmet', 'sensors'],
coord_frame='meg')
mne.viz.set_3d_view(fig, azimuth=50, elevation=90, distance=0.5)
###############################################################################
# :func:`~mne.viz.plot_alignment` requires an :class:`~mne.Info` object, and
# can also render MRI surfaces of the scalp, skull, and brain (by passing
# keywords like ``'head'``, ``'outer_skull'``, or ``'brain'`` to the
# ``surfaces`` parameter) making it useful for :ref:`assessing coordinate frame
# transformations <plot_source_alignment>`. For examples of various uses of
# :func:`~mne.viz.plot_alignment`, see :ref:`plot_montage`,
# :ref:`ex-eeg-on-scalp`, and :ref:`ex-plot-meg-sensors`.
#
#
# Working with layout files
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As with montages, many layout files are included during MNE-Python
# installation, and are stored in the :file:`mne/channels/data/layouts` folder:
layout_dir = os.path.join(os.path.dirname(mne.__file__),
'channels', 'data', 'layouts')
print('\nBUILT-IN LAYOUT FILES')
print('=====================')
print(sorted(os.listdir(layout_dir)))
###############################################################################
# You may have noticed that the file formats and filename extensions of the
# built-in layout and montage files vary considerably. This reflects different
# manufacturers' conventions; to make loading easier the montage and layout
# loading functions in MNE-Python take the filename *without its extension* so
# you don't have to keep track of which file format is used by which
# manufacturer.
#
# To load a layout file, use the :func:`mne.channels.read_layout` function, and
# provide the filename *without* its file extension. You can then visualize the
# layout using its :meth:`~mne.channels.Layout.plot` method, or (equivalently)
# by passing it to :func:`mne.viz.plot_layout`:
biosemi_layout = mne.channels.read_layout('biosemi')
biosemi_layout.plot() # same result as: mne.viz.plot_layout(biosemi_layout)
###############################################################################
# Similar to the ``picks`` argument for selecting channels from
# :class:`~mne.io.Raw` objects, the :meth:`~mne.channels.Layout.plot` method of
# :class:`~mne.channels.Layout` objects also has a ``picks`` argument. However,
# because layouts only contain information about sensor name and location (not
# sensor type), the :meth:`~mne.channels.Layout.plot` method only allows
# picking channels by index (not by name or by type). Here we find the indices
# we want using :func:`numpy.where`; selection by name or type is possible via
# :func:`mne.pick_channels` or :func:`mne.pick_types`.
midline = np.where([name.endswith('z') for name in biosemi_layout.names])[0]
biosemi_layout.plot(picks=midline)
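
# Selection by name works the same way (a hedged sketch; the channel names
# below are assumed to be present in this layout):
frontal = mne.pick_channels(biosemi_layout.names, include=['F3', 'Fz', 'F4'])
biosemi_layout.plot(picks=frontal)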
###############################################################################
# If you're working with a :class:`~mne.io.Raw` object that already has sensor
# positions incorporated, you can create a :class:`~mne.channels.Layout` object
# with either the :func:`mne.channels.make_eeg_layout` function or
# (equivalently) the :func:`mne.channels.find_layout` function.
layout_from_raw = mne.channels.make_eeg_layout(raw.info)
# same result as: mne.channels.find_layout(raw.info, ch_type='eeg')
layout_from_raw.plot()
###############################################################################
# .. note::
#
# There is no corresponding ``make_meg_layout`` function because sensor
# locations are fixed in a MEG system (unlike in EEG, where the sensor caps
# deform to fit each subject's head). Thus MEG layouts are consistent for a
# given system and you can simply load them with
# :func:`mne.channels.read_layout`, or use :func:`mne.channels.find_layout`
# with the ``ch_type`` parameter, as shown above for EEG.
#
# All :class:`~mne.channels.Layout` objects have a
# :meth:`~mne.channels.Layout.save` method that allows writing layouts to disk,
# in either :file:`.lout` or :file:`.lay` format (which format gets written is
# inferred from the file extension you pass to the method's ``fname``
# parameter). The choice between :file:`.lout` and :file:`.lay` format only
# matters if you need to load the layout file in some other software
# (MNE-Python can read either format equally well).
#
#
# .. LINKS
#
# .. _`eeg_positions`: https://github.com/sappelhoff/eeg_positions
| bsd-3-clause |
pelson/cartopy | lib/cartopy/mpl/patch.py | 1 | 8401 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Provide shapely geometry <-> matplotlib path support.
See also `Shapely Geometric Objects <see_also_shapely>`_
and `Matplotlib Path API <http://matplotlib.org/api/path_api.html>`_.
.. see_also_shapely:
http://toblerity.org/shapely/manual.html#geometric-objects
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import matplotlib
from matplotlib.path import Path
import shapely.geometry as sgeom
def geos_to_path(shape):
"""
Create a list of :class:`matplotlib.path.Path` objects that describe
a shape.
Parameters
----------
shape
A list, tuple or single instance of any of the following
types: :class:`shapely.geometry.point.Point`,
:class:`shapely.geometry.linestring.LineString`,
:class:`shapely.geometry.linestring.LinearRing`,
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.multipoint.MultiPoint`,
:class:`shapely.geometry.multipolygon.MultiPolygon`,
:class:`shapely.geometry.multilinestring.MultiLineString`,
:class:`shapely.geometry.collection.GeometryCollection`,
or any type with a _as_mpl_path() method.
Returns
-------
paths
A list of :class:`matplotlib.path.Path` objects.
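
    Examples
    --------
    A minimal usage sketch (the unit square below is purely illustrative)::

        import shapely.geometry as sgeom
        from cartopy.mpl.patch import geos_to_path

        square = sgeom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        paths = geos_to_path(square)  # a list containing a single Path
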
"""
if isinstance(shape, (list, tuple)):
paths = []
for shp in shape:
paths.extend(geos_to_path(shp))
return paths
if isinstance(shape, sgeom.LinearRing):
return [Path(np.column_stack(shape.xy), closed=True)]
elif isinstance(shape, (sgeom.LineString, sgeom.Point)):
return [Path(np.column_stack(shape.xy))]
elif isinstance(shape, sgeom.Polygon):
def poly_codes(poly):
codes = np.ones(len(poly.xy[0])) * Path.LINETO
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY
return codes
if shape.is_empty:
return []
vertices = np.concatenate([np.array(shape.exterior.xy)] +
[np.array(ring.xy) for ring in
shape.interiors], 1).T
codes = np.concatenate([poly_codes(shape.exterior)] +
[poly_codes(ring) for ring in shape.interiors])
return [Path(vertices, codes)]
elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection,
sgeom.MultiLineString, sgeom.MultiPoint)):
paths = []
for geom in shape.geoms:
paths.extend(geos_to_path(geom))
return paths
elif hasattr(shape, '_as_mpl_path'):
vertices, codes = shape._as_mpl_path()
return [Path(vertices, codes)]
else:
raise ValueError('Unsupported shape type {}.'.format(type(shape)))
def path_segments(path, **kwargs):
"""
Create an array of vertices and a corresponding array of codes from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
kwargs
See :func:`matplotlib.path.iter_segments` for details of the keyword
arguments.
Returns
-------
vertices, codes
A (vertices, codes) tuple, where vertices is a numpy array of
coordinates, and codes is a numpy array of matplotlib path codes.
See :class:`matplotlib.path.Path` for information on the types of
codes and their meanings.
"""
pth = path.cleaned(**kwargs)
return pth.vertices[:-1, :], pth.codes[:-1]
def path_to_geos(path, force_ccw=False):
"""
Create a list of Shapely geometric objects from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
force_ccw
Boolean flag determining whether the path can be inverted to enforce
ccw. Defaults to False.
Returns
-------
A list of instances of the following type(s):
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.linestring.LineString` and/or
:class:`shapely.geometry.multilinestring.MultiLineString`.
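
    Examples
    --------
    A rough round-trip sketch using :func:`geos_to_path` from this module
    (the geometry is purely illustrative)::

        import shapely.geometry as sgeom
        from cartopy.mpl.patch import geos_to_path, path_to_geos

        square = sgeom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        # typically a list containing a single shapely Polygon
        geoms = path_to_geos(geos_to_path(square)[0])
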
"""
# Convert path into numpy array of vertices (and associated codes)
path_verts, path_codes = path_segments(path, curves=False)
# Split into subarrays such that each subarray consists of connected
# line segments based on the start of each one being marked by a
# matplotlib MOVETO code.
verts_split_inds = np.where(path_codes == Path.MOVETO)[0]
verts_split = np.split(path_verts, verts_split_inds)
codes_split = np.split(path_codes, verts_split_inds)
# Iterate through the vertices generating a list of
# (external_geom, [internal_polygons]) tuples.
other_result_geoms = []
collection = []
for path_verts, path_codes in zip(verts_split, codes_split):
if len(path_verts) == 0:
continue
verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],
axis=1)
if all(verts_same_as_first):
geom = sgeom.Point(path_verts[0, :])
elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:
geom = sgeom.Polygon(path_verts[:-1, :])
elif (matplotlib.__version__ < '2.2.0' and
# XXX A path can be given which does not end with close poly,
# in that situation, we have to guess?
path_verts.shape[0] > 3 and verts_same_as_first[-1]):
geom = sgeom.Polygon(path_verts)
else:
geom = sgeom.LineString(path_verts)
# If geom is a Polygon and is contained within the last geom in
# collection, add it to its list of internal polygons, otherwise
# simply append it as a new external geom.
if geom.is_empty:
pass
elif (len(collection) > 0 and
isinstance(collection[-1][0], sgeom.Polygon) and
isinstance(geom, sgeom.Polygon) and
collection[-1][0].contains(geom.exterior)):
collection[-1][1].append(geom.exterior)
elif isinstance(geom, sgeom.Point):
other_result_geoms.append(geom)
else:
collection.append((geom, []))
# Convert each (external_geom, [internal_polygons]) pair into a
# a shapely Polygon that encapsulates the internal polygons, if the
# external geom is a LineString leave it alone.
geom_collection = []
for external_geom, internal_polys in collection:
if internal_polys:
# XXX worry about islands within lakes
geom = sgeom.Polygon(external_geom.exterior, internal_polys)
else:
geom = external_geom
# Correctly orientate the polygon (ccw)
if isinstance(geom, sgeom.Polygon):
if force_ccw and not geom.exterior.is_ccw:
geom = sgeom.polygon.orient(geom)
geom_collection.append(geom)
# If the geom_collection only contains LineStrings combine them
# into a single MultiLinestring.
if geom_collection and all(isinstance(geom, sgeom.LineString) for
geom in geom_collection):
geom_collection = [sgeom.MultiLineString(geom_collection)]
# Remove any zero area Polygons
def not_zero_poly(geom):
return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and
geom.area != 0) or
not isinstance(geom, sgeom.Polygon))
result = list(filter(not_zero_poly, geom_collection))
return result + other_result_geoms
| lgpl-3.0 |
mhvk/astropy | astropy/timeseries/sampled.py | 5 | 16159 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from astropy.table import groups, QTable, Table
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns
__all__ = ['TimeSeries']
@autocheck_required_columns
class TimeSeries(BaseTimeSeries):
"""
A class to represent time series data in tabular form.
`~astropy.timeseries.TimeSeries` provides a class for representing time
series as a collection of values of different quantities measured at specific
points in time (for time series with finite time bins, see the
`~astropy.timeseries.BinnedTimeSeries` class).
`~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
    and thus provides all the standard table manipulation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, a method
for folding time series, and a ``time`` attribute for easy access to the
time values.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in a column called ``'time'`` to be automatically recognized.
time : `~astropy.time.Time`, `~astropy.time.TimeDelta` or iterable
The times at which the values are sampled - this can be either given
directly as a `~astropy.time.Time` or `~astropy.time.TimeDelta` array
or as any iterable that initializes the `~astropy.time.Time` class. If
this is given, then the remaining time-related arguments should not be used.
time_start : `~astropy.time.Time` or str
The time of the first sample in the time series. This is an alternative
to providing ``time`` and requires that ``time_delta`` is also provided.
time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity` ['time']
The step size in time for the series. This can either be a scalar if
the time series is evenly sampled, or an array of values if it is not.
n_samples : int
The number of time samples for the series. This is only used if both
``time_start`` and ``time_delta`` are provided and are scalar values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
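
    Examples
    --------
    A minimal construction sketch (the column values below are purely
    illustrative)::

        from astropy import units as u
        from astropy.timeseries import TimeSeries

        ts = TimeSeries(time_start='2019-01-01T00:00:00',
                        time_delta=3 * u.s,
                        data={'flux': [1., 4., 5.] * u.mJy})
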
"""
_required_columns = ['time']
def __init__(self, data=None, *, time=None, time_start=None,
time_delta=None, n_samples=None, **kwargs):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and time_start is None and time_delta is None:
self._required_columns_relax = True
return
# First if time has been given in the table data, we should extract it
# and treat it as if it had been passed as a keyword argument.
if data is not None:
if n_samples is not None:
if n_samples != len(self):
raise TypeError("'n_samples' has been given both and it is not the "
"same length as the input data.")
else:
n_samples = len(self)
if 'time' in self.colnames:
if time is None:
time = self.columns['time']
else:
raise TypeError("'time' has been given both in the table and as a keyword argument")
if time is None and time_start is None:
raise TypeError("Either 'time' or 'time_start' should be specified")
elif time is not None and time_start is not None:
raise TypeError("Cannot specify both 'time' and 'time_start'")
if time is not None and not isinstance(time, (Time, TimeDelta)):
time = Time(time)
if time_start is not None and not isinstance(time_start, (Time, TimeDelta)):
time_start = Time(time_start)
if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):
raise TypeError("'time_delta' should be a Quantity or a TimeDelta")
if isinstance(time_delta, TimeDelta):
time_delta = time_delta.sec * u.s
if time_start is not None:
# We interpret this as meaning that time is that of the first
# sample and that the interval is given by time_delta.
if time_delta is None:
raise TypeError("'time' is scalar, so 'time_delta' is required")
if time_delta.isscalar:
time_delta = np.repeat(time_delta, n_samples)
time_delta = np.cumsum(time_delta)
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0. * u.s
time = time_start + time_delta
elif len(self.colnames) > 0 and len(time) != len(self):
raise ValueError("Length of 'time' ({}) should match "
"data length ({})".format(len(time), n_samples))
elif time_delta is not None:
raise TypeError("'time_delta' should not be specified since "
"'time' is an array")
with self._delay_required_column_checks():
if 'time' in self.colnames:
self.remove_column('time')
self.add_column(time, index=0, name='time')
@property
def time(self):
"""
The time values.
"""
return self['time']
@deprecated_renamed_argument('midpoint_epoch', 'epoch_time', '4.0')
def fold(self, period=None, epoch_time=None, epoch_phase=0,
wrap_phase=None, normalize_phase=False):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
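
        Examples
        --------
        A short usage sketch, given an existing ``ts`` (the period value is
        purely illustrative)::

            from astropy import units as u

            folded_ts = ts.fold(period=3.2 * u.day, normalize_phase=True)
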
"""
if not isinstance(period, Quantity) or period.unit.physical_type != 'time':
raise UnitsError('period should be a Quantity in units of time')
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if isinstance(epoch_phase, Quantity) and epoch_phase.unit.physical_type != 'dimensionless':
raise UnitsError('epoch_phase should be a dimensionless Quantity '
'or a float when normalize_phase=True')
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.
else:
if not isinstance(epoch_phase, Quantity) or epoch_phase.unit.physical_type != 'time':
raise UnitsError('epoch_phase should be a Quantity in units '
'of time when normalize_phase=False')
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(wrap_phase, Quantity) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError('wrap_phase should be dimensionless when '
'normalize_phase=True')
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError('wrap_phase should be between 0 and 1')
else:
wrap_phase = wrap_phase * period_sec
else:
if isinstance(wrap_phase, Quantity) and wrap_phase.unit.physical_type == 'time':
if wrap_phase < 0 or wrap_phase > period:
raise ValueError('wrap_phase should be between 0 and the period')
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError('wrap_phase should be a Quantity in units '
'of time when normalize_phase=False')
relative_time_sec = (((self.time - epoch_time).sec
+ epoch_phase_sec
+ (period_sec - wrap_phase)) % period_sec
- (period_sec - wrap_phase))
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column('time')
folded.add_column(folded_time, name='time', index=0)
return folded
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if 'time' not in item:
out = QTable([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
return super().__getitem__(item)
def add_column(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_column`.
"""
# Note that the docstring is inherited from QTable
result = super().add_column(*args, **kwargs)
if len(self.indices) == 0 and 'time' in self.colnames:
self.add_index('time')
return result
def add_columns(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_columns`.
"""
# Note that the docstring is inherited from QTable
result = super().add_columns(*args, **kwargs)
if len(self.indices) == 0 and 'time' in self.colnames:
self.add_index('time')
return result
@classmethod
def from_pandas(self, df, time_scale='utc'):
"""
Convert a :class:`~pandas.DataFrame` to a
:class:`astropy.timeseries.TimeSeries`.
Parameters
----------
df : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance.
time_scale : str
The time scale to pass into `astropy.time.Time`.
Defaults to ``UTC``.
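
        Examples
        --------
        A small sketch using a synthetic DataFrame::

            import numpy as np
            import pandas as pd

            df = pd.DataFrame({'flux': np.arange(3.)},
                              index=pd.date_range('2019-01-01', periods=3,
                                                  freq='D'))
            ts = TimeSeries.from_pandas(df)
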
"""
from pandas import DataFrame, DatetimeIndex
if not isinstance(df, DataFrame):
raise TypeError("Input should be a pandas DataFrame")
if not isinstance(df.index, DatetimeIndex):
raise TypeError("DataFrame does not have a DatetimeIndex")
time = Time(df.index, scale=time_scale)
table = Table.from_pandas(df)
return TimeSeries(time=time, data=table)
def to_pandas(self):
"""
Convert this :class:`~astropy.timeseries.TimeSeries` to a
:class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
"""
return Table(self).to_pandas(index='time')
@classmethod
def read(self, filename, time_column=None, time_format=None, time_scale=None, format=None, *args, **kwargs):
"""
Read and parse a file and returns a `astropy.timeseries.TimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.TimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
provide the column names for column containing the start times for the
bins, as well as other column names (see the Parameters section below
for details)::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',
... time_column='date') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_column : str, optional
The name of the time column.
time_format : str, optional
The time format for the time column.
time_scale : str, optional
The time scale for the time column.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.sampled.TimeSeries`
TimeSeries corresponding to file contents.
Notes
-----
"""
try:
# First we try the readers defined for the BinnedTimeSeries class
return super().read(filename, format=format, *args, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_column is None:
raise ValueError("``time_column`` should be provided since the default Table readers are being used.")
table = Table.read(filename, format=format, *args, **kwargs)
if time_column in table.colnames:
time = Time(table.columns[time_column], scale=time_scale, format=time_format)
table.remove_column(time_column)
else:
raise ValueError(f"Time column '{time_column}' not found in the input data.")
return TimeSeries(time=time, data=table)
| bsd-3-clause |
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/text.py | 69 | 55366 | """
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property Value
========================== =========================================================================
alpha float
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha [ 'center' | 'right' | 'left' ]
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
    rotation                   [ angle in degrees | 'vertical' | 'horizontal' ]
size or fontsize [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant [ 'normal' | 'small-caps' ]
verticalalignment or va [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible [True | False]
weight or fontweight [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
"""
Calculate the bounding box of the text. Unlike
:meth:`matplotlib.text.Text.get_extents` method, The bbox size of
the text before the rotation is calculated.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text(%g,%g,%s)"%(self._y,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
        if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
        # Now move the box to the target position, offsetting the display bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
        If rectprops has a "boxstyle" key, a FancyBboxPatch
is initialized with rectprops and will be drawn. The mutation
scale of the FancyBboxPath is set to the fontsize.
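
        For example, to draw the text in a rounded box (the boxstyle string
        below is one of the built-in styles):

        t.set_bbox(dict(boxstyle='round,pad=0.5', facecolor='wheat'))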
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
def get_bbox_patch(self):
"""
        Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBoxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
        return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
        'alias for :meth:`get_verticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
        Set the horizontal alignment to one of 'center', 'right', or 'left'.
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' % str(legal))
self._horizontalalignment = align
def set_ma(self, align):
        'alias for set_multialignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
        Set the alignment for multiple lines layout. The layout of the
        bounding box of all the lines is determined by the horizontalalignment
        and verticalalignment properties, but the multiline text within that
        box can be aligned to the left, right, or center with this property.
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
            raise ValueError('Multialignment must be one of %s' % str(legal))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' % str(legal))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
def is_math_text(self, s):
"""
        Returns a (string, ismath) pair: the string to render and a flag
        telling whether *s* should be rendered as mathtext (the flag is
        'TeX' when *text.usetex* is enabled).
"""
# Did we find an even number of non-escaped dollar signs?
        # If so, treat it as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash(%g,%g,%s)"%(self._x,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
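        # In canvas coordinates the dash runs from c1 = P + dashpush*d to
        # c2 = P + (dashpush + dashlength)*d, where d = (cos(theta), sin(theta)) and P is
        # the transformed (dashx, dashy); the text centre is then placed past c2 by
        # roughly half the text bbox extent plus dashpad, on the side given by
        # dashdirection.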
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
        Get the dash direction. 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
        Set the direction of the dash relative to the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
        if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
Set the figure instance the artist belong to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation(%g,%g,%s)"%(self.xy[0],self.xy[1],repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
        drawn. Otherwise, a YAArrow patch instance is created and
        drawn. Valid keys for YAArrow are
========= =============================================================
Key Description
========= =============================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
                  and base are shrink percent of the distance *d* away from the
                  endpoints, i.e., ``shrink=0.05 is 5%%``
? any key for :class:`matplotlib.patches.polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
        'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
        'axes fraction'   0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. Eg::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
        # we'll draw ourselves after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and arrowprops.has_key("arrowstyle"):
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
                # Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val-x0), val) for val in l, r, xc]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val-y0), val) for val in b, t, yc]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
| gpl-3.0 |
alutu/revelio | Revelio-parser/run_cgndetect_v5.5.py | 1 | 22476 | #!/usr/bin/env python
"""
Parse the raw data from Revelio tests running on SamKnows testbeds:
Data model of the Raw Revelio results:
"unit_id","dtime","local_ip","upnp_wan_ip","stun_mapped","hairpin","packet_size","traceroute","successes","failures","location_id"
Before running this script on the results, clean the results file using the following awk command:
$$$ more revelio_results.csv | grep "traceroute to " | awk -F "\",\"" '{OFS = ";"; $1=$1}{print $0}' | awk -F "\"" '{OFS = ""; $1=$1}{print $0}' > input_data_file
--first line is the traceroute to the mapped address (using 100 bytes packets)
142156;2014-05-03 00:39:56;lo:127.0.0.1,br-lan:192.168.1.211,br-lan:1:10.98.11.62,;upnp 86.135.103.84;stun 86.135.103.84:52792;no hairpin;100;traceroute to 86.135.103.84 (86.135.103.84), 30 hops max, 100 byte packets| 1 86.135.103.84 1.160 ms|;NULL;NULL;NULL;NULL;NULL;NULL;NULL;NULL;NULL;1;0;674854
--the rest of the lines are traceroutes to 4.69.202.89, using 21 different packet sizes
142156;2014-05-03 00:39:56;lo:127.0.0.1,br-lan:192.168.1.211,br-lan:1:10.98.11.62,;upnp 86.135.103.84;stun 86.135.103.84:52792;no hairpin;120;traceroute to 4.69.202.89 (4.69.202.89), 30 hops max, 120 byte packets| 1 192.168.1.254 0.940 ms| 2 *| 3 *| 4 213.120.158.157 18.829 ms| 5 212.140.235.138 20.919 ms| 6 217.41.169.195 20.947 ms| 7 217.41.169.109 21.340 ms| 8 109.159.251.225 20.546 ms| 9 109.159.251.143 32.544 ms|10 62.6.200.181 27.337 ms|11 166.49.168.49 30.949 ms|12 212.187.201.125 36.151 ms|13 4.69.202.181 27.340 ms|14 4.69.159.82 35.751 ms|15 4.69.161.82 37.731 ms|16 4.69.202.89 39.354 ms|;NULL;NULL;NULL;NULL;NULL;NULL;NULL;NULL;NULL;1;0;674854
This script also receives as input the metadata from SamKnows on the ISP identity, location etc.
"""
### in version 5.4 we address the numerous inconsistencies that we find in the data: e.g., some probes do not run all the 21 packet sizes
import sys, re
import netaddr
from netaddr import *
from pandas import *
import pandas
from scipy import *
from numpy import *
from matplotlib import *
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.stats import linregress
#import rpy2.rpy_classic as rpy
#from rpy import r -- gives problems with R3.3, use instead statsmodels
import statsmodels.api as sm
from math import *
dslite = IPNetwork("192.0.0.0/29")
sharedIP = IPNetwork("100.64.0.0/10")
private1 = IPNetwork("192.168.0.0/16")
private2 = IPNetwork("10.0.0.0/8")
private3 = IPNetwork("172.16.0.0/12")
class ABLine2D(plt.Line2D):
"""
Draw a line based on its slope and y-intercept. Keyword arguments are
passed to the <matplotlib.lines.Line2D> constructor.
"""
def __init__(self,slope,intercept,**kwargs):
# get current axes if user has not specified them
ax = kwargs.pop('axes',plt.gca())
# if unspecified, get the line color from the axes
if not (kwargs.has_key('color') or kwargs.has_key('c')):
kwargs.update({'color':ax._get_lines.color_cycle.next()})
# init the line, add it to the axes
super(ABLine2D,self).__init__([None],[None],**kwargs)
self._slope = slope
self._intercept = intercept
ax.add_line(self)
# cache the renderer, draw the line for the first time
ax.figure.canvas.draw()
self._update_lim(None)
# connect to axis callbacks
self.axes.callbacks.connect('xlim_changed',self._update_lim)
self.axes.callbacks.connect('ylim_changed',self._update_lim)
def _update_lim(self,event):
""" called whenever axis x/y limits change """
x = np.array(self.axes.get_xbound())
y = (self._slope*x)+self._intercept
self.set_data(x,y)
self.axes.draw_artist(self)
def uniq(lst):
    # unique elements, preserving the order of appearance
checked = []
for e in lst:
ttl = e.split(",")[1]
hop = e.split(",")[0]
if hop != "*":
if (hop, ttl) not in checked:
checked.append((hop, ttl))
return checked
"""
get the per-hop RTTs only from the traceroute results
traceroute to 4.69.202.89 (4.69.202.89), 30 hops max, 184 byte packets| 1 192.168.1.254 0.689 ms| 2 *| 3 *| 4 213.120.158.157 16.400 ms| 5 212.140.235.138 20.949 ms| 6 217.41.169.195 20.943 ms| 7 217.41.169.109 21.357 ms| 8 109.159.251.201 20.988 ms| 9 109.159.251.17 27.337 ms|10 62.6.200.181 27.327 ms|11 166.49.168.49 27.746 ms|12 212.187.201.125 37.351 ms|13 4.69.202.181 30.153 ms|14 4.69.159.90 36.982 ms|15 4.69.161.86 33.348 ms|16 4.69.202.89 43.316 ms|
"""
def get_RTTs(trace):
RTTs = dict()
hops = [] # if we want to
tr = trace.split("|")
for hop in tr[1:-1]:
result = hop.split()
if len(result) == 1:
return 0
#break
if result[1] != "*":
RTTs[result[0]] = result[2]
else:
RTTs[result[0]] = "0"
return RTTs
def get_hopIPs(trace):
hopIPs = dict()
hops = [] # if we want to
tr = trace.split("|")
for hop in tr[1:-1]:
result = hop.split()
if len(result)==1:
return 0
#break
if result[1] != "*":
hopIPs[result[0]] = result[1]
else:
hopIPs[result[0]] = "*"
return hopIPs
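# Pathchar's per-hop model assumed below: the minimum RTT grows linearly with the
# probe size, RTT(size) = size * (1/bandwidth) + latency; rtt_func encodes this
# relation (the robust fitting in main() is done with statsmodels).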
def rtt_func(x, ibw, lat):
return x*ibw + lat
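# Classify an address against the RFC 1918 private ranges defined above
# (192.168/16, 10/8, 172.16/12): returns 1 if private, -1 otherwise.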
def is_private(address):
#address = IPAddress(ip)
if address in private1:
return 1
elif address in private2:
return 1
elif address in private3:
return 1
else:
return -1
def stun_trace(stun, tracert):
for (hop, ttl) in tracert:
if (stun == hop):
return 1 # no CGN
return -1 # maybe CGN
try:
print sys.argv
## before running this script on the results, clean the results file using the following awk command:
## more curr_cgnatdetect.csv | grep "traceroute to " | awk -F "\",\"" '{OFS = ";"; $1=$1}{print $0}' | awk -F "\"" '{OFS = ""; $1=$1}{print $0}' > input_data_file
### output fields in the <res_out> file that stores the final version of the parsed results
### unitID;ISP;CC;STUN_MA;UPNP;HAIRPIN;ACCESS-LINK;hops-to-MA;timeout-MA
input_data= sys.argv[1]
print " CGNDETECT data: "+ input_data
metadata = sys.argv[2]
print " Unit metadata: "+ metadata
output = sys.argv[3]
print " Output file: " + output
res_out = sys.argv[4]
print " Res-out file: " + res_out
except:
print "Usage: run_cgndetect.py <input_data_file> <input_metadata_file> <output_file> <res-out file>"
sys.exit(0)
def main():
data = open(input_data, 'r')
out = open(output, 'w+') # output file where we write the "parsed" version of the raw data we get from the probes
# we have here the following fields: Probe_ID;ISP;CC;UPNP;STUN;Hairpin;time;packet_size;TTL;hopIP;RTT
meta = open(metadata, 'r') # metadata about the probes that ran the REVELIO tests
res_file = open(res_out, 'a+') # results file, where we have the actual result of the pathchar algorithm, but NOT the final decision
# (write and then ) run an additional script to verify the rules we have in the REVELIO flowchart
upnp_wan = dict()
cpe = dict()
stun = dict()
hairpin = dict()
traceroute_rtt = dict()
traceroute_ips = dict()
failed = []
# read the metadata for the probes running the tests
ISP = dict()
country = dict()
for line in meta:
#print str(line)
info = line.strip().split(",")
unit = str(info[0].split()[0])
isp = info[6]
if unit not in ISP:
ISP[unit] = isp
if unit not in country:
country[unit] = info[1]
#"unit_id","dtime","local_ip","upnp_wan_ip","stun_mapped","hairpin","packet_size","traceroute","successes","failures","location_id"
for line in data:
#try:
if "unit_id" in str(line):
continue
info = line.strip().split(";")
unit = info[0]
time = info[1]
ifs = info[2]
localIPs = ifs.split(",")
localIFs = dict()
for one in localIPs:
temp = one.split(":")
if(len(temp)==2):
localIFs[temp[0]] = temp[1]
#else:
# localIFs[temp[:-1]] = temp[-1]
if "br-lan" in localIFs:
cpe[unit, time] = localIFs["br-lan"]
elif "eth0" in localIFs:
cpe[unit, time] = localIFs["eth0"]
else:
cpe[unit, time] = "127.0.0.1"
if (unit, time) not in traceroute_rtt:
upnp_wan[unit, time] = ""
stun[unit, time] = ""
hairpin[unit, time] = "0"
traceroute_rtt[unit, time] = dict()
traceroute_ips[unit, time] = dict()
### get upnp address
if "upnp" in info[3]: #upnp test outputed something or ""
upnp_wan[unit, time] = info[3].split(" ")[1]
else:
upnp_wan[unit, time] = info[3]
### get stun mapped address
if "stun" in info[4]:
stun[unit, time] = info[4].split(" ")[1].split(":")[0]
else:
stun[unit, time] = "stun_failed"
### get hairpin TTL value
if info[5] == "no hairpin":
hairpin[unit, time] = 0
elif "TTL" in info[5]:
hairpin[unit, time] = info[5].split(":")[1]
            ### get the RTTs from the traceroute measurements
packet_size = info[6]
if get_RTTs(info[7]) != 0:
traceroute_rtt[unit, time][packet_size] = get_RTTs(info[7])
traceroute_ips[unit, time][packet_size] = get_hopIPs(info[7])
else:
print "Traceroute failed: unit " + str(unit) + ", time " + str(time) + ", packet_size " + str(packet_size)
else:
packet_size = info[6]
if get_RTTs(info[7]) != 0:
traceroute_rtt[unit, time][packet_size] = get_RTTs(info[7])
traceroute_ips[unit, time][packet_size] = get_hopIPs(info[7])
else:
print "Traceroute failed: unit " + str(unit) + ", time " + str(time) + ", packet_size " + str(packet_size)
### output the dataframe we will work with and then read it into a pandas.DataFrame
out.write('Probe_ID;ISP;CC;UPNP;STUN;Hairpin;time;packet_size;TTL;hopIP;RTT\n')
for (unit, time) in traceroute_rtt:
for packet_size in traceroute_rtt[unit,time]:
#print "writing to output file: " + str(unit) + " " + str(time) + " " +str(packet_size)
if str(unit) not in ISP:
ISP[str(unit)] = "N/A"
if unit not in country:
country[unit] = "N/A"
for ttl in traceroute_rtt[unit, time][packet_size]:
out.write(str(unit) + ";" + str(ISP[unit]) + ";" + str(country[str(unit)]) + ";" + str(upnp_wan[unit, time]) + ";" + str(stun[unit, time]) + ";" + str(hairpin[unit, time]) + ";" + str(time) + ";" + str(packet_size) + ";" + str(ttl) + ";" + str(traceroute_ips[unit, time][packet_size][ttl]) + ";" + str(traceroute_rtt[unit, time][packet_size][ttl]) + "\n")
out.close()
data = read_csv(output, sep = ";")
del upnp_wan, stun, hairpin, traceroute_ips, traceroute_rtt
### perform statistical analysis for the RTTs from traceroutes [PATHCHAR]
#perform the following per probe and then per TTL:
#1) minimum filtering
#2) curve-fitting
#3) differencing to find the BW and latency values on each link
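    # Illustrative sketch of the steps applied below (symbols only, not real data):
    #   1) per (probe, TTL, packet_size): SORTT = min of all observed RTTs
    #   2) per (probe, TTL): robust linear fit  min_rtt ~ slope*packet_size + intercept
    #   3) per link i (differencing): bandwidth_i ~ 8/(1000*(slope_i - slope_{i-1})) [Mb/s]
    #                                 latency_i  ~ (intercept_i - intercept_{i-1})/2 [ms]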
SORTTs = dict()
slope_intercept = dict()
links = dict()
hops_to_MA = dict()
timeout_MA = dict()
upnp_wan = dict() # the set of WAN-IPs observed over the time of the analysis
stun = dict() # the set of STUN mapped addresses observed over the time of the analysis
hairpin = dict()
trace_hop = []
cascade_nat = dict() # number of private IPs which appear consequtively in the traceroute to the fixed address in Level3
for probe in data['Probe_ID'].unique():
### plt.figure(1)
probe_data = data[data['Probe_ID']==probe] # select the results only from whitebox "probe"
print "Processing results for Whitebox with unique ID " + str(probe)
# get some information on the whiteboxes: country and ISP
upnp_wan[probe] = []
stun[probe] = []
hairpin[probe] = []
# count the number of hops until the STUN-MA for every time the test ran
hops_to_MA[probe] = set()
timeout_MA[probe] = set()
trace_ma = probe_data[probe_data['packet_size'] == 100]
for t in trace_ma['time'].unique():
timeout_MA[probe].add(len(trace_ma[trace_ma['time'] == t]['TTL']))
if "*" in trace_ma[trace_ma['time'] == t]['hopIP'].unique():
hops_to_MA[probe].add(len(trace_ma[trace_ma['time'] == t]['hopIP'].unique()) - 1)
else:
hops_to_MA[probe].add(len(trace_ma[trace_ma['time'] == t]['hopIP'].unique()))
# output only one value per probe for the following: STUN-MA; UPNP-WAN; HAIRPIN
upnp_wan[probe] = probe_data['UPNP'].unique()
stun[probe] = probe_data['STUN'].unique()
hairpin[probe] = probe_data['Hairpin'].unique()
# continue with pathchar
print "TTL vector: " + str(sort(probe_data['TTL'].unique()))
for ttl in list(sort(probe_data['TTL'].unique())): # separate TTL: process one TTL value at a time
ttl_probe_data = probe_data[probe_data['TTL']==ttl] # select all the traceroute result for all packet lengths with TTL = ttl
SORTTs[probe, ttl]= dict()
print " \nRunning pathchar for link [TTL]" + str(ttl)
#1)
for packet_size in ttl_probe_data['packet_size'].unique():
if packet_size >100 : # exclude the traceroute to the STUN mapped address which is being done with packet_size = 100
SORTTs[probe, ttl][packet_size] = ttl_probe_data[ttl_probe_data['packet_size'] == packet_size]['RTT'].min()
# we can also use the second percentile .quantile(0.02) instead of the minimum .min()
#print "Percentile: " + str(percentile(ttl_probe_data[ttl_probe_data['packet_size'] == packet_size]['RTT'], 2))
#print "Quantile: " + str(ttl_probe_data[ttl_probe_data['packet_size'] == packet_size]['RTT'].quantile(0.02))
else:
continue # replace this with counting the number of hops to the STUN mapped address
# which is further needed to run the CGN detection
#2)
# normally, we should have input for 21 different packet sizes, check that we do, otherwise discard since the fitting cannot be done
print "Number of packet sizes tested: " + str(len(zip([packet_size for packet_size in SORTTs[probe, ttl]], [SORTTs[probe, ttl][packet_size] for packet_size in SORTTs[probe, ttl]])))
if len(zip([packet_size for packet_size in SORTTs[probe, ttl]], [SORTTs[probe, ttl][packet_size] for packet_size in SORTTs[probe, ttl]])) >= 19 :
probe_ttl_df = DataFrame(SORTTs[probe, ttl].items(), columns = ['packet_size', 'min_rtt'])
##print "Data Frame empty: " + str(len(probe_ttl_df.as_matrix()))
# check that we do have data to work with
if len(probe_ttl_df.as_matrix()) > 1:
linear_model = sm.RLM.from_formula("min_rtt ~ packet_size", data = probe_ttl_df, M=sm.robust.norms.LeastSquares())
#linear_model = sm.RLM(probe_ttl_df['min_rtt'], probe_ttl_df['packet_size'], M=sm.robust.norms.LeastSquares)
res = linear_model.fit()
try:
print res.summary()
except:
print "Error here!!!"
slope_intercept[probe, ttl] = [res.params['packet_size'], res.params['Intercept']]
if probe not in links:
links[probe] = []
links[probe].append(ttl)
else:
links[probe].append(ttl)
else:
print " No input for pathchar"
###plt.plot(probe_ttl_df['packet_size'], probe_ttl_df['min_rtt'], 'o', color = 'k')
###plt.plot(probe_ttl_df['packet_size'], res.fittedvalues, '-');
#ABLine2D(res.params['packet_size'], res.params['Intercept'])
else:
print " Not enough input to run pathchar: hop did not reply to traceroute"
slope_intercept[probe, ttl] = [0, 0]
### plt.xlabel("Packet Size [bytes]")
### plt.ylabel("RTT [ms]")
### plt.title("PathChar: Curve Fitting for Probe " + str(probe))
### plt.show()
#3)
bw_lat = dict()
for probe in data['Probe_ID'].unique():
bw_lat[probe] = dict()
if slope_intercept[probe, 1][0]>0: ### why for some we don't have values in the slope-intercept?
### to fix this, we added the probes with not enough data to run pathchar in the slope_intercept dict
        ### we control for values of 0
bw = 8/(1000*slope_intercept[probe, 1][0])
else:
bw = 0
if slope_intercept[probe, 1][1]>0:
lat = slope_intercept[probe, 1][1]/2
else:
lat = 0
bw_lat[probe][1] = [bw, lat] # values for TTL = 1 --> the first link
print "Differentiating to obtain BW and LAT estimates for probe " + str(probe)
print "TTL vector: " + str(sort(data[data['Probe_ID'] == probe]['TTL'].unique()))
print " Link 1: BW [Mb] , LAT[ms]: " + str(bw_lat[probe][1])
if probe in links:
for ttl in list(links[probe]):
if ttl+1 < len(list(links[probe])):
#add condition here to take only the non-zero values of the RTT
if slope_intercept[probe, ttl+1][0] == 0 or slope_intercept[probe, ttl+1][0] == 'nan':
slope_intercept[probe, ttl+1][0] = slope_intercept[probe, ttl][0]
if slope_intercept[probe, ttl+1][0] <= slope_intercept[probe, ttl][0]:
try:
if (slope_intercept[probe, ttl][0] - slope_intercept[probe, ttl+1][0])/slope_intercept[probe, ttl][0] < 0.5:
bw = bw_lat[probe][ttl][0]
else:
bw = 8/(1000*(slope_intercept[probe, ttl+1][0] - slope_intercept[probe, ttl][0]))
except:
bw = 0
else:
bw = 8/(1000*(slope_intercept[probe, ttl+1][0] - slope_intercept[probe, ttl][0]))
if slope_intercept[probe, ttl +1][1] == 0 or slope_intercept[probe, ttl +1][1] == 'nan':
slope_intercept[probe, ttl +1][1] = slope_intercept[probe, ttl][1]
if slope_intercept[probe, ttl +1][1] <= slope_intercept[probe, ttl][1]:
try:
if (slope_intercept[probe, ttl][1] - slope_intercept[probe, ttl+1][1])/slope_intercept[probe, ttl][1] < 0.5:
lat = bw_lat[probe][ttl][1]
else:
lat = (slope_intercept[probe, ttl +1][1] - slope_intercept[probe, ttl][1])/2
except:
lat = 0
else:
lat = (slope_intercept[probe, ttl +1][1] - slope_intercept[probe, ttl][1])/2
bw_lat[probe][ttl+1] = [bw, lat]
print " Link " + str(ttl+1) + ": BW [Mb] , LAT[ms]: " + str(bw_lat[probe][ttl +1])
#4) Detect the access link and the location of the SK Whitebox
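    # Heuristic used below: walk the per-link latency estimates and flag the first
    # hop whose latency jumps by at least one order of magnitude (in log10) over the
    # previous hop; that hop is taken to be the access link.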
access_link = dict()
for probe in bw_lat:
for ttl in bw_lat[probe]:
try:
if ttl > 1 and ttl+1 in bw_lat[probe]:
print "TTL:" + str(ttl) + " for probe " + str(probe)
print "LATENCY: " + str(bw_lat[probe][ttl][1]) + " previous TTL: " + str(bw_lat[probe][ttl-1][1])
if ceil(log10(bw_lat[probe][ttl][1])) - ceil(log10(bw_lat[probe][ttl-1][1])) >=1: # --> this is the difference is order of magnitute
                    # if this difference is greater than or equal to 1 --> access link detected!
print "Access link detected for probe " + str(probe) + ": link " + str(ttl)
access_link[probe] = ttl
break
#if bw_lat[probe][ttl][1] >= 3* bw_lat[probe][ttl-1][1] and bw_lat[probe][ttl][1] >= bw_lat[probe][ttl+1][1]:
#print "Access link detected for probe " + str(probe) + ": link " + str(ttl)
#access_link[probe] = ttl
#break
elif ttl+1 not in bw_lat[probe]:
print "Access link detected for probe " + str(probe) + ": cannot detect"
access_link[probe] = 0
except:
print "Access link detected for probe " + str(probe) + ": cannot detect"
access_link[probe] = 0
### output the parsed Revelio output from the whiteboxes, including the output from the pathchar
### output fields:
### unitID;ISP;CC;STUN_MA;UPNP;HAIRPIN;ACCESS-LINK;hops-to-MA;timeout-MA
#TODO: add here the fields on the presence of shared IP addresses and the private IP address
for unit in access_link:
print "Unit number to res_file: " + str(unit)
if str(unit) not in ISP:
ISP[str(unit)] = "N/A"
if unit not in country:
country[unit] = "N/A"
try:
res_file.write(str(unit) + ";" + str(ISP[str(unit)]) + ";" + str(country[str(unit)]) + ";" + " ".join([str(x) for x in stun[unit]]) + ";" + " ".join([str(x) for x in upnp_wan[unit]]) + ";" + " ".join([str(x) for x in hairpin[unit]]) + ";" + str(access_link[unit]) + ";" + str(list(hops_to_MA[unit])) + ";" + str(list(timeout_MA[unit])) + "\n")
except:
#continue
if unit not in country:
country[str(unit)] = "N/A"
ISP[str(unit)] = "N/A"
res_file.write(str(unit) + ";" + str(ISP[str(unit)]) + ";" + str(country[str(unit)]) + ";" + " ".join([str(x) for x in stun[unit]]) + ";" + " ".join([str(x) for x in upnp_wan[unit]]) + ";" + " ".join([str(x) for x in hairpin[unit]]) + ";" + str(access_link[unit]) + ";" + str(list(hops_to_MA[unit])) + ";" + str(list(timeout_MA[unit])) + "\n")
res_file.close()
if __name__ == '__main__': #only when run from cmd line
main()
| gpl-2.0 |
ovaskevich/PyLaTeX | setup.py | 2 | 3715 | try:
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.egg_info import egg_info
except ImportError:
from distutils.core import setup
import sys
import os
import subprocess
import errno
if sys.version_info[:2] <= (2, 6):
raise RuntimeError(
"You're using Python <= 2.6, but this package requires either Python "
"2.7, or 3.3 or above, so you can't use it unless you upgrade your "
"Python version."
)
dependencies = ['ordered-set']
extras = {
'docs': ['sphinx'],
'matrices': ['numpy'],
'matplotlib': ['matplotlib'],
'quantities': ['quantities', 'numpy'],
'testing': ['flake8', 'pep8-naming', 'flake8_docstrings', 'nose'],
'convert_to_py2': ['3to2', 'future'],
}
if sys.version_info[0] == 3:
source_dir = '.'
else:
source_dir = 'python2_source'
dependencies.append('future')
PY2_CONVERTED = False
extras['all'] = list(set([req for reqs in extras.values() for req in reqs]))
# Automatically convert the source from Python 3 to Python 2 if we need to.
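# The two command subclasses below hook that conversion into setuptools: pip runs
# egg_info and install, so convert_to_py2() gets a chance to run before the package
# is built or installed.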
class CustomInstall(install):
def run(self):
convert_to_py2()
install.run(self)
class CustomEggInfo(egg_info):
def initialize_options(self):
convert_to_py2()
egg_info.initialize_options(self)
def convert_to_py2():
if source_dir == 'python2_source' and not PY2_CONVERTED:
try:
# Check if 3to2 exists
subprocess.check_output(['3to2', '--help'])
subprocess.check_output(['pasteurize', '--help'])
except OSError as e:
if e.errno != errno.ENOENT:
raise e
if not os.path.exists(os.path.join(source_dir, 'pylatex')):
raise ImportError('3to2 and future need to be installed '
'before installing when PyLaTeX for Python '
'2.7 when it is not installed using one of '
'the pip releases.')
else:
converter = os.path.dirname(os.path.realpath(__file__)) \
+ '/convert_to_py2.sh'
subprocess.check_call([converter])
global PY2_CONVERTED
PY2_CONVERTED = True
setup(name='PyLaTeX',
version='1.0.0',
author='Jelte Fennema',
author_email='[email protected]',
description='A Python library for creating LaTeX files and snippets',
long_description=open('README.rst').read(),
package_dir={'': source_dir},
packages=['pylatex', 'pylatex.base_classes'],
url='https://github.com/JelteF/PyLaTeX',
license='MIT',
install_requires=dependencies,
extras_require=extras,
cmdclass={
'install': CustomInstall,
'egg_info': CustomEggInfo,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Code Generators',
'Topic :: Text Processing :: Markup :: LaTeX',
]
)
| mit |
nmayorov/scikit-learn | examples/decomposition/plot_sparse_coding.py | 27 | 4037 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
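# The dictionary atoms below are Ricker (Mexican hat) wavelets; the intended form is
#   psi(x) = 2/(sqrt(3*width)*pi**(1/4)) * (1 - ((x-center)/width)**2)
#            * exp(-(x-center)**2 / (2*width**2)),
# though the exact prefactor is immaterial here because every dictionary row is
# re-normalized to unit l2 norm in ricker_matrix.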
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=int(n_components // 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/cluster/tests/test_affinity_propagation.py | 2 | 1596 | """
Testing for Clustering methods
"""
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from sklearn.cluster.affinity_propagation_ import AffinityPropagation, \
affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
"""Affinity Propagation algorithm
"""
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(S,
preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
| agpl-3.0 |
ian-r-rose/burnman | misc/benchmarks/chemical_potential_benchmarks.py | 1 | 3982 | from __future__ import absolute_import
# Benchmarks for the chemical potential functions
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
import burnman
from burnman.processchemistry import *
from burnman.chemicalpotentials import *
import burnman.constants as constants
atomic_masses = read_masses()
class Re (burnman.Mineral):
def __init__(self):
formula = 'Re1.0'
formula = dictionarize_formula(formula)
self.params = {
'name': 'Re',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': 0.0,
'S_0': 36.53,
'V_0': 8.862e-06,
'Cp': [23.7, 0.005448, 68.0, 0.0],
'a_0': 1.9e-05,
'K_0': 3.6e+11,
'Kprime_0': 4.05,
'Kdprime_0': -1.1e-11,
'n': sum(formula.values()),
'molar_mass': formula_mass(formula, atomic_masses)}
burnman.Mineral.__init__(self)
class ReO2 (burnman.Mineral):
def __init__(self):
formula = 'Re1.0O2.0'
formula = dictionarize_formula(formula)
self.params = {
'name': 'ReO2',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': -445140.0,
'S_0': 47.82,
'V_0': 1.8779e-05,
'Cp': [76.89, 0.00993, -1207130.0, -208.0],
'a_0': 4.4e-05,
'K_0': 1.8e+11,
'Kprime_0': 4.05,
'Kdprime_0': -2.25e-11,
'n': sum(formula.values()),
'molar_mass': formula_mass(formula, atomic_masses)}
burnman.Mineral.__init__(self)
'''
Here we find the oxygen fugacity of the FMQ assemblage
and also of the Re-ReO2 buffer.
Fugacity is often defined relative to a material at
some fixed reference pressure;
here we use room pressure, 100 kPa.
'''
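# Note (added for clarity): the empirical buffer curves below convert the
# parameterised oxygen chemical potential mu_O2 (relative to the standard state)
# into log10(fO2) via log10(fO2) = mu_O2 / (R * T * ln(10)), which is what
# np.log10(np.exp(mu_O2 / (constants.gas_constant * T))) evaluates.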
fa = burnman.minerals.HP_2011_ds62.fa()
mt = burnman.minerals.HP_2011_ds62.mt()
qtz = burnman.minerals.HP_2011_ds62.q()
FMQ = [fa, mt, qtz]
oxygen = burnman.minerals.HP_2011_fluids.O2()
rhenium = Re()
rheniumIVoxide = ReO2()
ReReO2buffer = [rhenium, rheniumIVoxide]
Pr = 1.e5
temperatures = np.linspace(900., 1420., 100)
log10fO2_FMQ_ONeill1987 = np.empty_like(temperatures)
log10fO2_FMQ = np.empty_like(temperatures)
invT = np.empty_like(temperatures)
P = 1.e5
for i, T in enumerate(temperatures):
oxygen.set_state(Pr, T)
for mineral in FMQ:
mineral.set_state(P, T)
for mineral in ReReO2buffer:
mineral.set_state(P, T)
muO2_FMQ_ONeill1987 = -587474. + 1584.427 * \
T - 203.3164 * T * np.log(T) + 0.092710 * T * T
log10fO2_FMQ_ONeill1987[i] = np.log10(
np.exp((muO2_FMQ_ONeill1987) / (constants.gas_constant * T)))
invT[i] = 10000. / (T)
log10fO2_FMQ[i] = np.log10(fugacity(oxygen, FMQ))
plt.plot(temperatures, log10fO2_FMQ_ONeill1987,
'k', linewidth=3., label='FMQ (O\'Neill (1987)')
plt.plot(temperatures, log10fO2_FMQ, 'b--',
linewidth=3., label='FMQ (HP 2011 ds62)')
temperatures = np.linspace(850., 1250., 100)
log10fO2_Re_PO1994 = np.empty_like(temperatures)
log10fO2_ReReO2buffer = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
oxygen.set_state(Pr, T)
for mineral in FMQ:
mineral.set_state(P, T)
for mineral in ReReO2buffer:
mineral.set_state(P, T)
muO2_Re_PO1994 = -451020 + 297.595 * T - 14.6585 * T * np.log(T)
log10fO2_Re_PO1994[i] = np.log10(
np.exp((muO2_Re_PO1994) / (constants.gas_constant * T)))
invT[i] = 10000. / (T)
log10fO2_ReReO2buffer[i] = np.log10(fugacity(oxygen, ReReO2buffer))
plt.plot(temperatures, log10fO2_Re_PO1994, 'k',
linewidth=3., label='Re-ReO2 (Pownceby and O\'Neill (1994)')
plt.plot(temperatures, log10fO2_ReReO2buffer, 'r--',
linewidth=3., label='Re-ReO2 (HP 2011 ds62)')
plt.ylabel("log_10 (fO2)")
plt.xlabel("T (K)")
plt.legend(loc='lower right')
plt.show()
| gpl-2.0 |
dingocuster/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, note that glmnet divides the
    # objective by nobs (the number of observations).
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in incorrect
# computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
keras-team/keras-io | examples/vision/learnable_resizer.py | 1 | 10839 | """
Title: Learning to Resize in Computer Vision
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/04/30
Last modified: 2021/05/13
Description: How to optimally learn representations of images for a given resolution.
"""
"""
It is a common belief that if we constrain vision models to perceive things as humans do,
their performance can be improved. For example, in [this work](https://arxiv.org/abs/1811.12231),
Geirhos et al. showed that the vision models pre-trained on the ImageNet-1k dataset are
biased toward texture whereas human beings mostly use the shape descriptor to develop a
common perception. But does this belief always apply, especially when it comes to improving
the performance of vision models?
It turns out it may not always be the case. When training vision models, it is common to
resize images to a lower resolution ((224 x 224), (299 x 299), etc.) to allow mini-batch
learning and to stay within compute limitations. We generally use image resizing
methods like **bilinear interpolation** for this step, and the resized images do
not lose much of their perceptual character to the human eye. In
[Learning to Resize Images for Computer Vision Tasks](https://arxiv.org/abs/2103.09950v1), Talebi et al. show
that if we try to optimize the perceptual quality of the images for the vision models
rather than the human eyes, their performance can further be improved. They investigate
the following question:
**For a given image resolution and a model, how to best resize the given images?**
As shown in the paper, this idea helps to consistently improve the performance of the
common vision models (pre-trained on ImageNet-1k) like DenseNet-121, ResNet-50,
MobileNetV2, and EfficientNets. In this example, we will implement the learnable image
resizing module as proposed in the paper and demonstrate that on the
[Cats and Dogs dataset](https://www.microsoft.com/en-us/download/details.aspx?id=54765)
using the [DenseNet-121](https://arxiv.org/abs/1608.06993) architecture.
This example requires TensorFlow 2.4 or higher.
"""
"""
## Setup
"""
from tensorflow.keras import layers
from tensorflow import keras
import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import matplotlib.pyplot as plt
import numpy as np
"""
## Define hyperparameters
"""
"""
In order to facilitate mini-batch learning, we need to have a fixed shape for the images
inside a given batch. This is why an initial resizing is required. We first resize all
the images to (300 x 300) shape and then learn their optimal representation for the
(150 x 150) resolution.
"""
INP_SIZE = (300, 300)
TARGET_SIZE = (150, 150)
INTERPOLATION = "bilinear"
AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 64
EPOCHS = 5
"""
In this example, we will use the bilinear interpolation but the learnable image resizer
module is not dependent on any specific interpolation method. We can also use others,
such as bicubic.
"""
"""
## Load and prepare the dataset
For this example, we will only use 40% of the total training dataset.
"""
train_ds, validation_ds = tfds.load(
"cats_vs_dogs",
# Reserve 10% for validation
split=["train[:40%]", "train[40%:50%]"],
as_supervised=True,
)
def preprocess_dataset(image, label):
image = tf.image.resize(image, (INP_SIZE[0], INP_SIZE[1]))
label = tf.one_hot(label, depth=2)
return (image, label)
train_ds = (
train_ds.shuffle(BATCH_SIZE * 100)
.map(preprocess_dataset, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
validation_ds = (
validation_ds.map(preprocess_dataset, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
"""
## Define the learnable resizer utilities
The figure below (courtesy: [Learning to Resize Images for Computer Vision Tasks](https://arxiv.org/abs/2103.09950v1))
presents the structure of the learnable resizing module:

"""
def conv_block(x, filters, kernel_size, strides, activation=layers.LeakyReLU(0.2)):
x = layers.Conv2D(filters, kernel_size, strides, padding="same", use_bias=False)(x)
x = layers.BatchNormalization()(x)
if activation:
x = activation(x)
return x
def res_block(x):
inputs = x
x = conv_block(x, 16, 3, 1)
x = conv_block(x, 16, 3, 1, activation=None)
return layers.Add()([inputs, x])
def get_learnable_resizer(filters=16, num_res_blocks=1, interpolation=INTERPOLATION):
inputs = layers.Input(shape=[None, None, 3])
# First, perform naive resizing.
naive_resize = layers.experimental.preprocessing.Resizing(
*TARGET_SIZE, interpolation=interpolation
)(inputs)
# First convolution block without batch normalization.
x = layers.Conv2D(filters=filters, kernel_size=7, strides=1, padding="same")(inputs)
x = layers.LeakyReLU(0.2)(x)
# Second convolution block with batch normalization.
x = layers.Conv2D(filters=filters, kernel_size=1, strides=1, padding="same")(x)
x = layers.LeakyReLU(0.2)(x)
x = layers.BatchNormalization()(x)
# Intermediate resizing as a bottleneck.
bottleneck = layers.experimental.preprocessing.Resizing(
*TARGET_SIZE, interpolation=interpolation
)(x)
# Residual passes.
for _ in range(num_res_blocks):
x = res_block(bottleneck)
# Projection.
x = layers.Conv2D(
filters=filters, kernel_size=3, strides=1, padding="same", use_bias=False
)(x)
x = layers.BatchNormalization()(x)
# Skip connection.
x = layers.Add()([bottleneck, x])
# Final resized image.
x = layers.Conv2D(filters=3, kernel_size=7, strides=1, padding="same")(x)
final_resize = layers.Add()([naive_resize, x])
return tf.keras.Model(inputs, final_resize, name="learnable_resizer")
learnable_resizer = get_learnable_resizer()
"""
## Visualize the outputs of the learnable resizing module
Here, we visualize what the resized images look like after being passed through the
random weights of the resizer.
"""
sample_images, _ = next(iter(train_ds))
plt.figure(figsize=(16, 10))
for i, image in enumerate(sample_images[:6]):
image = image / 255
ax = plt.subplot(3, 4, 2 * i + 1)
plt.title("Input Image")
plt.imshow(image.numpy().squeeze())
plt.axis("off")
ax = plt.subplot(3, 4, 2 * i + 2)
resized_image = learnable_resizer(image[None, ...])
plt.title("Resized Image")
plt.imshow(resized_image.numpy().squeeze())
plt.axis("off")
"""
## Model building utility
"""
def get_model():
backbone = tf.keras.applications.DenseNet121(
weights=None,
include_top=True,
classes=2,
input_shape=((TARGET_SIZE[0], TARGET_SIZE[1], 3)),
)
backbone.trainable = True
inputs = layers.Input((INP_SIZE[0], INP_SIZE[1], 3))
x = layers.experimental.preprocessing.Rescaling(scale=1.0 / 255)(inputs)
x = learnable_resizer(x)
outputs = backbone(x)
return tf.keras.Model(inputs, outputs)
"""
The structure of the learnable image resizer module allows for flexible integrations with
different vision models (a short sketch of swapping in a different backbone follows the
training step below).
"""
"""
## Compile and train our model with learnable resizer
"""
model = get_model()
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
optimizer="sgd",
metrics=["accuracy"],
)
model.fit(train_ds, validation_data=validation_ds, epochs=EPOCHS)
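"""
As noted earlier, the resizer integrates flexibly with different vision models. Below is
a quick sketch (ours, not part of the original benchmark) that pairs the very same
`learnable_resizer` instance with another backbone, ResNet50V2; the helper name
`get_resnet_model` is hypothetical.
"""
def get_resnet_model():
    # Same pattern as `get_model` above; only the backbone differs.
    backbone = tf.keras.applications.ResNet50V2(
        weights=None,
        include_top=True,
        classes=2,
        input_shape=((TARGET_SIZE[0], TARGET_SIZE[1], 3)),
    )
    inputs = layers.Input((INP_SIZE[0], INP_SIZE[1], 3))
    x = layers.experimental.preprocessing.Rescaling(scale=1.0 / 255)(inputs)
    x = learnable_resizer(x)
    return tf.keras.Model(inputs, backbone(x))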
"""
## Visualize the outputs of the trained resizer
"""
plt.figure(figsize=(16, 10))
for i, image in enumerate(sample_images[:6]):
image = image / 255
ax = plt.subplot(3, 4, 2 * i + 1)
plt.title("Input Image")
plt.imshow(image.numpy().squeeze())
plt.axis("off")
ax = plt.subplot(3, 4, 2 * i + 2)
resized_image = learnable_resizer(image[None, ...])
plt.title("Resized Image")
plt.imshow(resized_image.numpy().squeeze() / 10)
plt.axis("off")
"""
The plot shows that the visuals of the images have improved with training. The following
table shows the benefits of using the resizing module in comparison to using the bilinear
interpolation:
| Model | Number of parameters (Million) | Top-1 accuracy |
|:-------------------------: |:-------------------------------: |:--------------: |
| With the learnable resizer | 7.051717 | 67.67% |
| Without the learnable resizer | 7.039554 | 60.19% |
For more details, you can check out [this repository](https://github.com/sayakpaul/Learnable-Image-Resizing).
Note that the above-reported models were trained for 10 epochs on 90% of the Cats and
Dogs training set, unlike this example. Also, note that the increase in the number of
parameters due to the resizing module is very negligible. To ensure that the improvement
in the performance is not due to stochasticity, the models were trained using the same
initial random weights.
Now, a question worth asking here is - _isn't the improved accuracy simply a consequence
of adding more layers (the resizer is a mini network after all) to the model, compared to
the baseline?_
To show that it is not the case, the authors conduct the following experiment:
* Take a pre-trained model trained at some size, say (224 x 224).
* Now, first, use it to infer predictions on images resized to a lower resolution. Record
the performance.
* For the second experiment, plug in the resizer module at the top of the pre-trained
model and warm-start the training. Record the performance.
Now, the authors argue that using the second option is better because it helps the model
learn how to adjust the representations better with respect to the given resolution.
Since the results are purely empirical, a few more experiments, such as analyzing the
cross-channel interaction, would have been even better. It is worth noting that elements
like [Squeeze and Excitation (SE) blocks](https://arxiv.org/abs/1709.01507), [Global Context (GC) blocks](https://arxiv.org/pdf/1904.11492) also add a few
parameters to an existing network but they are known to help a network process
information in systematic ways to improve the overall performance.
"""
"""
## Notes
* To impose shape bias inside the vision models, Geirhos et al. trained them with a
combination of natural and stylized images. It might be interesting to investigate if
this learnable resizing module could achieve something similar, as the outputs seem to
discard the texture information.
* The resizer module can handle arbitrary resolutions and aspect ratios which is very
important for tasks like object detection and segmentation.
* There is another closely related topic on ***adaptive image resizing*** that attempts
to resize images/feature maps adaptively during training. [EfficientNetV2](https://arxiv.org/pdf/2104.00298)
uses this idea.
"""
| apache-2.0 |
aetilley/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
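    # (for a linear SVM, the distance from the separating hyperplane to each
    # margin hyperplane is 1 / ||w||, which is what `margin` computes below)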
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
davidh-ssec/pyresample | pyresample/plot.py | 1 | 11967 | #!/usr/bin/env python
# encoding: utf8
#
# Copyright (C) 2010-2018
#
# Authors:
# Esben S. Nielsen
# Thomas Lavergne
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import numpy as np
def ellps2axis(ellps_name):
    """Get semi-major and semi-minor axes from an ellipsoid definition
    Parameters
    ----------
    ellps_name : str
        Standard name of the ellipsoid
    Returns
    -------
    (a, b) : semi-major and semi-minor axes
    """
ellps = {'helmert': {'a': 6378200.0, 'b': 6356818.1696278909},
'intl': {'a': 6378388.0, 'b': 6356911.9461279465},
'merit': {'a': 6378137.0, 'b': 6356752.2982159676},
'wgs72': {'a': 6378135.0, 'b': 6356750.5200160937},
'sphere': {'a': 6370997.0, 'b': 6370997.0},
'clrk66': {'a': 6378206.4000000004, 'b': 6356583.7999999998},
'nwl9d': {'a': 6378145.0, 'b': 6356759.7694886839},
'lerch': {'a': 6378139.0, 'b': 6356754.2915103417},
'evrstss': {'a': 6377298.5559999999, 'b': 6356097.5503008962},
'evrst30': {'a': 6377276.3449999997, 'b': 6356075.4131402401},
'mprts': {'a': 6397300.0, 'b': 6363806.2827225132},
'krass': {'a': 6378245.0, 'b': 6356863.0187730473},
'walbeck': {'a': 6376896.0, 'b': 6355834.8466999996},
'kaula': {'a': 6378163.0, 'b': 6356776.9920869097},
'wgs66': {'a': 6378145.0, 'b': 6356759.7694886839},
'evrst56': {'a': 6377301.2429999998, 'b': 6356100.2283681016},
'new_intl': {'a': 6378157.5, 'b': 6356772.2000000002},
'airy': {'a': 6377563.3959999997, 'b': 6356256.9100000001},
'bessel': {'a': 6377397.1550000003, 'b': 6356078.9628181886},
'seasia': {'a': 6378155.0, 'b': 6356773.3205000004},
'aust_sa': {'a': 6378160.0, 'b': 6356774.7191953054},
'wgs84': {'a': 6378137.0, 'b': 6356752.3142451793},
'hough': {'a': 6378270.0, 'b': 6356794.3434343431},
'wgs60': {'a': 6378165.0, 'b': 6356783.2869594367},
'engelis': {'a': 6378136.0499999998, 'b': 6356751.3227215428},
'apl4.9': {'a': 6378137.0, 'b': 6356751.796311819},
'andrae': {'a': 6377104.4299999997, 'b': 6355847.4152333336},
'sgs85': {'a': 6378136.0, 'b': 6356751.301568781},
'delmbr': {'a': 6376428.0, 'b': 6355957.9261637237},
'fschr60m': {'a': 6378155.0, 'b': 6356773.3204827355},
'iau76': {'a': 6378140.0, 'b': 6356755.2881575283},
'plessis': {'a': 6376523.0, 'b': 6355863.0},
'cpm': {'a': 6375738.7000000002, 'b': 6356666.221912113},
'fschr68': {'a': 6378150.0, 'b': 6356768.3372443849},
'mod_airy': {'a': 6377340.1890000002, 'b': 6356034.4460000005},
'grs80': {'a': 6378137.0, 'b': 6356752.3141403561},
'bess_nam': {'a': 6377483.8650000002, 'b': 6356165.3829663256},
'fschr60': {'a': 6378166.0, 'b': 6356784.2836071067},
'clrk80': {'a': 6378249.1449999996, 'b': 6356514.9658284895},
'evrst69': {'a': 6377295.6639999999, 'b': 6356094.6679152036},
'grs67': {'a': 6378160.0, 'b': 6356774.5160907144},
'evrst48': {'a': 6377304.0630000001, 'b': 6356103.0389931547}}
try:
ellps_axis = ellps[ellps_name.lower()]
a = ellps_axis['a']
b = ellps_axis['b']
except KeyError as e:
        raise ValueError(('Could not determine semi-major and semi-minor axes '
                          'of the specified ellipsoid %s') % ellps_name)
return a, b
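# Example (illustrative):
#   >>> ellps2axis('wgs84')
#   (6378137.0, 6356752.3142451793)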
def area_def2basemap(area_def, **kwargs):
"""Get Basemap object from AreaDefinition
Parameters
---------
area_def : object
geometry.AreaDefinition object
\*\*kwargs: Keyword arguments
Additional initialization arguments for Basemap
Returns
-------
bmap : Basemap object
"""
import warnings
warnings.warn("Basemap is no longer maintained. Please switch to cartopy "
"by using 'area_def.to_cartopy_crs()'. See the pyresample "
"documentation for more details.", DeprecationWarning)
from mpl_toolkits.basemap import Basemap
try:
a, b = ellps2axis(area_def.proj_dict['ellps'])
rsphere = (a, b)
except KeyError:
try:
a = float(area_def.proj_dict['a'])
try:
b = float(area_def.proj_dict['b'])
rsphere = (a, b)
except KeyError:
rsphere = a
except KeyError:
# Default to WGS84 ellipsoid
a, b = ellps2axis('wgs84')
rsphere = (a, b)
# Add projection specific basemap args to args passed to function
basemap_args = kwargs
basemap_args['rsphere'] = rsphere
if area_def.proj_dict['proj'] in ('ortho', 'geos', 'nsper'):
llcrnrx, llcrnry, urcrnrx, urcrnry = area_def.area_extent
basemap_args['llcrnrx'] = llcrnrx
basemap_args['llcrnry'] = llcrnry
basemap_args['urcrnrx'] = urcrnrx
basemap_args['urcrnry'] = urcrnry
else:
llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat = area_def.area_extent_ll
basemap_args['llcrnrlon'] = llcrnrlon
basemap_args['llcrnrlat'] = llcrnrlat
basemap_args['urcrnrlon'] = urcrnrlon
basemap_args['urcrnrlat'] = urcrnrlat
if area_def.proj_dict['proj'] == 'eqc':
basemap_args['projection'] = 'cyl'
else:
basemap_args['projection'] = area_def.proj_dict['proj']
# Try adding potentially remaining args
for key in ('lon_0', 'lat_0', 'lon_1', 'lat_1', 'lon_2', 'lat_2',
'lat_ts'):
try:
basemap_args[key] = float(area_def.proj_dict[key])
except KeyError:
pass
return Basemap(**basemap_args)
def _basemap_get_quicklook(area_def, data, vmin=None, vmax=None,
label='Variable (units)', num_meridians=45,
num_parallels=10, coast_res='110m', cmap='jet'):
if area_def.shape != data.shape:
raise ValueError('area_def shape %s does not match data shape %s' %
(list(area_def.shape), list(data.shape)))
import matplotlib.pyplot as plt
bmap = area_def2basemap(area_def, resolution=coast_res)
bmap.drawcoastlines()
if num_meridians > 0:
bmap.drawmeridians(np.arange(-180, 180, num_meridians))
if num_parallels > 0:
bmap.drawparallels(np.arange(-90, 90, num_parallels))
if not (np.ma.isMaskedArray(data) and data.mask.all()):
col = bmap.imshow(data, origin='upper', vmin=vmin, vmax=vmax, cmap=cmap)
plt.colorbar(col, shrink=0.5, pad=0.05).set_label(label)
return plt
def _get_quicklook(area_def, data, vmin=None, vmax=None,
label='Variable (units)', num_meridians=45,
num_parallels=10, coast_res='110m', cmap='jet'):
"""Get default cartopy matplotlib plot."""
bmap_to_cartopy_res = {
'c': '110m',
'l': '110m',
'i': '50m',
'h': '10m',
'f': '10m'
}
try:
from pyresample import _cartopy # noqa
except ImportError:
if coast_res.endswith('m'):
_rev_map = {v: k for k, v in bmap_to_cartopy_res.items()}
coast_res = _rev_map[coast_res]
return _basemap_get_quicklook(
area_def, data, vmin, vmax, label, num_meridians,
num_parallels, coast_res=coast_res, cmap=cmap)
if coast_res and coast_res not in ['110m', '50m', '10m']:
import warnings
warnings.warn("'coast_res' should be either '110m', '50m', '10m'.")
coast_res = {
'c': '110m',
'l': '110m',
'i': '50m',
'h': '10m',
'f': '10m'
}[coast_res]
if area_def.shape != data.shape:
raise ValueError('area_def shape %s does not match data shape %s' %
(list(area_def.shape), list(data.shape)))
import matplotlib.pyplot as plt
crs = area_def.to_cartopy_crs()
ax = plt.axes(projection=crs)
ax.coastlines(resolution=coast_res)
ax.set_global()
xlocs = None
ylocs = None
if num_meridians:
xlocs = np.arange(-180, 180, num_meridians)
if num_parallels:
ylocs = np.arange(-90, 90, num_parallels)
ax.gridlines(xlocs=xlocs, ylocs=ylocs)
if not (np.ma.isMaskedArray(data) and data.mask.all()):
col = ax.imshow(data, transform=crs, extent=crs.bounds,
origin='upper', vmin=vmin, vmax=vmax, cmap=cmap)
plt.colorbar(col, shrink=0.5, pad=0.05).set_label(label)
return plt
def show_quicklook(area_def, data, vmin=None, vmax=None,
label='Variable (units)', num_meridians=45,
num_parallels=10, coast_res='110m', cmap='jet'):
"""Display default quicklook plot
Parameters
---------
area_def : object
geometry.AreaDefinition object
data : numpy array | numpy masked array
2D array matching area_def. Use masked array for transparent values
vmin : float, optional
Min value for luminescence scaling
vmax : float, optional
Max value for luminescence scaling
label : str, optional
Label for data
num_meridians : int, optional
Number of meridians to plot on the globe
num_parallels : int, optional
Number of parallels to plot on the globe
    coast_res : {'110m', '50m', '10m'}, optional
        Resolution of coastlines. The legacy Basemap codes
        {'c', 'l', 'i', 'h', 'f'} are also accepted and mapped to their
        cartopy equivalents.
    Returns
    -------
    None
        The quicklook is rendered with matplotlib and shown on screen.
    """
plt = _get_quicklook(area_def, data, vmin=vmin, vmax=vmax,
label=label, num_meridians=num_meridians,
num_parallels=num_parallels, coast_res=coast_res,
cmap=cmap)
plt.show()
plt.close()
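# Illustrative usage (a sketch; the area definition follows the standard pyresample
# quickstart and the values are arbitrary, not tied to any particular dataset; `data`
# is a 2D numpy array matching the area shape):
#   from pyresample import geometry
#   area_def = geometry.AreaDefinition('ease_sh', 'Antarctic EASE grid', 'ease_sh',
#                                      {'proj': 'laea', 'lat_0': '-90', 'lon_0': '0',
#                                       'a': '6371228.0', 'units': 'm'},
#                                      425, 425,
#                                      (-5326849.0625, -5326849.0625,
#                                       5326849.0625, 5326849.0625))
#   show_quicklook(area_def, data, label='Brightness temperature (K)')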
def save_quicklook(filename, area_def, data, vmin=None, vmax=None,
label='Variable (units)', num_meridians=45,
num_parallels=10, coast_res='110m', backend='AGG',
                   cmap='jet'):
    """Save a default quicklook plot to file
Parameters
----------
filename : str
path to output file
area_def : object
geometry.AreaDefinition object
data : numpy array | numpy masked array
2D array matching area_def. Use masked array for transparent values
vmin : float, optional
Min value for luminescence scaling
vmax : float, optional
Max value for luminescence scaling
label : str, optional
Label for data
num_meridians : int, optional
Number of meridians to plot on the globe
num_parallels : int, optional
Number of parallels to plot on the globe
    coast_res : {'110m', '50m', '10m'}, optional
        Resolution of coastlines. The legacy Basemap codes
        {'c', 'l', 'i', 'h', 'f'} are also accepted.
    backend : str, optional
        matplotlib backend to use
"""
import matplotlib
matplotlib.use(backend, warn=False)
plt = _get_quicklook(area_def, data, vmin=vmin, vmax=vmax,
label=label, num_meridians=num_meridians,
num_parallels=num_parallels, coast_res=coast_res)
plt.savefig(filename, bbox_inches='tight')
plt.close()
| lgpl-3.0 |
ThomasMiconi/htmresearch | projects/capybara/sandbox/sklearn/run_baseline.py | 9 | 2773 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
from sklearn.neural_network import MLPClassifier
from htmresearch.frameworks.classification.utils.traces import loadTraces
from utils import get_file_name, convert_to_sdrs
def load_sdrs(start_idx, end_idx, exp_name):
# Params
input_width = 2048 * 32
active_cells_weight = 0
predicted_active_cells_weight = 1
network_config = 'sp=True_tm=True_tp=False_SDRClassifier'
# load traces
file_name = get_file_name(exp_name, network_config)
traces = loadTraces(file_name)
num_records = len(traces['sensorValue'])
# start and end
if start_idx < 0:
start = num_records + start_idx
else:
start = start_idx
if end_idx < 0:
end = num_records + end_idx
else:
end = end_idx
# input data
sensor_values = traces['sensorValue'][start:end]
categories = traces['actualCategory'][start:end]
active_cells = traces['tmActiveCells'][start:end]
predicted_active_cells = traces['tmPredictedActiveCells'][start:end]
# generate sdrs to cluster
active_cells_sdrs = convert_to_sdrs(active_cells, input_width)
predicted_active_cells_sdrs = np.array(
convert_to_sdrs(predicted_active_cells, input_width))
sdrs = (float(active_cells_weight) * np.array(active_cells_sdrs) +
float(predicted_active_cells_weight) * predicted_active_cells_sdrs)
return sdrs, categories
def train_model(X, y):
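  # The L-BFGS solver is used here; per the scikit-learn docs it converges well
  # on small datasets like the SDR batches produced by load_sdrs().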
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)
return clf
if __name__ == "__main__":
exp_name = '1x.40000.body_acc_x'
start_idx = 600
end_idx = 800
sdrs, categories = load_sdrs(start_idx, end_idx, exp_name)
clf = train_model(sdrs, categories)
predictions = clf.predict([sdrs[0], sdrs[1]])
print "Predictions: %s" % predictions
| agpl-3.0 |
hrichstein/Stellar_mass_env_Density | Pickle_output/actual_real_pickle_in.py | 1 | 17734 | from __future__ import division, absolute_import
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
# rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
import astropy.stats
import cPickle as pickle
import glob
import math
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import pandas as pd
from scipy import integrate,optimize,spatial
# class Vars(object):
# size_xlabel = 48
# size_ylabel = 48
# size_text = 20
# size_tick = 24
# size_legend = 24
class Vars(object):
size_xlabel = 24
size_ylabel = 24
size_text = 18
size_tick = 18
size_legend = 18
va = Vars()
plt.rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
###############################################################################
pickle_in = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_Density"
pickle_in+= r"\Pickle_output"
###############################################################################
pickle_in_rats = pickle_in
pickle_in_rats+=r"\ratio_bands.p"
rats_vals = pickle.load(open(pickle_in_rats,"rb"))
one_dex_ratios = rats_vals[0]
two_dex_ratios = rats_vals[1]
three_dex_ratios = rats_vals[2]
one_dex_rat_dict = {1:one_dex_ratios['1_4'],5:one_dex_ratios['5_4'],\
20:one_dex_ratios['20_4']}
two_dex_rat_dict = {1:two_dex_ratios['1_4'],5:two_dex_ratios['5_4'],\
20:two_dex_ratios['20_4']}
three_dex_rat_dict = {1:three_dex_ratios['1_4'],5:three_dex_ratios['5_4'],\
20:three_dex_ratios['20_4']}
all_rat_dict = {1:one_dex_rat_dict,2:two_dex_rat_dict,3:three_dex_rat_dict}
###############################################################################
pickle_in_meds = pickle_in
pickle_in_meds+=r"\med_bands.p"
meds_vals = pickle.load(open(pickle_in_meds,"rb"))
one_dex_meds = meds_vals[0]
two_dex_meds = meds_vals[1]
three_dex_meds = meds_vals[2]
# one_dm_slim = {1:one_dex_meds['1'],5:one_dex_meds['5'],20:one_dex_meds['20']}
# two_dm_slim = {1:two_dex_meds['1'],5:two_dex_meds['5'],20:two_dex_meds['20']}
# three_dm_slim = {1:three_dex_meds['1'],5:three_dex_meds['5'],\
# 20:three_dex_meds['20']}
one_dex_meds_dict = {1:one_dex_meds['1'],5:one_dex_meds['5'],\
20:one_dex_meds['20']}
two_dex_meds_dict = {1:two_dex_meds['1'],5:two_dex_meds['5'],\
20:two_dex_meds['20']}
three_dex_meds_dict = {1:three_dex_meds['1'],5:three_dex_meds['5'],\
20:three_dex_meds['20']}
all_meds_dict = {1:one_dex_meds_dict,2:two_dex_meds_dict,3:three_dex_meds_dict}
##dictionaries with [['10', '20', '1', '3', '2', '5']] keys
##yields a list with two arrays (upper and lower bounds)
###############################################################################
pickle_in_hists = pickle_in
pickle_in_hists+=r"\hist_bands.p"
hists_vals = pickle.load(open(pickle_in_hists,"rb"))
two_dex_hists_low = hists_vals[2]
hists_dict_low = {1:two_dex_hists_low['1_4'],5:two_dex_hists_low['5_4'],\
20:two_dex_hists_low['20_4']}
two_dex_hists_high = hists_vals[3]
hists_dict_high = {1:two_dex_hists_high['1_4'],5:two_dex_hists_high['5_4'],\
20:two_dex_hists_high['20_4']}
# for ii in neigh_vals:
# for tt in range (2):
# print len(hists_dict[ii][tt])
###############################################################################
##eco_low,eco_high,eco_ratio_info, eco_final_bins,eco_medians
pickle_in_eco = pickle_in
pickle_in_eco+=r"\eco_data.p"
eco_vals = pickle.load(open(pickle_in_eco,"rb"))
eco_low_hist = eco_vals[0]
eco_high_hist = eco_vals[1]
eco_ratio = {1:eco_vals[2][0][0][4],5:eco_vals[2][3][0][4],\
20:eco_vals[2][5][0][4]}
eco_rat_err = {1:eco_vals[2][0][1][1],5:eco_vals[2][3][1][1],\
20:eco_vals[2][5][1][1]}
eco_bins = {1:eco_vals[3][0][1],5:eco_vals[3][3][1],20:eco_vals[3][5][1]}
eco_meds = {1:eco_vals[4][0],5:eco_vals[4][3],20:eco_vals[4][5]}
bins = np.arange(9.1,11.9,0.2)
bin_centers= 0.5*(bins[:-1]+bins[1:])
##eco_meds... eventually 3 arrays. First is median line, second and third are \
##low and high bootstrap
###############################################################################
pickle_in_eco_hists = pickle_in
pickle_in_eco_hists+=r"\eco_hists.p"
eco_hists = pickle.load(open(pickle_in_eco_hists,"rb"))
eco_high_counts = {1:(eco_hists[1][1][4],eco_hists[1][1]['err_4']),\
5:(eco_hists[1][5][4],eco_hists[1][5]['err_4']),\
20:(eco_hists[1][20][4],eco_hists[1][20]['err_4'])}
eco_low_counts = {1:(eco_hists[0][1][4],eco_hists[0][1]['err_4']),\
5:(eco_hists[0][5][4],eco_hists[0][5]['err_4']),\
20:(eco_hists[0][20][4],eco_hists[0][20]['err_4'])}
eco_high_bins = {1:eco_hists[3][1][4],5:eco_hists[3][5][4],\
20:eco_hists[3][20][4]}
eco_low_bins = {1:eco_hists[2][1][4],5:eco_hists[2][5][4],\
20:eco_hists[2][20][4]}
###############################################################################
# def plot_bands(bin_centers,upper,lower,ax,plot_idx,color='silver',label=None):
# """
# """
# # ax.set_yscale('symlog')
# ax.set_ylim(0,4)
# ax.set_xlim(9.1,11.8)
# ax.set_xticks(np.arange(9.5, 12., 0.5))
# ax.set_yticks([0,1,2,3,4])
# ax.tick_params(axis='both', labelsize=12)
# ax.fill_between(bin_centers,upper,lower,color=color,alpha=0.1,label=label)
# if plot_idx == 0:
# ax.legend(loc='best')
# plot_neigh_dict = {0:1,1:5,2:20}
# title = 'n = {0}'.format(plot_neigh_dict[plot_idx])
# ax.text(0.05, 0.05, title,horizontalalignment='left',\
# verticalalignment='bottom',transform=ax.transAxes,fontsize=18)
# ###############################################################################
# def plot_eco_rats(bin_centers,y_vals,y_err,neigh_val,ax,frac_val,plot_idx):
# """
# """
# if plot_idx ==1:
# ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
# ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
# ax.errorbar(bin_centers,y_vals,yerr=y_err,\
# color='deeppink',linewidth=1,label='ECO')
###############################################################################
def plot_every_rat(bin_cens,upper,lower,ax,plot_idx,neigh_val,eco_bins,\
eco_vals,eco_err,color='silver',label=None,alpha=0.1):
label_eco = None
ax.set_ylim(0,4)
ax.set_xlim(9.1,11.8)
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([0,1,2,3,4])
ax.tick_params(axis='both', labelsize=va.size_tick)
ax.fill_between(bin_cens,upper,lower,color=color,alpha=alpha,label=label)
plot_neigh_dict = {0:1,1:5,2:20}
title = r"\boldmath$N=%d$"%(neigh_val)
if plot_idx == 2:
ax.text(0.05, 0.05, title,horizontalalignment='left',\
verticalalignment='bottom',transform=ax.transAxes,\
fontsize=va.size_text)
# if plot_idx ==2:
if neigh_val == 5:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=va.size_xlabel)
if neigh_val == 1:
label_eco = 'ECO'
# ax.set_ylabel(r'\begin{center}$\textnormal{Ratio of Quartiles} \\ \Large (N_{Q4}/N_{Q1})$\end{center}',
# fontsize=va.size_ylabel,multialignment='center')
ax.set_ylabel(r'$\textnormal{N}_{high}\ /\ \textnormal{N}_{low}$',
fontsize=va.size_ylabel)
else:
label_eco = None
ax.axhline(y=1,c="darkorchid",linewidth=2,zorder=0)
ax.errorbar(eco_bins,eco_vals,yerr=eco_err,\
color='deeppink',linewidth=1,label=label_eco)
if neigh_val == 1:
ax.legend(loc='best',numpoints=1,fontsize=va.size_legend)
###############################################################################
nrow_num = int(1)
ncol_num = int(3)
dict_to_neigh = {1:1,5:2,20:3}
dict_to_zz = {1:0,5:1,20:2}
neigh_vals = np.array([1,5,20])
fig, axes = plt.subplots(nrows=nrow_num,ncols=ncol_num,figsize=(14,4),\
sharey=True)
# figure_title = fig.suptitle\
# (r"Abundance Ratio of Galaxies in Top/Bottom 25\% Density Regions", \
# fontsize=20)
# figure_title.set_y(1.0)
# fig.subplots_adjust(bottom=0.17, right=0.99, left=0.03,top=0.94, hspace=0, wspace=0)
scatter_dict_params = {1:['darkorchid','0.1 dex', 0.5],
2:['royalblue','0.2 dex', 0.4],
3:['violet','0.3 dex', 0.4]}
axes_flat= axes.flatten()
zz = int(0)
for yy in range(2,3):
for xx in neigh_vals:
upper = all_rat_dict[yy][xx][0]
lower = all_rat_dict[yy][xx][1]
if xx == 1:
ax_as = axes_flat[0]
if xx == 5:
ax_as = axes_flat[1]
if xx == 20:
ax_as = axes_flat[2]
# Color parameters
color, label, alpha = scatter_dict_params[yy]
# if yy == 1:
# color = 'lightpink'
# label = '0.1 dex'
# alpha = 0.5
if yy ==2:
color = 'royalblue'
label = '0.2 dex'
# label=None
alpha = 0.4
# if yy ==3:
# color = 'violet'
# label = '0.3 dex'
# alpha = 0.4
plot_every_rat(bin_centers[:-1],upper,lower,ax_as,yy,xx,\
eco_bins[xx],eco_ratio[xx],eco_rat_err[xx],\
color=color,label=label,alpha=alpha)
# zz = int(0)
# while zz == 0:
# for yy in range(2,3):
# for xx in neigh_vals:
# upper = all_rat_dict[yy][xx][0]
# lower = all_rat_dict[yy][xx][1]
# if xx == 1:
# ax_as = axes_flat[0]
# if xx == 5:
# ax_as = axes_flat[1]
# if xx == 20:
# ax_as = axes_flat[2]
# if yy == 1:
# color = 'lightpink'
# label = '0.1 dex'
# alpha = 0.5
# if yy ==2:
# color = 'royalblue'
# label = '0.2 dex'
# # label=None
# alpha = 0.4
# if yy ==3:
# color = 'violet'
# label = '0.3 dex'
# alpha = 0.4
# plot_every_rat(bin_centers[:-1],upper,lower,ax_as,yy,xx,\
# eco_bins[xx],eco_ratio[xx],eco_rat_err[xx],\
# color=color,label=label,alpha=alpha)
# zz+=1
# plt.tight_layout()
# plt.subplots_adjust(top=0.93,bottom=0.21,left=0.11,right=0.99,hspace=0.20,wspace=0.)
plt.subplots_adjust(top=0.93,bottom=0.21,left=0.06,right=0.99,hspace=0.20,\
wspace=0)
plt.show()
###############################################################################
def plot_every_med(bin_cens,upper,lower,ax,plot_idx,\
eco_vals,neigh_val,color='silver',label=None,alpha=0.1):
label_eco = None
ax.set_yscale('symlog')
ax.set_xlim(9.1,11.8)
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.tick_params(axis='both', labelsize=va.size_tick)
ax.set_ylim(0,10**1)
ax.set_yticks(np.arange(0,12,1))
ax.set_yticklabels(np.arange(1,10,4))
# ax.fill_between(bin_cens,upper,lower,color=color,alpha=alpha,label=label)
title = r"\boldmath$N=%d$"%(neigh_val)
ybot = np.array(eco_vals[0] - eco_vals[1])
ytop = np.array(eco_vals[2] - eco_vals[0])
ax.errorbar(bin_cens,eco_vals[0],yerr=(ybot,ytop),\
color='deeppink',linewidth=1,label=label_eco)
if neigh_val == 1:
ax.set_ylabel(r'$D_{N}\ \textnormal{(Mpc)}$',fontsize = \
va.size_ylabel)
if plot_idx == 2:
ax.text(0.05, 0.05, title,horizontalalignment='left',\
verticalalignment='bottom',transform=ax.transAxes,\
fontsize=va.size_text)
if neigh_val == 1:
ax.legend(loc='upper left',numpoints=1,fontsize=va.size_legend)
label_eco = 'ECO'
else:
label_eco = None
if neigh_val == 5:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=va.size_xlabel)
###############################################################################
nrow_num = int(1)
ncol_num = int(3)
neigh_vals = np.array([1,5,20])
fig,axes = plt.subplots(nrows=nrow_num,ncols=ncol_num,figsize=(14,4),\
sharey=True,sharex=True)
# figure_title = fig.suptitle(r"Median Distance to Nth Nearest Neighbor", \
# fontsize=20)
# figure_title.set_y(1.0)
# fig.subplots_adjust(bottom=0.17, right=0.99, left=0.06,top=0.94, hspace=0, wspace=0)
axes_flat= axes.flatten()
# for yy in all_meds_dict:
for yy in range(2,3):
color, label, alpha = scatter_dict_params[yy]
for xx in neigh_vals:
upper = all_meds_dict[yy][xx][0]
lower = all_meds_dict[yy][xx][1]
if xx == 1:
ax_as = axes_flat[0]
if xx == 5:
ax_as = axes_flat[1]
if xx == 20:
ax_as = axes_flat[2]
plot_every_med(bin_centers[:-1],upper,lower,ax_as,yy,\
eco_meds[xx],xx,color=color,label=label,alpha=alpha)
# zz = int(0)
# while zz == 0:
# # for yy in all_meds_dict:
# for yy in ([1,3]):
# for xx in neigh_vals:
# upper = all_meds_dict[yy][xx][0]
# lower = all_meds_dict[yy][xx][1]
# if xx == 1:
# ax_as = axes_flat[0]
# if xx == 5:
# ax_as = axes_flat[1]
# if xx == 20:
# ax_as = axes_flat[2]
# if yy == 1:
# color = 'darkviolet'
# label = '0.1 dex'
# alpha = 0.5
# if yy == 2:
# color = 'royalblue'
# label = '0.2 dex'
# alpha = 0.3
# if yy == 3:
# color = 'springgreen'
# label = '0.3 dex'
# alpha = 0.5
# plot_every_med(bin_centers[:-1],upper,lower,ax_as,yy,\
# eco_meds[xx],xx,color=color,label=label,alpha=alpha)
# zz+=1
plt.subplots_adjust(top=0.93,bottom=0.21,left=0.06,right=0.99,hspace=0.20,\
wspace=0)
plt.show()
###############################################################################
def plot_eco_hists(bins_high,bins_low,high_counts,low_counts, \
high_counts_err,low_counts_err,ax,bin_centers,\
upper_h,lower_h,upper_l,lower_l,neigh_val):
ax.set_yscale('log')
ax.set_xticks(np.arange(9.5,12,0.5))
ax.set_xlim(9.1,11.7)
ax.tick_params(axis='both', labelsize=va.size_tick)
if neigh_val == 1:
label_high = 'High Density'
label_low = 'Low Density'
else:
label_high = None
label_low = None
# ax.fill_between(bin_centers,upper_h,lower_h,color='royalblue',\
# alpha=0.3)
# ax.fill_between(bin_centers,upper_l,lower_l,color='royalblue',\
# alpha=0.3)
ax.errorbar(bins_low,low_counts,\
yerr=low_counts_err,drawstyle='steps-mid',\
color='darkblue',label=label_low)
ax.errorbar(bins_high,high_counts,\
yerr=high_counts_err,drawstyle='steps-mid',\
color='deeppink',label=label_high)
title = r"\boldmath$N=%d$"%(neigh_val)
ax.text(0.05, 0.55, title,horizontalalignment='left',\
verticalalignment='bottom',transform=ax.transAxes,\
fontsize=va.size_text)
if neigh_val == 1:
ax.set_ylabel('Counts',fontsize=va.size_ylabel)
# (r'$\log\ \left(\frac{\textnormal{N}_{gal/bin}}{\textnormal{N}_{total}\ * \ dlogM}\right)$',\
# fontsize=24)
ax.legend(loc='best',fontsize=va.size_legend)
if neigh_val == 5:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=va.size_xlabel)
###############################################################################
eco_low_hist = eco_vals[0]
eco_high_hist = eco_vals[1]
fig,axes = plt.subplots(nrows=nrow_num,ncols=ncol_num,figsize=(14,4),\
sharey=True,sharex=True)
# figure_title = fig.suptitle(r"Abundance of Galaxies in Top/Bottom 25\% Density Regions", \
# fontsize=20)
# figure_title.set_y(1.0)
# fig.subplots_adjust(bottom=0.17, right=0.99, left=0.06,top=0.94, hspace=0, wspace=0)
axes_flat = axes.flatten()
for xx in neigh_vals:
if xx == 1:
ax_as = axes_flat[0]
if xx == 5:
ax_as = axes_flat[1]
if xx == 20:
ax_as = axes_flat[2]
plot_eco_hists(eco_high_bins[xx],eco_low_bins[xx],\
eco_high_counts[xx][0],eco_low_counts[xx][0],\
eco_high_counts[xx][1],eco_low_counts[xx][1],\
ax_as,bin_centers[:-1],hists_dict_high[xx][0],\
hists_dict_high[xx][1],hists_dict_low[xx][0],\
hists_dict_low[xx][1],xx)
# zz = int(0)
# while zz ==0:
# for xx in neigh_vals:
# if xx == 1:
# ax_as = axes_flat[0]
# if xx == 5:
# ax_as = axes_flat[1]
# if xx == 20:
# ax_as = axes_flat[2]
# plot_eco_hists(eco_high_bins[xx],eco_low_bins[xx],\
# eco_high_counts[xx][0],eco_low_counts[xx][0],\
# eco_high_counts[xx][1],eco_low_counts[xx][1],\
# ax_as,bin_centers[:-1],hists_dict_high[xx][0],\
# hists_dict_high[xx][1],hists_dict_low[xx][0],\
# hists_dict_low[xx][1],xx)
# zz+=1
plt.subplots_adjust(top=0.93,bottom=0.21,left=0.11,right=0.99,hspace=0.20,
wspace=0)
plt.show() | mit |
sniemi/SamPy | bolshoi/plotdarkmattermf.py | 1 | 29775 | '''
Plots a dark matter halo mass function at different
redshifts. Input data are from the Bolshoi simulation.
:author: Sami-Matias Niemi
'''
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 14
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.2)
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.handlelength'] = 2
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.use('PDF')
import pylab as P
import numpy as N
import glob as g
import os
#From Sami's repo
import astronomy.differentialfunctions as df
import smnIO.read as io
import db.sqlite
import log.Logger as lg
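# A small, self-contained helper (an editorial sketch, not called by the functions below)
# that makes the chain rule used repeatedly in this module explicit: for masses mbin given
# in linear units, dN/dM = dN/dlog10(M) / (M * ln 10).
def dex_to_per_mass(mbin, mf_per_dex):
    return mf_per_dex / (mbin * N.log(10))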
def plot_mass_function(redshift, h, no_phantoms, *data):
#fudge factor
fudge = 2.0
#http://adsabs.harvard.edu/abs/2001MNRAS.321..372J
#Jenkins et al. paper has sqrt(2./pi) while
#Rachel's code has 1/(sqrt(2*pi))
#ratio of these two is the fudge factor
#read data to dictionary
dt = {}
for x in data[0]:
if 'Bolshoi' in x:
dt[x] = io.readBolshoiDMfile(data[0][x], 0, no_phantoms)
else:
dt[x] = N.loadtxt(data[0][x])
#calculate the mass functions from the Bolshoi data
mbin0, mf0 = df.diffFunctionLogBinning(dt['Bolshoi'] / h,
nbins=35,
h=0.7,
mmin=10 ** 9.0,
mmax=10 ** 15.0,
physical_units=True)
del dt['Bolshoi']
#use chain rule to get dN / dM
#dN/dM = dN/dlog10(M) * dlog10(M)/dM
#d/dM (log10(M)) = 1 / (M*ln(10))
mf0 *= 1. / (mbin0 * N.log(10))
#put mass back to power
mbin0 = 10 ** mbin0
#title
if no_phantoms:
ax1.set_title('Bolshoi Dark Matter Mass Functions (no phantoms)')
else:
ax1.set_title('Bolshoi Dark Matter Mass Functions')
#mark redshift
for a, b in zip(mbin0[::-1], mf0[::-1]):
if b > 10 ** -6:
break
ax1.annotate('$z \sim %.1f$' % redshift,
(0.98 * a, 3 * 10 ** -6), size='x-small')
#Analytical MFs
#0th column: log10 of mass (Msolar, NOT Msolar/h)
#1st column: mass (Msolar/h)
#2nd column: (dn/dM)*dM, per Mpc^3 (NOT h^3/Mpc^3)
xST = 10 ** dt['Sheth-Tormen'][:, 0]
yST = dt['Sheth-Tormen'][:, 2] * fudge
sh = ax1.plot(xST, yST, 'b-', lw=1.3)
#PS
xPS = 10 ** dt['Press-Schecter'][:, 0]
yPS = dt['Press-Schecter'][:, 2] * fudge
ps = ax1.plot(xPS, yPS, 'g--', lw=1.1)
#MF from Bolshoi
bolshoi = ax1.plot(mbin0, mf0, 'ro:', ms=5)
#delete data to save memory, dt is not needed any longer
del dt
#plot the residuals
if round(float(redshift), 1) < 1.5:
        #interpolate to the right x scale
yST = N.interp(mbin0, xST, yST)
yPS = N.interp(mbin0, xPS, yPS)
#make the plot
ax2.annotate('$z \sim %.1f$' % redshift,
(1.5 * 10 ** 9, 1.05), xycoords='data',
size=10)
ax2.axhline(1.0, color='b')
ax2.plot(mbin0, mf0 / yST, 'b-')
ax2.plot(mbin0, mf0 / yPS, 'g-')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim(10 ** -7, 10 ** -1)
ax2.set_ylim(0.45, 1.55)
ax1.set_xlim(10 ** 9, 10 ** 15)
ax2.set_xlim(10 ** 9, 10 ** 15)
ax1.set_xticklabels([])
ax2.set_xlabel(r'$M_{\mathrm{vir}} \quad [M_{\odot}]$')
ax1.set_ylabel(r'$\mathrm{d}N / \mathrm{d}M_{\mathrm{vir}} \quad [\mathrm{Mpc}^{-3} \mathrm{dex}^{-1}]$')
ax2.set_ylabel(r'$\frac{\mathrm{Bolshoi}}{\mathrm{Model}}$')
ax1.legend((bolshoi, sh, ps),
('Bolshoi', 'Sheth-Tormen', 'Press-Schecter'),
shadow=True, fancybox=True,
numpoints=1)
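# Hedged usage sketch for plot_mass_function (the file names below are hypothetical): it
# expects the module-level axes ax1 and ax2 to already exist (in __main__ they are set up in
# the commented-out plotting blocks) and a dict mapping the three keys to input files, e.g.
#   data = {'Bolshoi': 'simu/mf_z1_01.txt',
#           'Sheth-Tormen': 'analytical/mf_sheth-tormen_1_01-fit.dat',
#           'Press-Schecter': 'analytical/mf_press-schecter_1_01-fit.dat'}
#   plot_mass_function(1.01, 0.7, True, data)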
def plot_mass_functionAnalytical2(redshift, h, no_phantoms, *data):
#fudge factor
fudge = 1.
#http://adsabs.harvard.edu/abs/2001MNRAS.321..372J
#Jenkins et al. paper has sqrt(2./pi) while
#Rachel's code has 1/(sqrt(2*pi))
#ratio of these two is the fudge factor
#read data
dt = {}
for x in data[0]:
if 'Bolshoi' in x:
dt[x] = io.readBolshoiDMfile(data[0][x], 0, no_phantoms)
else:
#M dN/dM dNcorr/dM dN/dlog10(M) dN/dlog10(Mcorr)
d = N.loadtxt(data[0][x])
dt['Press-Schecter'] = N.array([d[:, 0], d[:, 3]])
dt['Sheth-Tormen'] = N.array([d[:, 0], d[:, 4]])
#calculate the mass functions from the Bolshoi data
mbin0, mf0 = df.diffFunctionLogBinning(dt['Bolshoi'] / h,
nbins=35,
h=0.7,
mmin=10 ** 9.0,
mmax=10 ** 15.0,
physical_units=True)
del dt['Bolshoi']
mbin0 = 10 ** mbin0
#title
if no_phantoms:
ax1.set_title('Bolshoi Dark Matter Mass Functions (no phantoms)')
else:
ax1.set_title('Bolshoi Dark Matter Mass Functions')
#mark redshift
for a, b in zip(mbin0[::-1], mf0[::-1]):
if b > 10 ** -6:
break
ax1.annotate('$z \sim %.1f$' % redshift,
(0.98 * a, 3 * 10 ** -6), size='x-small')
#Analytical MFs
xST = dt['Sheth-Tormen'][0]
yST = dt['Sheth-Tormen'][1] * fudge
print xST[1000], yST[1000]
sh = ax1.plot(xST, yST, 'b-', lw=1.3)
#PS
xPS = dt['Press-Schecter'][0]
yPS = dt['Press-Schecter'][1] * fudge
ps = ax1.plot(xPS, yPS, 'g--', lw=1.1)
#MF from Bolshoi
bolshoi = ax1.plot(mbin0, mf0, 'ro:', ms=5)
#delete data to save memory, dt is not needed any longer
del dt
#plot the residuals
if round(float(redshift), 1) < 1.5:
        #interpolate to the right x scale
mfintST = N.interp(xST, mbin0, mf0)
mfintPS = N.interp(xPS, mbin0, mf0)
#yST = N.interp(mbin0, xST, yST)
#yPS = N.interp(mbin0, xPS, yPS)
#make the plot
ax2.annotate('$z \sim %.0f$' % redshift,
(1.5 * 10 ** 9, 1.05), xycoords='data',
size=10)
ax2.axhline(1.0, color='b')
ax2.plot(xST, mfintST / yST, 'b-')
ax2.plot(xPS, mfintPS / yPS, 'g-')
#ax2.plot(mbin0, mf0 / yST, 'b-')
#ax2.plot(mbin0, mf0 / yPS, 'g-')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim(3 * 10 ** -7, 10 ** 0)
ax2.set_ylim(0.45, 1.55)
ax1.set_xlim(10 ** 9, 10 ** 15)
ax2.set_xlim(10 ** 9, 10 ** 15)
ax1.set_xticklabels([])
ax2.set_xlabel(r'$M_{\mathrm{vir}} \quad [M_{\odot}]$')
ax1.set_ylabel(r'$\mathrm{d}N / \mathrm{d}\log_{10}(M_{\mathrm{vir}}) \quad [\mathrm{Mpc}^{-3} \mathrm{dex}^{-1}]$')
ax2.set_ylabel(r'$\frac{\mathrm{Bolshoi}}{\mathrm{Model}}$')
ax1.legend((bolshoi, sh, ps),
('Bolshoi', 'Sheth-Tormen', 'Press-Schecter'),
shadow=True, fancybox=True,
numpoints=1)
def plotDMMFfromGalpropz(redshift, h, *data):
#fudge factor
fudge = 2.0
#http://adsabs.harvard.edu/abs/2001MNRAS.321..372J
#Jenkins et al. paper has sqrt(2./pi) while
#Rachel's code has 1/(sqrt(2*pi))
#ratio of these two is the fudge factor
#find the home directory, because the output is to dropbox
#and my user name is not always the same, this hack is required.
hm = os.getenv('HOME')
path = hm + '/Dropbox/Research/Bolshoi/run/trial2/'
database = 'sams.db'
rlow = redshift - 0.1
rhigh = redshift + 0.1
query = '''select mhalo from galpropz where
galpropz.zgal > %f and galpropz.zgal <= %f and
galpropz.gal_id = 1
''' % (rlow, rhigh)
print query
#read data
dt = {}
for x in data[0]:
dt[x] = N.loadtxt(data[0][x])
dt['Bolshoi'] = db.sqlite.get_data_sqlite(path, database, query) * 1e9
#calculate the mass functions from the Bolshoi data
mbin0, mf0 = df.diffFunctionLogBinning(dt['Bolshoi'] / h,
nbins=35,
h=0.7,
mmin=10 ** 9.0,
mmax=10 ** 15.0,
volume=50,
nvols=26,
physical_units=True)
del dt['Bolshoi']
#use chain rule to get dN / dM
#dN/dM = dN/dlog10(M) * dlog10(M)/dM
#d/dM (log10(M)) = 1 / (M*ln(10))
mf0 *= 1. / (mbin0 * N.log(10))
mbin0 = 10 ** mbin0
#title
ax1.set_title('Dark Matter Halo Mass Functions (galpropz.dat)')
#mark redshift
for a, b in zip(mbin0[::-1], mf0[::-1]):
if b > 10 ** -6:
break
ax1.annotate('$z \sim %.0f$' % redshift,
(0.98 * a, 3 * 10 ** -7), size='x-small')
#Analytical MFs
#0th column: log10 of mass (Msolar, NOT Msolar/h)
#1st column: mass (Msolar/h)
#2nd column: (dn/dM)*dM, per Mpc^3 (NOT h^3/Mpc^3)
xST = 10 ** dt['Sheth-Tormen'][:, 0]
yST = dt['Sheth-Tormen'][:, 2] * fudge
sh = ax1.plot(xST, yST, 'b-', lw=1.3)
#PS
xPS = 10 ** dt['Press-Schecter'][:, 0]
yPS = dt['Press-Schecter'][:, 2] * fudge
ps = ax1.plot(xPS, yPS, 'g--', lw=1.1)
#MF from Bolshoi
bolshoi = ax1.plot(mbin0, mf0, 'ro:', ms=5)
#delete data to save memory, dt is not needed any longer
del dt
#plot the residuals
if round(float(redshift), 1) < 1.5:
        #interpolate to the right x scale
ySTint = N.interp(mbin0, xST, yST)
yPSint = N.interp(mbin0, xPS, yPS)
#make the plot
ax2.annotate('$z \sim %.0f$' % redshift,
(1.5 * 10 ** 9, 1.05), xycoords='data',
size=10)
ax2.axhline(1.0, color='b')
ax2.plot(mbin0, mf0 / ySTint, 'b-')
ax2.plot(mbin0, mf0 / yPSint, 'g-')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim(5 * 10 ** -8, 10 ** -1)
ax2.set_ylim(0.45, 1.55)
ax1.set_xlim(10 ** 9, 10 ** 15)
ax2.set_xlim(10 ** 9, 10 ** 15)
ax1.set_xticklabels([])
ax2.set_xlabel(r'$M_{\mathrm{vir}} \quad [M_{\odot}]$')
ax1.set_ylabel(r'$\mathrm{d}N / \mathrm{d}M_{\mathrm{vir}} \quad [\mathrm{Mpc}^{-3} \mathrm{dex}^{-1}]$')
ax2.set_ylabel(r'$\frac{\mathrm{galpropz.dat}}{\mathrm{Model}}$')
ax1.legend((bolshoi, sh, ps),
('Bolshoi', 'Sheth-Tormen', 'Press-Schecter'),
shadow=True, fancybox=True,
numpoints=1)
def plotDMMFfromGalpropzAnalytical2(redshift, h, *data):
#fudge factor
fudge = 1.
#http://adsabs.harvard.edu/abs/2001MNRAS.321..372J
#Jenkins et al. paper has sqrt(2./pi) while
#Rachel's code has 1/(sqrt(2*pi))
#ratio of these two is the fudge factor
#find the home directory, because the output is to dropbox
#and my user name is not always the same, this hack is required.
hm = os.getenv('HOME')
path = hm + '/Dropbox/Research/Bolshoi/run/trial2/'
database = 'sams.db'
rlow = redshift - 0.1
rhigh = redshift + 0.1
query = '''select mhalo from galpropz where
galpropz.zgal > %f and galpropz.zgal <= %f and
galpropz.gal_id = 1
''' % (rlow, rhigh)
#Hubble constants
h3 = h ** 3
#read data
dt = {}
for x in data[0]:
#M dN/dM dNcorr/dM dN/dlog10(M) dN/dlog10(Mcorr)
d = N.loadtxt(data[0][x])
dt['Press-Schecter'] = N.array([d[:, 0], d[:, 3]])
dt['Sheth-Tormen'] = N.array([d[:, 0], d[:, 4]])
dt['Bolshoi'] = db.sqlite.get_data_sqlite(path, database, query) * 1e9
print len(dt['Bolshoi'])
#calculate the mass functions from the Bolshoi data
mbin0, mf0 = df.diffFunctionLogBinning(dt['Bolshoi'] / h,
nbins=35,
h=0.7,
mmin=10 ** 9.0,
mmax=10 ** 15.0,
volume=50,
nvols=26,
physical_units=True)
del dt['Bolshoi']
mbin0 = 10 ** mbin0
#title
ax1.set_title('Dark Matter Halo Mass Functions (galpropz.dat)')
#mark redshift
for a, b in zip(mbin0[::-1], mf0[::-1]):
if b > 10 ** -5:
break
ax1.annotate('$z \sim %.0f$' % redshift,
(0.98 * a, 3 * 10 ** -6), size='x-small')
#Analytical MFs
xST = dt['Sheth-Tormen'][0]
yST = dt['Sheth-Tormen'][1] * fudge
sh = ax1.plot(xST, yST, 'b-', lw=1.3)
#PS
xPS = dt['Press-Schecter'][0]
yPS = dt['Press-Schecter'][1] * fudge
ps = ax1.plot(xPS, yPS, 'g--', lw=1.1)
#MF from Bolshoi
bolshoi = ax1.plot(mbin0, mf0, 'ro:', ms=5)
#delete data to save memory, dt is not needed any longer
del dt
#plot the residuals
if redshift < 1.5:
        #interpolate to the right x scale
mfintST = N.interp(xST, mbin0, mf0)
mfintPS = N.interp(xPS, mbin0, mf0)
#yST = N.interp(mbin0, xST, yST)
#yPS = N.interp(mbin0, xPS, yPS)
#make the plot
ax2.annotate('$z \sim %.0f$' % redshift,
(1.5 * 10 ** 9, 1.05), xycoords='data',
size=10)
ax2.axhline(1.0, color='b')
ax2.plot(xST, mfintST / yST, 'b-')
ax2.plot(xPS, mfintPS / yPS, 'g-')
#ax2.plot(mbin0, mf0 / yST, 'b-')
#ax2.plot(mbin0, mf0 / yPS, 'g-')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim(10 ** -6, 10 ** -0)
ax2.set_ylim(0.45, 1.55)
ax1.set_xlim(10 ** 9, 10 ** 15)
ax2.set_xlim(10 ** 9, 10 ** 15)
ax1.set_xticklabels([])
ax2.set_xlabel(r'$M_{\mathrm{vir}} \quad [M_{\odot}]$')
ax1.set_ylabel(r'$\mathrm{d}N / \mathrm{d}\log_{10}(M_{\mathrm{vir}}) \quad [\mathrm{Mpc}^{-3} \mathrm{dex}^{-1}]$')
ax2.set_ylabel(r'$\frac{\mathrm{galpropz.dat}}{\mathrm{Model}}$')
ax1.legend((bolshoi, sh, ps),
('Bolshoi', 'Sheth-Tormen', 'Press-Schecter'),
shadow=True, fancybox=True,
numpoints=1)
def compareGalpropzToBolshoiTrees(analyticalData,
BolshoiTrees,
redshifts,
h,
outputdir,
nvols=18,
no_phantoms=True,
galid=True):
#data storage
data = {}
#figure definitions
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.2]
rect2 = [left, 0.3, width, 0.65]
#note that this is only available on tuonela.stsci.edu
simuPath = '/Users/niemi/Desktop/Research/run/newtree1/'
simuDB = 'sams.db'
    #start the figure
fig = P.figure()
ax1 = fig.add_axes(rect2) #left, bottom, width, height
ax2 = fig.add_axes(rect1)
#set title
if galid:
ax1.set_title('Dark Matter Halo Mass Functions (gal\_id = 1; 26 volumes)')
else:
ax1.set_title('Dark Matter Halo Mass Functions (galpropz: 26 volumes)')
#loop over the data and redshift range
for redsh, BolshoiTree, anaData in zip(sorted(redshifts.itervalues()),
BolshoiTrees,
analyticalData):
#skip some redshifts
if redsh < 1.0 or redsh == 2.0276 or redsh == 5.1614\
or redsh == 6.5586 or redsh == 3.0584:
continue
#if redsh == 1.0064 or redsh == 3.0584 or redsh == 4.0429 or \
# redsh == 8.2251:
# continue
        #change this to logging afterwards
logging.debug(redsh)
logging.debug(BolshoiTree)
logging.debug(anaData)
rlow = redsh - 0.02
rhigh = redsh + 0.02
if galid:
query = '''select mhalo from galpropz where
galpropz.zgal > {0:f} and galpropz.zgal < {1:f} and
galpropz.gal_id = 1'''.format(rlow, rhigh)
else:
query = '''select mhalo from galpropz where
galpropz.zgal > {0:f} and
galpropz.zgal < {1:f}'''.format(rlow, rhigh)
logging.debug(query)
data['SAM'] = db.sqlite.get_data_sqlite(simuPath, simuDB, query) * 1e9
#calculate the mass functions from the SAM data, only x volumes
mbin0SAM, mf0SAM = df.diffFunctionLogBinning(data['SAM'],
nbins=30,
h=h,
mmin=1e9,
mmax=1e15,
volume=50.0,
nvols=nvols,
physical_units=True)
mbin0SAM = 10 ** mbin0SAM
#mf0SAM = mf0SAM * 1.7
#read the Bolshoi merger trees
data['Bolshoi'] = io.readBolshoiDMfile(BolshoiTree, 0, no_phantoms)
#calculate the mass functions from the full Bolshoi data
mbin0Bolshoi, mf0Bolshoi = df.diffFunctionLogBinning(data['Bolshoi'] / h,
nbins=30,
h=h,
mmin=1e9,
mmax=1e15,
volume=50.0,
nvols=125,
physical_units=True)
mbin0Bolshoi = 10 ** mbin0Bolshoi
#Analytical MFs
#get Rachel's analytical curves
#M dN/dM dNcorr/dM dN/dlog10(M) dN/dlog10(Mcorr)
d = N.loadtxt(anaData)
data['Press-Schecter'] = N.array([d[:, 0], d[:, 3]])
data['Sheth-Tormen'] = N.array([d[:, 0], d[:, 4]])
#ST
sh = ax1.plot(data['Sheth-Tormen'][0],
data['Sheth-Tormen'][1],
'k-', lw=0.9)
#PS
#ps = ax1.plot(data['Press-Schecter'][0],
# data['Press-Schecter'][1],
# 'g--', lw = 1.1)
#MF from Bolshoi
bolshoiax = ax1.plot(mbin0Bolshoi,
mf0Bolshoi,
'ro--', ms=4)
#MF from the SAM run
samax = ax1.plot(mbin0SAM,
mf0SAM,
'gs--', ms=4)
#mark redshift
for a, b in zip(mbin0Bolshoi[::-1], mf0Bolshoi[::-1]):
if b > 10 ** -5:
break
ax1.annotate('$z \sim {0:.2f}$'.format(redsh),
(0.6 * a, 3 * 10 ** -6), size='x-small')
#plot the residuals
if redsh < 1.5:
#make the plot
ax2.annotate('$z \sim {0:.2f}$'.format(redsh),
(1.5 * 10 ** 9, 1.05), xycoords='data',
size=10)
ax2.axhline(1.0, color='k')
msk = mf0SAM / mf0Bolshoi > 0
ax2.plot(mbin0SAM[msk],
mf0SAM[msk] / mf0Bolshoi[msk],
'r-')
ax1.set_xscale('log')
ax2.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim(1e-6, 10 ** -0)
ax2.set_ylim(0.45, 1.55)
ax1.set_xlim(2e9, 4e14)
ax2.set_xlim(2e9, 4e14)
ax1.set_xticklabels([])
ax2.set_xlabel(r'$M_{\mathrm{vir}} \quad [M_{\odot}]$')
ax1.set_ylabel(r'$\mathrm{d}N / \mathrm{d}\log_{10}(M_{\mathrm{vir}}) \quad [\mathrm{Mpc}^{-3} \mathrm{dex}^{-1}]$')
ax2.set_ylabel(r'$\frac{\mathrm{galpropz.dat}}{\mathrm{IsoTree}}$')
ax1.legend((sh, bolshoiax, samax),
('Sheth-Tormen', 'Bolshoi', 'galpropz'),
shadow=True, fancybox=True,
numpoints=1)
if galid:
P.savefig(outputdir + 'IsotreesVSgalpropzGalID.pdf')
else:
P.savefig(outputdir + 'IsotreesVSgalpropz.pdf')
if __name__ == '__main__':
#Hubble constant
h = 0.7
#output directory
wrkdir = os.getenv('HOME') + '/Dropbox/Research/Bolshoi/dm_halo_mf/'
outdir = wrkdir + 'plots/'
#logging
log_filename = 'plotDarkMatterMassFunction.log'
logging = lg.setUpLogger(outdir + log_filename)
#find files
simus = g.glob(wrkdir + 'simu/*.txt')
sheth = g.glob(wrkdir + 'analytical/*sheth*_?_??-fit.dat')
press = g.glob(wrkdir + 'analytical/*press*_?_??-fit.dat')
warren = g.glob(wrkdir + 'analytical/*warren*_?_??-fit.dat')
#analytical 2, Rachel's code
analytical = g.glob(os.getenv('HOME') + '/Dropbox/Research/Bolshoi/var/z*')
#figure definitions
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.2]
rect2 = [left, 0.3, width, 0.65]
# #make the individual plots
# fig = P.figure()
# ax1 = fig.add_axes(rect2) #left, bottom, width, height
# ax2 = fig.add_axes(rect1)
# for a, b, c, d in zip(simus, sheth, press, warren):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Sheth-Tormen': b,
# 'Press-Schecter': c,
# 'Warren' : d}
#
# if b.find('_1_01') > -1 or b.find('_6_56') > -1 or b.find('_3_06') > -1 or b.find('_5_16') > -1:
# continue
# else:
# logging.debug('Plotting redshift %.2f dark matter mass functions', redshift)
# print a, b, c, d
# plot_mass_function(redshift, h, True, data)
# P.savefig(outdir + 'DMmfzNoPhantoms1.pdf')
# P.close()
#
# #make the individual plots 2
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, b, c, d in zip(simus, sheth, press, warren):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Sheth-Tormen': b,
# 'Press-Schecter': c,
# 'Warren' : d}
#
# if b.find('_1_01') > -1 or b.find('_6_56') > -1 or b.find('_3_06') > -1 or b.find('_5_16') > -1:
# continue
# else:
# logging.debug('Plotting redshift %.2f dark matter mass functions', redshift)
# print a, b, c, d
# plot_mass_function(redshift, h, False, data)
# P.savefig(outdir + 'DMmfz1.pdf')
# P.close()
#
# #make the individual plots 3
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, b, c, d in zip(simus, sheth, press, warren):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Sheth-Tormen': b,
# 'Press-Schecter': c,
# 'Warren' : d}
#
# if b.find('_1_01') > -1 or b.find('_6_56') > -1 or b.find('_3_06') > -1 or b.find('_5_16') > -1:
# logging.debug('Plotting redshift %.2f dark matter mass functions', redshift)
# print a, b, c, d
# plot_mass_function(redshift, h, True, data)
# P.savefig(outdir + 'DMmfzNoPhantoms2.pdf')
# P.close()
#
# #make the individual plots 4
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, b, c, d in zip(simus, sheth, press, warren):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Sheth-Tormen': b,
# 'Press-Schecter': c,
# 'Warren' : d}
#
# if b.find('_1_01') > -1 or b.find('_6_56') > -1 or b.find('_3_06') > -1 or b.find('_5_16') > -1:
# logging.debug('Plotting redshift %.2f dark matter mass functions', redshift)
# print a, b, c, d
# plot_mass_function(redshift, h, False, data)
# P.savefig(outdir + 'DMmfz2.pdf')
# P.close()
#
##############################
# #With Rachel's analytical
# #make the individual plots
# fig = P.figure()
# ax1 = fig.add_axes(rect2) #left, bottom, width, height
# ax2 = fig.add_axes(rect1)
# for a, b in zip(simus, analytical):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Analytical': b}
#
# if round(redshift,1) not in [1.0, 2.0, 4.0, 8.2]:
# logging.debug('Plotting redshift %.2f dark matter mass functions' % redshift)
# print a, b
# plot_mass_functionAnalytical2(redshift, h, True, data)
# P.savefig(outdir + 'DMmfzNoPhantoms1RAnalytical.pdf')
# P.close()
#
# #make the individual plots 2
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, b in zip(simus, analytical):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Analytical': b}
#
# if round(redshift,1) not in [1.0, 2.0, 4.0, 5.2]:
# logging.debug('Plotting redshift %.2f dark matter mass functions' % redshift)
# print a, b
# plot_mass_functionAnalytical2(redshift, h, False, data)
# P.savefig(outdir + 'DMmfz1RAnalytical.pdf')
# P.close()
#
# #make the individual plots 3
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, b in zip(simus, analytical):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Analytical': b}
#
# if round(redshift,1) in [1.0, 4.0, 6.6]:
# logging.debug('Plotting redshift %.2f dark matter mass functions', redshift)
# print a, b
# plot_mass_functionAnalytical2(redshift, h, True, data)
# P.savefig(outdir + 'DMmfzNoPhantoms2RAnalytical.pdf')
# P.close()
#
# #make the individual plots 4
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, b in zip(simus, analytical):
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Bolshoi' : a,
# 'Analytical': b}
#
# if round(redshift,1) in [1.0, 4.0, 6.6]:
# logging.debug('Plotting redshift %.2f dark matter mass functions' % redshift)
# print a, b
# plot_mass_functionAnalytical2(redshift, h, False, data)
# P.savefig(outdir + 'DMmfz2RAnalytical.pdf')
# P.close()
#
#################################
# #Haloes from galpropz.dat
#
# sheth = g.glob(wrkdir + 'analytical/*sheth*_?_?-fit.dat')
# press = g.glob(wrkdir + 'analytical/*press*_?_?-fit.dat')
# warren = g.glob(wrkdir + 'analytical/*warren*_?_?-fit.dat')
#
# #make the individual plots 2
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a, c, d in zip(sheth, press, warren):
# redshift = float(a.split('tormen_')[1].split('-fit')[0].replace('_', '.'))
# data = {'Sheth-Tormen': a,
# 'Press-Schecter': c,
# 'Warren' : d}
#
# if a.find('_2_0') > -1 or a.find('_6_0') > -1 or a.find('_3_0') > -1 or a.find('_5_0') > -1 or a.find('_0_0') > -1:
# continue
# else:
# logging.debug('Plotting redshift %.2f dark matter mass functions' % redshift)
# print a, c, d
# plotDMMFfromGalpropz(redshift, h, data)
# P.savefig(outdir + 'DMmfz1GalpropZ.pdf')
# P.close()
#
# #a new plot
# fig = P.figure()
# ax1 = fig.add_axes(rect2)
# ax2 = fig.add_axes(rect1)
# for a in analytical:
# redshift = float(a.split('z')[1].split('.')[0].replace('_', '.'))
# data = {'Analytical': a}
# if round(redshift,1) in [1.0, 4.0, 8.0]:
# logging.debug('Plotting redshift %.2f dark matter mass functions (Analytical 2)' % redshift)
# print a
# plotDMMFfromGalpropzAnalytical2(redshift, h, data)
# P.savefig(outdir + 'DMmfz1GalpropZAnalytical2.pdf')
# P.close()
########################################
#Compare dark matter halo mass functions of Rachel's SAM and Bolshoi trees
redshifts = {0.9943: 0.0057,
0.4984: 1.0064,
0.2464: 3.0584,
0.1983: 4.0429,
0.1323: 6.5586,
0.1084: 8.2251,
0.1623: 5.1614,
0.3303: 2.0276}
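    # Editorial note on the convention assumed above: the keys are Bolshoi expansion factors a
    # and the values the matching redshifts, consistent with z = 1/a - 1,
    # e.g. 1/0.4984 - 1 ~= 1.0064 and 1/0.3303 - 1 ~= 2.0276.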
compareGalpropzToBolshoiTrees(analytical, simus, redshifts, h, outdir)
#compareGalpropzToBolshoiTrees(analytical, simus, redshifts, h, outdir, galid=False) | bsd-2-clause |
BlackHC/mdp | setup.py | 1 | 3504 | # Copyright 2017 Andreas Kirsch <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import sys
here = path.abspath(path.dirname(__file__))
# NOTE: this trick is being used by the gym: I might be cargo-culting here.
# Don't import mdp here since deps might not have been installed yet
sys.path.insert(0, path.join(here, 'blackhc/mdp'))
from version import VERSION
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='blackhc.mdp',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION,
description='MDP framework for the OpenAI Gym',
long_description=long_description,
# The project's main homepage.
url='https://github.com/blackhc/mdp',
# Author details
author='Andreas Kirsch',
author_email='[email protected]',
# Choose your license
license='Apache',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='mdp rl',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['blackhc.mdp', 'blackhc.mdp.dsl'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['gym>=0.9.2', 'numpy', 'matplotlib', 'networkx>=1.11.0,<2.0.0', 'pydotplus', 'ipython>=6.1.0', 'ipywidgets', 'typing'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage', 'pytest'],
},
setup_requires=['pytest-runner'],
)
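# Typical workflow for this package (illustrative commands only, run from the repository root):
#   pip install -e .[dev,test]   # editable install with the optional extras declared above
#   python setup.py pytest       # run the test suite through pytest-runner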
| apache-2.0 |
sknepneklab/SAMoS | utils/pvmodel/ioutils.py | 1 | 6712 | import numpy as np
from collections import OrderedDict
# This is a vital cellmesh.py method for reading the faces files from samos
def readfc(fcfile):
faces= []
with open(fcfile, 'r') as fc:
for line in fc:
face = map(int, line.split())
faceid, face = face[0], face[1:]
if len(face) > 3:
# should be a boundary face
continue
faces.append(face)
simp = np.array(faces)
boundary = 'not implemented'
return simp, boundary
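# A minimal, hedged usage sketch for readfc (the file name below is hypothetical): each line of
# a samos faces file is expected to hold a face id followed by its vertex indices, and faces
# with more than three vertices are treated as boundary faces and skipped.
#   simp, boundary = readfc('epithelial_faces.fc')
#   # simp is an (n_faces, 3) integer array of triangle vertex ids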
### These are general methods for reading columnar text files like the SAMoS output
#print square data to file, first column is int and rest are floats.
def dump(dd, fo, htag='#', outstr=None):
nc = len(dd.keys())
#fo.write(''.join([htag+' ', '%s\t '*nc, '\n']) % tuple(dd.keys()))
fo.write((htag+' '+'{:<14} '*nc + '\n').format(*dd.keys()))
ddv = dd.values()
nr = len(ddv[0]) # assumption
if not outstr:
outstr = ' '+'{:<14} '*nc + '\n'
for i in range(nr):
dv = [d[i] for d in ddv]
fo.write(outstr.format(*dv))
def datdump(dd, fo):
htag = 'keys:'
dump(dd, fo, htag=htag)
# This only reads dumps of floats and ints
#def readdump(fo, firstc=float):
def readdump(fo):
headers = fo.next()[1:].split()
dd = OrderedDict()
for h in headers:
dd[h] = []
for line in fo:
for i, dat in enumerate(line.split()):
try:
cdat = int(dat)
except ValueError:
try:
cdat = float(dat)
except ValueError:
print 'data item {} is not float or int'.format(dat)
raise
dd[headers[i]].append( cdat )
return dd
def freaddump(ifile):
with open(ifile,'r') as fi:
return readdump(fi)
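# Illustrative round trip for the column helpers above (a sketch; the file name and values are
# made up). Note that readdump() strips a single leading character from the header line, so it
# pairs with dump()'s default '#' tag rather than with datdump()'s 'keys:' tag.
def _example_dump_roundtrip(path='example_dump.dat'):
    dd = OrderedDict([('id', [1, 2]), ('x', [0.5, 1.5])])
    with open(path, 'w') as fo:
        dump(dd, fo)
    return freaddump(path)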
def dumpargs(dd, fo):
r1 = max(map(len, dd.keys()))
argsformat = '{:<%d} {:>10}\n' % r1
for k, v in dd.items():
fo.write(argsformat.format(k, v))
# debugging
def dirk(A):
print A
print dir(A)
sys.exit()
def shiv(al):
for a in al:
print a
print eval(a)
from matplotlib import pyplot as plt
def plotrange(f, a, b, n=100):
x = np.linspace(a,b,n+1)
y = map(f, x)
plt.plot(x, y)
plt.show()
#np.set_printoptions(threshold=np.nan)
import contextlib
import cStringIO
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
yield
sys.stdout = save_stdout
# want to print a vector object
def dumpvec(vec):
print omvec(vec)
def scatter(mesh, mesh2):
arrl = []
for vh in mesh.vertices():
arrl.append(omvec(mesh.point(vh)))
npts = np.column_stack(arrl)
x = npts[:][0]
y = npts[:][1]
plt.scatter(x, y, color='red')
mesh = mesh2
arrl = []
for vh in mesh.vertices():
arrl.append(omvec(mesh.point(vh)))
npts = np.column_stack(arrl)
x = npts[:][0]
y = npts[:][1]
plt.scatter(x, y, color='blue')
plt.show()
# how to print a dictionary containing serious data
def stddict(dd):
for k, v in dd.items():
print k
print v
print
# Important!
# this is shared code for cellmesh and writemesh for interacting with openmesh
# openmesh has a vector object
# too lazy to use this to convert to numpy arrays
# fixed to three dimensions...
def omvec(vec):
return np.array([vec[0], vec[1], vec[2]])
def idtopt(mesh, rmuid):
return omvec(mesh.point(mesh.vertex_handle(rmuid)))
# OrderedSet borrowed from internet
#http://code.activestate.com/recipes/576694/
#see also
#http://stackoverflow.com/questions/1653970/does-python-have-an-ordered-set
import collections
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
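# Small usage sketch for the OrderedSet recipe above (illustrative only):
def _ordered_set_demo():
    s = OrderedSet('abracadabra')
    return list(s)  # ['a', 'b', 'r', 'c', 'd'] -- first-seen order is kept, unlike the built-in set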
### Totally general utility methods
from glob import glob
import os
def rotation_2d(theta):
c, s = np.cos(theta), np.sin(theta)
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
return R
# utility for running the 'macro' from all the directories we can find
excluded = ['plots/', 'tmp/']
def finddirs(re='*/'):
dirs = sorted( glob(re) )
for ex in excluded:
if ex in dirs:
dirs.remove(ex)
return dirs
def diriterate(macro, re='*/'):
dirs = finddirs(re)
bdir = os.getcwd()
print dirs
for di in dirs:
os.chdir(di)
print '$utools$ Entering directory ', di
macro()
os.chdir(bdir)
return dirs
# A cute line object
class Line(object):
def __init__(self, p, n):
self.p = np.array(p)
self.n = np.array(n)
self.n /= lg.norm(self.n)
def __call__(self, m):
return self.p + m* self.n
# return a perpendicular line
def perp(self, p):
        # rotation_2d is defined at module level in this file (there is no `io` module here);
        # a perpendicular direction corresponds to a rotation by pi/2 rather than pi/4
        R = rotation_2d(np.pi/2)
n = np.einsum('mn,n->m', R, self.n)
return Line(p,n)
def intersect(self, line):
coeffs = [[self.n[0], -line.n[0]], [self.n[1], -line.n[1]]]
        # the right-hand side is the difference of the base points (line.p - self.p), not their sum
        ords = [line.p[0] - self.p[0], line.p[1] - self.p[1]]
m1, m2 = lg.solve(coeffs, ords)
return self(m1)
| gpl-3.0 |
LoveYakamoz/Quantitative_Trading | joinQuant/T_0/T_0_ShipanE.py | 2 | 21973 | from jqdata import *
import numpy as np
import pandas as pd
import talib as ta
from math import isnan, floor
from math import atan
import tushare as ts
# The Source/Angle/Status/Break classes below subclass Enum, which was never imported; this
# assumes the standard-library enum (Python 3.4+) or the enum34 backport is available.
from enum import Enum
try:
import shipane_sdk
except:
pass
# Source of the stock pool
class Source(Enum):
    AUTO = 0  # automatically pick stocks from the CSI 300 by volatility and price
    CLIENT = 1  # use the stocks supplied by the user
g.stocks_source = Source.CLIENT  # which method is used to obtain stocks (here: the user-supplied list)
g.stock_id_list_from_client = ["300059.XSHE", "600206.XSHG"]
g.stock_position = {"300059.XSHE": 100,
"600206.XSHG": 100}
# Detailed information on the stocks held in the pool
g.basestock_pool = []
# Counters used for result statistics
g.repeat_signal_count = 0
g.reset_order_count = 0
g.success_count = 0
# Number of bars used for the moving averages
g.ma_4day_count = 4
g.ma_13day_count = 13
# Fraction of the position adjusted on each trade
g.adjust_scale = 0.25
# Expected return per round trip
g.expected_revenue = 0.003
# Angle threshold (degrees)
g.angle_threshold = 30
class Angle(Enum):
    UP = 1  # angle > 30
    MIDDLE = 0  # -30 <= angle <= 30
    DOWN = -1  # angle < -30
class Status(Enum):
    INIT = 0  # reset to INIT at the start of each trading day
    WORKING = 1  # a buy/sell round trip is in progress
    NONE = 2  # no more trading today
class Break(Enum):
    UP = 0  # upward cross
    DOWN = 1  # downward cross
NONE = 2
'''
Holds the detailed state of a single stock in the pool.
'''
class BaseStock(object):
def __init__(self, stock, close, min_vol, max_vol, lowest, highest, status, position, sell_order_id, buy_order_id):
self.stock = stock
self.close = close
self.min_vol = min_vol
self.max_vol = max_vol
self.lowest = lowest
self.highest = highest
self.status = status
self.position = position
self.sell_order_id = sell_order_id
self.sell_price = 0
self.buy_order_id = buy_order_id
self.buy_price = 0
        self.break_throught_type = Break.NONE  # breakout type, up or down
        self.break_throught_time = None  # time of the breakout
        self.delay_amount = 0  # size of the follow-up (reverse) order
        self.delay_price = 0  # price of the follow-up (reverse) order
self.operator_value_4 = 0
self.operator_value_13 = 0
self.angle = 1000
def print_stock(self):
        log.info(
            "stock: %s, close: %f, min_vol: %f, max_vol: %f, lowest: %f, highest: %f, position: %f, sell_order_id: %d, buy_order_id: %d, operator_value_4: %f, operator_value_13: %f"
, self.stock, self.close, self.min_vol, self.max_vol, self.lowest, self.highest, self.position,
self.sell_order_id, self.buy_order_id, self.operator_value_4, self.operator_value_13)
def get_stocks_by_client(context):
'''
    Take the stock list directly from the user-supplied configuration.
'''
select_count = 0
for stock_id in g.stock_id_list_from_client:
stock_obj = BaseStock(stock_id, 0, 0, 0, 0, 0, Status.INIT, g.stock_position[stock_id], -1, -1)
stock_obj.print_stock()
g.basestock_pool.append(stock_obj)
select_count += 1
if select_count < g.position_count:
g.position_count = select_count
def get_stock_angle(context, stock):
    '''ATAN((today's 5-day close MA / yesterday's 5-day close MA - 1) * 100) * 57.3, in degrees'''
df_close = get_price(stock, count=6, end_date=str(context.current_dt), frequency='daily', fields=['close'])
close_list = [item for item in df_close['close']]
yesterday_5MA = (reduce(lambda x, y: x + y, close_list) - close_list[5]) / 5
today_5MA = (reduce(lambda x, y: x + y, close_list) - close_list[0]) / 5
    # use the atan imported from math above; the bare `math` module itself is never imported
    angle = atan((today_5MA / yesterday_5MA - 1) * 100) * 57.3
    log.info("Angle of stock %s: %f", stock, angle)
return angle
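# Editorial worked example for the formula above: if today's 5-day MA is 1% above yesterday's,
# the argument is (1.01 - 1) * 100 = 1.0 and the angle is atan(1.0) * 57.3 ~= 45 degrees; the
# +/-30 degree thresholds used below therefore correspond to roughly a +/-0.58% day-over-day
# change in the 5-day MA.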
def process_initialize(context):
g.__manager = shipane_sdk.JoinQuantStrategyManagerFactory(context).create('manager-1')
def initialize(context):
    log.info("---> Strategy initialization @ %s" % (str(context.current_dt)))
g.repeat_signal_count = 0
g.reset_order_count = 0
g.success_count = 0
    # On the first run the stock pool has to be selected, and no trading is done that day
    g.firstrun = True
    # Default capacity of the stock pool
g.position_count = 30
    if g.stocks_source == Source.AUTO:
        log.info("Automatically selecting stocks from the CSI 300 by volatility and price")
        get_stocks_by_vol(context)
    elif g.stocks_source == Source.CLIENT:
        log.info("Using the stock list supplied by the user")
        get_stocks_by_client(context)
    else:
        log.error("No method for obtaining stocks was provided!!!")
    # Set the benchmark
    set_benchmark('000300.XSHG')
    set_option('use_real_price', True)
    log.info("Initialization complete")
# At the start of each trading day, reset every stock to a tradable state
def before_trading_start(context):
    log.info("Resetting buy/sell status to INIT")
for i in range(g.position_count):
g.basestock_pool[i].status = Status.INIT
g.basestock_pool[i].lowest = 0
g.basestock_pool[i].highest = 0
g.basestock_pool[i].status = Status.INIT
g.basestock_pool[i].sell_order_id = -1
g.basestock_pool[i].sell_price = 0
g.basestock_pool[i].buy_order_id = -1
g.basestock_pool[i].buy_price = 0
g.basestock_pool[i].break_throught_time = None
g.basestock_pool[i].delay_amount = 0
g.basestock_pool[i].delay_price = 0
angle = get_stock_angle(context, g.basestock_pool[i].stock)
if angle > 30:
g.basestock_pool[i].angle = Angle.UP
elif angle < -30:
g.basestock_pool[i].angle = Angle.DOWN
else:
g.basestock_pool[i].angle = Angle.MIDDLE
g.repeat_signal_count = 0
g.reset_order_count = 0
g.success_count = 0
# Buy a stock and record the order id so that the order status can be queried later
def buy_stock(context, stock, amount, limit_price, index):
try:
buy_order = order(stock, amount, LimitOrderStyle(limit_price))
except Exception as e:
        log.info('ShiPanE buy order placement failed: ' + str(e))
finally:
g.__manager.work()
g.basestock_pool[index].buy_price = limit_price
if buy_order is not None:
g.basestock_pool[index].buy_order_id = buy_order.order_id
        log.info("Stock: %s, order placed at price %f to buy %d; ShiPanE buy order succeeded", stock, limit_price, amount)
# Sell a stock and record the order id so that the order status can be queried later
def sell_stock(context, stock, amount, limit_price, index):
try:
sell_order = order(stock, amount, LimitOrderStyle(limit_price))
except Exception as e:
        log.info('ShiPanE sell order placement failed: ' + str(e))
finally:
g.__manager.work()
g.basestock_pool[index].sell_price = limit_price
if sell_order is not None:
g.basestock_pool[index].sell_order_id = sell_order.order_id
        log.info("Stock: %s, order placed at price %f to sell %d; ShiPanE sell order succeeded", stock, limit_price, amount)
def sell_signal(context, stock, close_price, index):
    # Each trade uses g.adjust_scale of the current position size
amount = g.adjust_scale * g.basestock_pool[index].position
    log.info("sell scale: %f, src_position: %d, amount: %d", g.adjust_scale, g.basestock_pool[index].position, amount)
if amount <= 100:
amount = 100
else:
if amount % 100 != 0:
amount = amount - (amount % 100)
    # Place the sell order at the close price + 0.01
limit_price = close_price + 0.01
if g.basestock_pool[index].status == Status.WORKING:
        log.warn("Stock: %s, received a repeated sell signal, no trade made", stock)
elif g.basestock_pool[index].status == Status.INIT:
if g.basestock_pool[index].angle == Angle.UP:
            log.warn("Stock: %s, angle greater than 30, ignoring the sell signal", stock)
return
sell_ret = sell_stock(context, stock, -amount, limit_price, index)
if g.basestock_pool[index].sell_order_id != -1:
g.basestock_pool[index].break_throught_time = context.current_dt
            # Place the buy-back order at close price - yesterday's close * expected_revenue
yesterday = get_price(stock, count=1, end_date=str(context.current_dt), frequency='daily', fields=['close'])
limit_price = close_price - yesterday.iat[0, 0] * g.expected_revenue
g.basestock_pool[index].delay_amount = amount
g.basestock_pool[index].delay_price = limit_price
g.basestock_pool[index].break_throught_type = Break.DOWN
            g.basestock_pool[index].status = Status.WORKING  # update the trading status
else:
        log.error("Stock: %s, unexpected trading status", stock)
def buy_signal(context, stock, close_price, index):
    # Each trade uses g.adjust_scale of the current position size
amount = floor(g.adjust_scale * g.basestock_pool[index].position)
    log.info("buy scale: %f, src_position: %d, amount: %d", g.adjust_scale, g.basestock_pool[index].position, amount)
if amount <= 100:
amount = 100
else:
if amount % 100 != 0:
amount = amount - (amount % 100)
    # Place the buy order at the close price - 0.01
limit_price = close_price - 0.01
    # If the status is not INIT, a previous trade is still in progress (not yet fully filled)
if g.basestock_pool[index].status == Status.WORKING:
        log.warn("Stock: %s, received a repeated buy signal, no trade made", stock)
elif g.basestock_pool[index].status == Status.INIT:
if g.basestock_pool[index].angle == Angle.DOWN:
            log.warn("Stock: %s, angle less than -30, ignoring the buy signal", stock)
return
buy_stock(context, stock, amount, limit_price, index)
if g.basestock_pool[index].buy_order_id != -1:
g.basestock_pool[index].break_throught_time = context.current_dt
            # Place the sell-back order at close price + yesterday's close * expected_revenue
yesterday = get_price(stock, count=1, end_date=str(context.current_dt), frequency='daily', fields=['close'])
limit_price = close_price + yesterday.iat[0, 0] * g.expected_revenue
g.basestock_pool[index].delay_amount = -amount
g.basestock_pool[index].delay_price = limit_price
g.basestock_pool[index].break_throught_type = Break.UP
            g.basestock_pool[index].status = Status.WORKING  # update the trading status
else:
        log.error("Stock: %s, unexpected trading status", stock)
# Compute how many minutes the market has been open at the current time
def get_minute_count(current_dt):
'''
9:30 -- 11:30
13:00 --- 15:00
'''
current_hour = current_dt.hour
current_min = current_dt.minute
if current_hour < 12:
minute_count = (current_hour - 9) * 60 + current_min - 30
else:
minute_count = (current_hour - 13) * 60 + current_min + 120
return minute_count
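# Worked examples (editorial note): 9:31 -> (9-9)*60 + 31 - 30 = 1, 11:30 -> 120,
# 13:00 -> (13-13)*60 + 0 + 120 = 120, 14:55 -> 235; the two trading sessions are mapped
# onto one continuous minute index.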
# Get the lowest price of the last 89 minutes; with fewer than 89 minutes elapsed, use everything up to now
def update_89_lowest(context):
minute_count = get_minute_count(context.current_dt)
if minute_count > 89:
minute_count = 89
for i in range(g.position_count):
low_df = get_price(g.basestock_pool[i].stock, count=minute_count, end_date=str(context.current_dt),
frequency='1m', fields=['low'])
g.basestock_pool[i].lowest_89 = min(low_df['low'])
# Get the highest price of the last 233 minutes; with fewer than 233 minutes elapsed, use everything up to now
def update_233_highest(context):
minute_count = get_minute_count(context.current_dt)
if minute_count > 233:
minute_count = 233
for i in range(g.position_count):
high_df = get_price(g.basestock_pool[i].stock, count=minute_count, end_date=str(context.current_dt),
frequency='1m', fields=['high'])
g.basestock_pool[i].highest_233 = max(high_df['high'])
# high_df.sort(['high'], ascending = False).iat[0,0]
# Cancel all open (not yet filled) orders
def cancel_open_order(context):
orders = get_open_orders()
for _order in orders.values():
#cancel_order(_order)
pass
# Restore every stock to its original position size
def reset_position(context):
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
src_position = g.basestock_pool[i].position
cur_position = context.portfolio.positions[stock].total_amount
if src_position != cur_position:
log.info("src_position : cur_position", src_position, cur_position)
try:
_order = order(stock, src_position - cur_position)
finally:
g.__manager.work()
            log.warn("reset position: ", _order)
g.reset_order_count += 1
def update_socket_statue(context):
orders = get_orders()
if len(orders) == 0:
return
hour = context.current_dt.hour
minute = context.current_dt.minute
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
sell_order_id = g.basestock_pool[i].sell_order_id
buy_order_id = g.basestock_pool[i].buy_order_id
status = g.basestock_pool[i].status
if (status == Status.WORKING) and ((sell_order_id != -1) and (buy_order_id != -1)):
sell_order = orders.get(sell_order_id)
buy_order = orders.get(buy_order_id)
if (sell_order is not None) and (buy_order is not None):
if sell_order.status == OrderStatus.held and buy_order.status == OrderStatus.held:
                    log.info("Stock %s: round-trip trade completed ==============> SUCCESS", stock)
g.basestock_pool[i].sell_order_id = -1
g.basestock_pool[i].buy_order_id = -1
                    g.basestock_pool[i].status = Status.INIT  # a full buy/sell round trip has finished; a new trade may start
g.basestock_pool[i].buy_price = 0
g.basestock_pool[i].sell_price = 0
g.basestock_pool[i].delay_amount = 0
g.basestock_pool[i].delay_price = 0
g.basestock_pool[i].break_throught_time = None
g.basestock_pool[i].break_throught_type = Break.NONE
g.success_count += 1
        # After 14:00 each day, no new trades are started
if hour == 14 and g.basestock_pool[i].status == Status.INIT:
g.basestock_pool[i].status = Status.NONE
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
sell_order_id = g.basestock_pool[i].sell_order_id
buy_order_id = g.basestock_pool[i].buy_order_id
status = g.basestock_pool[i].status
        # Buy first, then sell
if (status == Status.WORKING) and (sell_order_id == -1):
buy_order = orders.get(buy_order_id)
if (buy_order is not None):
if buy_order.status == OrderStatus.held:
                    log.info("Buy-then-sell: stock %s, delay_amount: %d", stock, g.basestock_pool[i].delay_amount)
sell_stock(context, stock, g.basestock_pool[i].delay_amount, g.basestock_pool[i].delay_price, i)
        # Sell first, then buy
if (status == Status.WORKING) and (buy_order_id == -1):
sell_order = orders.get(sell_order_id)
if (sell_order is not None):
if sell_order.status == OrderStatus.held:
                    log.info("Sell-then-buy: stock %s, delay_amount: %d", stock, g.basestock_pool[i].delay_amount)
buy_stock(context, stock, g.basestock_pool[i].delay_amount, g.basestock_pool[i].delay_price, i)
def get_delta_minute(datetime1, datetime2):
minute1 = get_minute_count(datetime1)
minute2 = get_minute_count(datetime2)
return abs(minute2 - minute1)
def price_and_volume_up(context, stock):
df = get_price(stock, end_date=context.current_dt, count=3, frequency='1m', fields=['close', 'volume'])
if (df['close'][0] < df['close'][1] < df['close'][2]) and (df['volume'][0] < df['volume'][1] < df['volume'][2]):
        log.info("Price and volume both rising - buy: %s, close: %f, %f, %f; volume: %d, %d, %d", stock, df['close'][0], df['close'][1],
df['close'][2],
df['volume'][0], df['volume'][1], df['volume'][2])
return True
else:
return False
def handle_data(context, data):
hour = context.current_dt.hour
minute = context.current_dt.minute
    # At 14:55 each day, force any unfilled orders back to the original position size
    if hour == 14 and minute == 55:
        cancel_open_order(context)
        reset_position(context)
    # After 14:00 no new trades are opened
    if hour == 14 and minute >= 0:
        return
    # The moving averages need g.ma_13day_count bars, so no trading during the first minutes of each day
    if get_minute_count(context.current_dt) < g.ma_13day_count:
        # log.info("Trading only starts after the first 13 minutes")
        return
    # Update the lowest price of the last 89 minutes (or of the session so far, if shorter)
    update_89_lowest(context)
    # Update the highest price of the last 233 minutes (or of the session so far, if shorter)
    update_233_highest(context)
    # Update according to order status: once both legs (buy and sell) have filled, the stock is
    # reset to INIT so that a new trade can be made
    update_socket_statue(context)
    # 1. Loop over the stock list and check whether the current price generates a buy or sell signal
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
if isnan(g.basestock_pool[i].lowest_89) is True:
log.error("stock: %s's lowest_89 is None", stock)
continue
else:
lowest_89 = g.basestock_pool[i].lowest_89
if isnan(g.basestock_pool[i].highest_233) is True:
log.error("stock: %s's highest_233 is None", stock)
continue
else:
highest_233 = g.basestock_pool[i].highest_233
if g.basestock_pool[i].status == Status.NONE:
continue
        # If the price has not moved during the first minutes of the session, the breakout calculation
        # below would divide by zero; in that case there can be no breakout, so skip the stock
if lowest_89 == highest_233:
continue
        # Work out whether there is currently a breakout
close_m = get_price(stock, count=g.ma_13day_count, end_date=str(context.current_dt), frequency='1m',
fields=['close'])
        # use the numpy import explicitly; a bare `array` is not defined in this module
        close_4 = np.array([0.0, 0.0, 0.0, 0.0], dtype=float)
        close_13 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=float)
for j in range(g.ma_13day_count):
close_13[j] = close_m.iat[j, 0]
for j in range(g.ma_13day_count):
close_13[j] = ((close_13[j] - lowest_89) * 1.0 / (highest_233 - lowest_89)) * 4
close_4 = close_13[9:]
if close_13 is not None:
operator_line_13 = 0
operator_line_4 = 0
for item in close_13:
operator_line_13 += item
for item in close_4:
operator_line_4 += item
operator_line_13 = operator_line_13 / g.ma_13day_count
operator_line_4 = operator_line_4 / g.ma_4day_count
else:
            log.warn("Stock: %s - cannot compute the MAs, possibly because trading is suspended", stock)
continue
        # A buy signal is generated (golden cross)
if ((g.basestock_pool[i].operator_value_4 < g.basestock_pool[i].operator_value_13) and (
operator_line_4 > operator_line_13) and (operator_line_13 < 0.3) and (
close_m.iat[g.ma_13day_count - 1, 0] > close_m.iat[g.ma_13day_count - 2, 0] * 0.97)):
            log.info(
                "Golden-cross buy: %s, ma_4 from %f to %f, ma_13 from %f to %f, close_price: %f, yesterday_close_price: %f, lowest_89: %f, highest_233: %f",
stock, g.basestock_pool[i].operator_value_4, operator_line_4, g.basestock_pool[i].operator_value_13,
operator_line_13, close_m.iat[g.ma_4day_count - 1, 0], close_m.iat[g.ma_13day_count - 2, 0], lowest_89,
highest_233)
buy_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
        # A sell signal is generated (death cross)
elif ((g.basestock_pool[i].operator_value_4 > g.basestock_pool[i].operator_value_13) and (
operator_line_4 < operator_line_13) and (operator_line_13 > 3.7) and (
close_m.iat[g.ma_13day_count - 1, 0] < close_m.iat[g.ma_13day_count - 2, 0] * 1.03)):
            log.info(
                "Death-cross sell: %s, ma_4 from %f to %f, ma_13 from %f to %f, close_price: %f, yesterday_close_price: %f, lowest_89: %f, highest_233: %f",
stock, g.basestock_pool[i].operator_value_4, operator_line_4, g.basestock_pool[i].operator_value_13,
operator_line_13, close_m.iat[g.ma_4day_count - 1, 0], close_m.iat[g.ma_13day_count - 2, 0], lowest_89,
highest_233)
sell_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
        # Rising price together with rising volume is also a buy signal
elif (price_and_volume_up(context, stock)):
buy_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
else:
# log.info("%s, ma_4 from %f to %f, ma_13 from %f to %f, close_price: %f, yesterday_close_price: %f, lowest_89: %f, highest_233: %f", stock, g.basestock_pool[i].operator_value_4, operator_line_4, g.basestock_pool[i].operator_value_13, operator_line_13, close_m.iat[g.ma_4day_count-1,0], close_m.iat[g.ma_13day_count-2,0], lowest_89, highest_233)
pass
g.basestock_pool[i].operator_value_4 = operator_line_4
g.basestock_pool[i].operator_value_13 = operator_line_13
def after_trading_end(context):
log.info("===========================================================================")
log.info("[统计数据]成功交易次数: %d, 重复信号交易次数: %d, 收盘前强制交易次数: %d", g.success_count, g.repeat_signal_count,
g.reset_order_count)
log.info("===========================================================================")
| apache-2.0 |
kashif/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso makes it possible to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
dib-lab/kevlar | notebook/ssc-14153/evaluate.py | 2 | 7875 | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import argparse
from collections import defaultdict
import sys
import intervaltree
from intervaltree import IntervalTree
import pandas
from evalutils import IntervalForest, populate_index_from_bed, compact
from evalutils import assess_variants_vcf, assess_variants_mvf
from evalutils import subset_variants_bed, subset_vcf, subset_mvf
from evalutils import load_kevlar_vcf, load_triodenovo_vcf, load_gatk_mvf
import kevlar
from kevlar.vcf import VCFReader
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tolerance', type=int, metavar='T', default=10,
help='extend real variants by T nucleotides when '
'querying for overlap with variant calls; default is '
'10')
parser.add_argument('--mode', choices=('Kevlar', 'GATK', 'TrioDenovo'),
default='Kevlar', help='Kevlar|GATK|TrioDenovo')
parser.add_argument('--cov', default='30', help='coverage')
parser.add_argument('--correct', help='print correct variants to file')
parser.add_argument('--missing', help='print missing variants to file')
parser.add_argument('--false', help='print false variants to file')
parser.add_argument('--collisions', help='print calls that match the '
'same variant')
parser.add_argument('--vartype', choices=('SNV', 'INDEL'), default=None)
parser.add_argument('--minlength', type=int, default=None)
parser.add_argument('--maxlength', type=int, default=None)
parser.add_argument('--do-all', action='store_true', help='ignore all '
'other arguments and analyze all data')
parser.add_argument('simvar', help='simulated variants in .bed format')
parser.add_argument('varcalls', help='VCF file of variant calls')
return parser
def load_index(simvarfile, vartype=None, minlength=None, maxlength=None):
with kevlar.open(simvarfile, 'r') as instream:
if vartype:
instream = subset_variants_bed(
instream, vartype, minlength=minlength, maxlength=maxlength
)
index = populate_index_from_bed(instream)
return index
def handle_collisions(mapping, outfile, mvf=False):
numcollisions = 0
for variant, calllist in mapping.items():
if len(calllist) > 1:
numcollisions += 1
if numcollisions > 0:
print('WARNING:', numcollisions, 'variants matched by multiple calls',
file=sys.stderr)
if outfile is None:
return
with open(outfile, 'w') as outstream:
for variant, calllist in mapping.items():
if len(calllist) > 1:
print('\n#VARIANT:', variant, file=outstream)
for varcall in calllist:
if mvf:
print(' -', varcall, file=outstream)
else:
print(' -', varcall.vcf, file=outstream)
def handle_missing(missing, outfile):
if outfile is None:
return
with kevlar.open(outfile, 'w') as outstream:
for variant in missing:
print(variant.begin, *variant.data.split('<-'), sep='\t',
file=outstream)
def handle_calls(calls, outfile, mvf=False):
if outfile is None:
return
with kevlar.open(outfile, 'w') as outstream:
if mvf:
for varcall in calls:
print(varcall, file=outstream)
else:
writer = kevlar.vcf.VCFWriter(outstream)
for varcall in sorted(calls, key=lambda c: float(c.attribute('LIKESCORE')), reverse=True):
writer.write(varcall)
def evaluate(simvarfile, varcalls, mode, vartype=None, minlength=None,
maxlength=None, tolerance=10, coverage='30', correctfile=None,
falsefile=None, missingfile=None, collisionsfile=None):
assert mode in ('Kevlar', 'GATK', 'TrioDenovo')
index = load_index(simvarfile, vartype, minlength, maxlength)
if mode == 'GATK':
variants = load_gatk_mvf(varcalls, vartype, minlength, maxlength)
assess_func = assess_variants_mvf
elif mode == 'Kevlar':
variants = load_kevlar_vcf(
varcalls, index, delta=tolerance, vartype=vartype,
minlength=minlength, maxlength=maxlength
)
assess_func = assess_variants_vcf
elif mode == 'TrioDenovo':
variants = load_triodenovo_vcf(
varcalls, vartype, minlength, maxlength, coverage
)
assess_func = assess_variants_vcf
correct, false, missing, mapping = assess_func(
variants, index, delta=tolerance
)
handle_collisions(mapping, collisionsfile, mvf=(mode == 'GATK'))
handle_missing(missing, missingfile)
handle_calls(correct, correctfile, mvf=(mode == 'GATK'))
handle_calls(false, falsefile, mvf=(mode == 'GATK'))
return len(mapping), len(false), len(missing)
################################################################################
def vartypestr(vartype, minlength, maxlength):
if vartype is None:
return 'All'
assert vartype in ('SNV', 'INDEL')
if vartype == 'SNV':
return 'SNV'
return 'INDEL {}-{}bp'.format(minlength, maxlength)
def main(args):
correct, false, missing = evaluate(
args.simvar, args.varcalls, args.mode, vartype=args.vartype,
minlength=args.minlength, maxlength=args.maxlength,
tolerance=args.tolerance, coverage=args.cov, correctfile=args.correct,
falsefile=args.false, missingfile=args.missing,
collisionsfile=args.collisions
)
vartype = vartypestr(args.vartype, args.minlength, args.maxlength)
colnames = ['Caller', 'Coverage', 'VarType', 'Correct', 'False', 'Missing']
data = [args.mode, args.cov, vartype, correct, false, missing]
row = {c: v for c, v in zip(colnames, data)}
table = pandas.DataFrame(columns=colnames)
table = table.append(row, ignore_index=True)
print(table.to_string(index=False))
def do_all():
infiles = {
'Kevlar': 'kevlar_calls_{cov}x.vcf.gz',
'GATK': 'GATK_calls_{cov}x.mvf.gz',
'TrioDenovo': 'triodenovo_calls_{cov}x.vcf.gz',
}
vartypes = (
('SNV', None, None),
('INDEL', 1, 10),
('INDEL', 11, 100),
('INDEL', 101, 200),
('INDEL', 201, 300),
('INDEL', 301, 400),
)
colnames = ['Caller', 'Coverage', 'VarType', 'Correct', 'False', 'Missing']
table = pandas.DataFrame(columns=colnames)
for coverage in ('10', '20', '30', '50'):
for vartype, minlen, maxlen in vartypes:
varstr = vartypestr(vartype, minlen, maxlen)
for caller in ('Kevlar', 'GATK', 'TrioDenovo'):
simvar = 'SimulatedVariants_chr17_hg38.tsv.gz'
varcalls = infiles[caller].format(cov=coverage)
correct, false, missing = evaluate(
simvar, varcalls, caller, vartype=vartype, minlength=minlen,
maxlength=maxlen, tolerance=args.tolerance,
coverage=coverage
)
data = [caller, coverage, varstr, correct, false, missing]
row = {c: v for c, v in zip(colnames, data)}
table = table.append(row, ignore_index=True)
print(table.to_string(index=False))
if __name__ == '__main__':
args = get_parser().parse_args()
if args.do_all:
do_all()
else:
main(args)
| mit |
MonoCloud/zipline | zipline/examples/buyapple.py | 11 | 2079 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.api import order, record, symbol
def initialize(context):
pass
def handle_data(context, data):
order(symbol('AAPL'), 10)
record(AAPL=data[symbol('AAPL')].price)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
# Plot the portfolio and asset data.
ax1 = plt.subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = plt.subplot(212, sharex=ax1)
results.AAPL.plot(ax=ax2)
ax2.set_ylabel('AAPL price (USD)')
# Show the plot.
plt.gcf().set_size_inches(18, 8)
plt.show()
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Set the simulation start and end dates
start = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 11, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=['AAPL'])
results = algo.run(data)
analyze(results=results)
| apache-2.0 |
awanke/bokeh | bokeh/compat/mpl_helpers.py | 20 | 5287 | "Helpers function for mpl module."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from itertools import cycle, islice
from scipy import interpolate, signal
from ..models import GlyphRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def convert_color(mplcolor):
"Converts mpl color formats to Bokeh color formats."
charmap = dict(b="blue", g="green", r="red", c="cyan", m="magenta",
y="yellow", k="black", w="white")
if mplcolor in charmap:
return charmap[mplcolor]
try:
colorfloat = float(mplcolor)
if 0 <= colorfloat <= 1.0:
# This is a grayscale value
return tuple([int(255 * colorfloat)] * 3)
except:
pass
if isinstance(mplcolor, tuple):
# These will be floats in the range 0..1
return int(255 * mplcolor[0]), int(255 * mplcolor[1]), int(255 * mplcolor[2])
return mplcolor
def convert_dashes(dash):
""" Converts a Matplotlib dash specification
bokeh.properties.DashPattern supports the matplotlib named dash styles,
but not the little shorthand characters. This function takes care of
mapping those.
"""
mpl_dash_map = {
"-": "solid",
"--": "dashed",
":": "dotted",
"-.": "dashdot",
}
# If the value doesn't exist in the map, then just return the value back.
return mpl_dash_map.get(dash, dash)
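# Usage sketch (illustrative): convert_dashes("--") yields "dashed", while a value that
# is already a named style (e.g. "dotted") or an explicit dash tuple passes through unchanged.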
def get_props_cycled(col, prop, fx=lambda x: x):
""" We need to cycle the `get.property` list (where property can be colors,
line_width, etc) as matplotlib does. We use itertools tools for do this
cycling ans slice manipulation.
Parameters:
col: matplotlib collection object
prop: property we want to get from matplotlib collection
fx: function (optional) to transform the elements of the list obtained
after the property call. Defaults to the identity function.
"""
n = len(col.get_paths())
t_prop = [fx(x) for x in prop]
sliced = islice(cycle(t_prop), None, n)
return list(sliced)
def is_ax_end(r):
"Check if the 'name' (if it exists) in the Glyph's datasource is 'ax_end'"
if isinstance(r, GlyphRenderer):
try:
if r.data_source.data["name"] == "ax_end":
return True
except KeyError:
return False
else:
return False
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.001, f3=5):
"""
Mimic a hand-drawn line from (x, y) data
Source: http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
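# Minimal usage sketch (illustrative, with made-up data):
#   x = np.linspace(0, 10, 100)
#   x_jit, y_jit = xkcd_line(x, np.sin(x), mag=1.0)
# The returned arrays are jittered coordinates for a hand-drawn looking line.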
| bsd-3-clause |
miguelfrde/stanford-cs231n | assignment1/cs231n/classifiers/neural_net.py | 1 | 10841 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from past.builtins import xrange
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
N, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
hidden1 = np.maximum(0, X.dot(W1) + b1)
scores = hidden1.dot(W2) + b2
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. #
#############################################################################
shift_scores = scores - np.max(scores, axis=1).reshape(-1, 1)
s = np.exp(shift_scores) / np.sum(np.exp(shift_scores), axis=1).reshape(-1, 1)
loss = np.sum(-np.log(s[range(N), y]))
loss = loss/N + 0.5*reg*(np.sum(W1 * W1) + np.sum(W2 * W2))
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
indices = np.zeros(s.shape)
indices[range(N), y] = 1
d_scores = (s - indices)/N
d_hidden = d_scores.dot(W2.T)
d_hidden = (hidden1 > 0) * d_hidden
grads['W2'] = np.dot(hidden1.T, d_scores) + reg * W2
grads['b2'] = np.sum(d_scores, axis=0)
grads['W1'] = X.T.dot(d_hidden) + reg*W1
grads['b1'] = np.sum(d_hidden, axis=0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in xrange(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
indices = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[indices]
y_batch = y[indices]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
self.params['W1'] -= learning_rate * grads['W1']
self.params['W2'] -= learning_rate * grads['W2']
self.params['b1'] -= learning_rate * grads['b1']
self.params['b2'] -= learning_rate * grads['b2']
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
hidden1 = np.maximum(0, X.dot(W1) + b1)
y_pred = np.argmax(hidden1.dot(W2) + b2, axis=1)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
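# Minimal smoke test (illustrative sketch on toy random data; the shapes and
# hyperparameters below are arbitrary choices, not part of the assignment):
if __name__ == '__main__':
    np.random.seed(0)
    X_toy = np.random.randn(100, 5)
    y_toy = np.random.randint(0, 3, size=100)
    net = TwoLayerNet(input_size=5, hidden_size=10, output_size=3)
    stats = net.train(X_toy, y_toy, X_toy, y_toy, num_iters=200, batch_size=50)
    print('Final training loss:', stats['loss_history'][-1])
    print('Training accuracy:', (net.predict(X_toy) == y_toy).mean())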
| mit |
sgagnon/lyman-tools | roi/extract_local_max.py | 1 | 4059 | #! /usr/bin/env python
"""
This script finds clusters in group FFX (MNI space, smoothed), and then outputs
the peaks within a specified ROI as a csv file.
"""
import numpy as np
import glob
import os
import os.path as op
from scipy import stats
import nibabel as nib
import pandas as pd
from moss import locator
from nipype import IdentityInterface, Node, Workflow
from nipype.interfaces import fsl, freesurfer
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as niu # Data i/o
z_thresh = 2.3
grf_pthresh = 0.05
peak_dist = 30
exp = 'localizer-1respmod'
contrast = 'eye-hand'
hemi = 'lh'
rois = ['Angular G',
'Sup Par Lobule',
'Supramarg G, ant',
'Supramarg G, post',
'Lat Occ Ctx, sup']
subjects = np.loadtxt('/Volumes/group/awagner/sgagnon/RM/scripts/subjects.txt', str)
filedir = '/Volumes/group/awagner/sgagnon/RM/analysis/{exp}/{{subid}}/ffx/mni/smoothed/{contrast}'
basedir = filedir.format(exp=exp,contrast=contrast)
peaks_outdir = '/Volumes/group/awagner/sgagnon/RM/analysis/'+exp+'/peaks_'+ contrast
outdir = op.join('/Volumes/group/awagner/sgagnon/RM/analysis',
exp, 'maxclusters_' + contrast + '.csv')
#####################
# Set up nodes
#####################
infosource = Node(IdentityInterface(fields=['subid']),
name="infosource")
infosource.iterables = [('subid', subjects)]
templates = {'func': op.join(basedir, 'zstat1.nii.gz'),
'mask': op.join(basedir, 'mask.nii.gz')}
selectfiles = Node(nio.SelectFiles(templates),
name="selectfiles")
smoothest = Node(fsl.SmoothEstimate(), name="smoothest")
cluster = Node(fsl.Cluster(threshold=z_thresh,
pthreshold=grf_pthresh,
peak_distance=peak_dist,
use_mm=True), name="cluster")
cluster_nogrf = Node(fsl.Cluster(threshold=z_thresh,
peak_distance=peak_dist,
use_mm=True,
out_localmax_txt_file=True), name="cluster")
datasink = Node(nio.DataSink(), name='sinker')
datasink.inputs.base_directory = peaks_outdir
#####################
# Construct workflow, and run
#####################
wf = Workflow(name=exp+'_' + contrast)
wf.connect([(infosource, selectfiles, [('subid', 'subid')]),
(selectfiles, cluster_nogrf, [('func', 'in_file')]),
(infosource, datasink, [('subid', 'container')]),
(cluster_nogrf, datasink, [('localmax_txt_file', 'peaks')])
])
wf.run()
#####################
# Just extract ROIs
#####################
def reformat_cluster_table(cluster_dir, rois, hemi):
"""Add some info to an FSL cluster file and format it properly."""
df = pd.read_table(cluster_dir, delimiter="\t")
df = df[["Cluster Index", "Value", "x", "y", "z"]]
df.columns = ["Cluster", "Value", "x", "y", "z"]
df.index.name = "Peak"
# Find out where the peaks most likely are
if len(df):
coords = df[["x", "y", "z"]].values
loc_df = locator.locate_peaks(coords)
df = pd.concat([df, loc_df], axis=1)
mni_coords = locator.vox_to_mni(coords).T
for i, ax in enumerate(["x", "y", "z"]):
df[ax] = mni_coords[i]
df_trim = df.loc[df['MaxProb Region'].isin(rois)]
if hemi == 'lh':
df_trim = df_trim.loc[df_trim.x < 0]
else:
df_trim = df_trim.loc[df_trim.x > 0]
return df_trim.reset_index()
df = pd.DataFrame()
for subid in subjects:
print(subid)
cluster_dir = '/Volumes/group/awagner/sgagnon/RM/analysis/'+ exp +'/peaks_' + contrast+'/{subid}/peaks/_subid_{subid}/zstat1_localmax.txt'.format(subid=subid)
# reformat clusters (note that xyz from FSL is in diff voxel coordinates, not MNI)
df_sub = reformat_cluster_table(cluster_dir, rois, hemi)
df_sub['subid'] = subid
df_sub['exp'] = exp
df_sub['contrast'] = contrast
df = df.append(df_sub)
df.to_csv(outdir) | bsd-2-clause |
ky822/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it does not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
jos4uke/getSeqFlankBlatHit | lib/python2.7/site-packages/numpy/linalg/linalg.py | 35 | 67345 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
    The eigenvalues are computed using the LAPACK routines _syevd, _heevd.
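    Illustration of the `UPLO` argument (a small sketch; only the selected
    triangle is referenced, so the other triangle's contents are ignored):
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> b = np.array([[1, 99], [2j, 5]])   # upper triangle differs from `a`
    >>> np.allclose(np.linalg.eigvalsh(a, UPLO='L'),
    ...             np.linalg.eigvalsh(b, UPLO='L'))
    True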
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
        array will always be of complex type. When `a` is real
        the resulting eigenvalues will be real (0 imaginary part) or
        occur in conjugate pairs.
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
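    A small sketch verifying this relation (illustrative only):
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> w, v = np.linalg.eig(a)
    >>> np.allclose(np.dot(a, v), v * w)   # dot(a, v[:,i]) == w[i] * v[:,i]
    True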
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
    a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
    The eigenvalues/eigenvectors are computed using the LAPACK routines
    _syevd, _heevd.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
        A real or complex matrix of shape (`M`, `N`).
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
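    A quick sketch of that relationship (illustrative only):
    >>> a = np.random.randn(4, 3)
    >>> s = np.linalg.svd(a, compute_uv=False)
    >>> w = np.linalg.eigvalsh(np.dot(a.T, a))   # eigenvalues of a.H a
    >>> np.allclose(np.sort(w), np.sort(s**2))
    True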
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
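    For instance, a small sketch of that definition using the Frobenius norm:
    >>> a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    >>> np.allclose(np.linalg.cond(a, 'fro'),
    ...             np.linalg.norm(a, 'fro') * np.linalg.norm(np.linalg.inv(a), 'fro'))
    True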
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
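    For example, an illustrative sketch of passing an explicit tolerance:
    >>> from numpy.linalg import matrix_rank
    >>> A = np.eye(4); A[-1, -1] = 1e-10   # nearly rank deficient
    >>> matrix_rank(A)                     # default tol keeps the tiny value
    4
    >>> matrix_rank(A, tol=1e-8)           # a looser tolerance drops it
    3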
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
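    A small sketch of that construction (illustrative only):
    >>> a = np.random.randn(9, 6)
    >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
    >>> B = np.dot(vt.T, np.dot(np.diag(1. / s), u.T))   # Q_2 Sigma^+ Q_1^T
    >>> np.allclose(B, np.linalg.pinv(a))
    True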
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
        for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
    (3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
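    A small sketch of the residuals entry described above (illustrative only):
    >>> A = np.vstack([np.arange(4.), np.ones(4)]).T
    >>> y = np.array([-1, 0.2, 0.9, 2.1])
    >>> x, res, rank, sv = np.linalg.lstsq(A, y)
    >>> np.allclose(res, np.sum((np.dot(A, x) - y)**2))
    True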
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
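    # Workspace query pattern: call the LAPACK *gelsd driver with lwork=-1 to
    # obtain the optimal work-array sizes, then allocate those arrays and run
    # the actual least-squares solve.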
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
        If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
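    A quick sketch of that formula (illustrative only):
    >>> b = np.arange(9.).reshape(3, 3)
    >>> np.allclose(np.linalg.norm(b, 'fro'), np.sqrt(np.sum(np.abs(b)**2)))
    True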
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis)
elif ord == -Inf:
return abs(x).min(axis=axis)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
| gpl-2.0 |