repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Minhua722/NMF | egs/ar/local/ar_extract_nmf_feats.py | 1 | 3850 | #!/usr/bin/env python
import cv2
import numpy as np
import argparse
import math
import pickle
from sklearn.decomposition import PCA
from nmf_support import *
import sys, os
if __name__ == '__main__':
#------------------------------------------------------
# Args parser
#------------------------------------------------------
parser = argparse.ArgumentParser(description='Extract PCA coefficients for each image')
parser.add_argument('--bases_dir', '-base',
action='store', type=str, required=True,
help='directory of bases (eigen vectors)')
parser.add_argument('--exp_id', '-id',
action='store', type=str, required=True,
help='experiment id (related to directory where bases and feats are stored)')
parser.add_argument('--input_dir', '-in',
action='store', type=str, required=True,
help='data dir with a list of image filenames and labels for training (extracted features will also be stored here)')
args = parser.parse_args()
data_dir = args.input_dir.strip('/')
train_list = "%s/train.list" % data_dir
if not os.path.isfile(train_list):
sys.exit(1)
test_sets = []
for set_i in range(2, 14):
test_sets.append("test%d" % set_i)
bases_dir = "%s/%s/bases" % (args.bases_dir.strip('/'), args.exp_id)
bases_pname = "%s/bases.pickle" % bases_dir
if not os.path.isfile(bases_pname):
sys.exit(1)
feats_dir = "%s/%s" % (args.input_dir, args.exp_id)
with open(bases_pname, "rb") as f:
W = pickle.load(f) # each col of W is a basis
D = W.shape[1] # num of bases (feature dimension)
print "%d NMF bases loaded from %s" % (D, bases_pname)
##########################################################################
# Extract training data features
# load img in each col of V
V_raw, img_height, img_width, train_labels = load_data(train_list)
V = normalize_data(V_raw)
train_label_pname = "%s/train_label.pickle" % data_dir
with open(train_label_pname, "wb") as f:
pickle.dump(train_labels, f)
N = V.shape[1]
#train_coefs_pname = "%s/coefs.pickle" % bases_dir
#with open(train_coefs_pname, "rb") as f:
# H = pickle.load(f)
#print H.shape
#assert(H.shape[0] == D and H.shape[1] == N)
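# project each image (one column of V) onto the NMF bases: (V^T W)^T is a
# D x N matrix holding one column of basis coefficients per image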
# mean and variance normalization for each col
train_feats = np.transpose(np.dot(V.T, W))
train_feats = train_feats - np.mean(train_feats, axis=0).reshape(1, N)
train_feats = train_feats / np.std(train_feats, axis=0).reshape(1, N)
train_feats_pname = "%s/train_feats.pickle" % feats_dir
with open(train_feats_pname, "wb") as f:
pickle.dump(train_feats, f)
#print np.mean(train_feats, axis=0)
#print np.std(train_feats, axis=0)
print "train set nmf feats stored in %s" % train_feats_pname
############################################################################
# Extract test data features
for set_name in test_sets:
test_list = "%s/%s.list" % (data_dir, set_name)
print "Process %s" % test_list
# load img in each col of V
V_raw, img_height, img_width, test_labels = load_data(test_list)
V = normalize_data(V_raw)
test_label_pname = "%s/%s_label.pickle" % (data_dir, set_name)
with open(test_label_pname, "wb") as f:
pickle.dump(test_labels, f)
N = V.shape[1]
print "%d test images of size %dx%d loaded" % (N, img_height, img_width)
test_feats = np.transpose(np.dot(V.T, W)) # each col is nmf feats for one image
assert(test_feats.shape[0] == D and test_feats.shape[1] == N)
# mean and variance normalization for each col
test_feats = test_feats - np.mean(test_feats, axis=0).reshape(1, N)
test_feats = test_feats / np.std(test_feats, axis=0).reshape(1, N)
test_feats_pname = "%s/%s_feats.pickle" % (feats_dir, set_name)
with open(test_feats_pname, "wb") as f:
pickle.dump(test_feats, f)
#print np.mean(test_feats, axis=0)
#print np.std(test_feats, axis=0)
print "%s nmf feats stored in %s" % (set_name, test_feats_pname)
| apache-2.0 |
qPCR4vir/orange | Orange/projection/mds.py | 6 | 14713 | """
.. index:: multidimensional scaling (mds)
.. index::
single: projection; multidimensional scaling (mds)
**********************************
Multidimensional scaling (``mds``)
**********************************
This module provides the functionality to perform multidimensional scaling
(http://en.wikipedia.org/wiki/Multidimensional_scaling).
The main class to perform multidimensional scaling is
:class:`Orange.projection.mds.MDS`
.. autoclass:: Orange.projection.mds.MDS
:members:
:exclude-members: Torgerson, get_distance, get_stress, calc_stress, run
.. automethod:: calc_stress(stress_func=SgnRelStress)
.. automethod:: run(iter, stress_func=SgnRelStress, eps=1e-3, progress_callback=None)
Stress functions
================
Stress functions that can be used for MDS have to be implemented as functions
or callable classes:
.. method:: \ __call__(correct, current, weight=1.0)
Compute the stress using the correct and the current distance value (the
:obj:`Orange.projection.mds.MDS.distances` and
:obj:`Orange.projection.mds.MDS.projected_distances` elements).
:param correct: correct (actual) distance between elements, represented by
the two points.
:type correct: float
:param current: current distance between the points in the MDS space.
:type current: float
This module provides the following stress functions:
* :obj:`SgnRelStress`
* :obj:`KruskalStress`
* :obj:`SammonStress`
* :obj:`SgnSammonStress`
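A custom stress function can therefore be implemented as a small callable
class. The ``SquaredStress`` below is only an illustrative sketch of the
required signature (it is not part of this module)::

    class SquaredStress(object):
        def __call__(self, correct, current, weight=1.0):
            # weighted squared difference between target and current distance
            return weight * (correct - current) ** 2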
Examples
========
MDS Scatterplot
---------------
The following script computes the Euclidean distance between the data
instances and runs MDS. Final coordinates are plotted with matplotlib
(not included with orange, http://matplotlib.sourceforge.net/).
Example (:download:`mds-scatterplot.py <code/mds-scatterplot.py>`)
.. literalinclude:: code/mds-scatterplot.py
:lines: 7-
The script produces a file *mds-scatterplot.py.png*. Color denotes
the class. Iris is a relatively simple data set with respect to
classification; unsurprisingly, MDS finds a 2D placement in which
instances of different classes are well separated.
Note that MDS has no knowledge of points' classes.
.. image:: files/mds-scatterplot.png
A more advanced example
-----------------------
The following script performs 10 steps of Smacof optimization before computing
the stress. This is suitable if you have a large dataset and want to save some
time.
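A minimal sketch of that pattern, assuming ``distance`` is an
:class:`Orange.misc.SymMatrix` of dissimilarities prepared beforehand::

    mds = Orange.projection.mds.MDS(distance)
    mds.torgerson()        # analytical initial configuration
    for i in range(10):
        mds.smacof_step()  # a few SMACOF iterations
    mds.calc_stress()      # compute the stress only once, at the end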
Example (:download:`mds-advanced.py <code/mds-advanced.py>`)
.. literalinclude:: code/mds-advanced.py
:lines: 7-
A few representative lines of the output are::
<-0.633911848068, 0.112218663096> [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']
<-0.624193906784, -0.111143872142> [4.9, 3.0, 1.4, 0.2, 'Iris-setosa']
...
<0.265250980854, 0.237793982029> [7.0, 3.2, 4.7, 1.4, 'Iris-versicolor']
<0.208580598235, 0.116296850145> [6.4, 3.2, 4.5, 1.5, 'Iris-versicolor']
...
<0.635814905167, 0.238721415401> [6.3, 3.3, 6.0, 2.5, 'Iris-virginica']
<0.356859534979, -0.175976261497> [5.8, 2.7, 5.1, 1.9, 'Iris-virginica']
...
"""
import numpy
from numpy.linalg import svd
import Orange.core
from Orange import orangeom as orangemds
from Orange.utils import deprecated_keywords
from Orange.utils import deprecated_members
KruskalStress = orangemds.KruskalStress()
SammonStress = orangemds.SammonStress()
SgnSammonStress = orangemds.SgnSammonStress()
SgnRelStress = orangemds.SgnRelStress()
PointList = Orange.core.FloatListList
FloatListList = Orange.core.FloatListList
def _mycompare((a,aa),(b,bb)):
if a == b:
return 0
if a < b:
return -1
else:
return 1
class PivotMDS(object):
def __init__(self, distances=None, pivots=50, dim=2, **kwargs):
self.dst = numpy.array([m for m in distances])
self.n = len(self.dst)
if type(pivots) == type(1):
self.k = pivots
self.pivots = numpy.random.permutation(len(self.dst))[:pivots]
#self.pivots.sort()
elif type(pivots) == type([]):
self.pivots = pivots
#self.pivots.sort()
self.k = len(self.pivots)
else:
raise AttributeError('pivots')
def optimize(self):
# # Classical MDS (Torgerson)
# J = identity(self.n) - (1/float(self.n))
# B = -1/2. * dot(dot(J, self.dst**2), J)
# w,v = linalg.eig(B)
# tmp = zip([float(val) for val in w], range(self.n))
# tmp.sort()
# w1, w2 = tmp[-1][0], tmp[-2][0]
# v1, v2 = v[:, tmp[-1][1]], v[:, tmp[-2][1]]
# return v1 * sqrt(w1), v2 * sqrt(w2)
# Pivot MDS
d = self.dst[[self.pivots]].T
C = d**2
# double-center d
cavg = numpy.sum(d, axis=0)/(self.k+0.0) # column sum
ravg = numpy.sum(d, axis=1)/(self.n+0.0) # row sum
tavg = numpy.sum(cavg)/(self.n+0.0) # total sum
# TODO: optimize
for i in xrange(self.n):
for j in xrange(self.k):
C[i,j] += -ravg[i] - cavg[j]
C = -0.5 * (C + tavg)
w,v = numpy.linalg.eig(numpy.dot(C.T, C))
tmp = zip([float(val) for val in w], range(self.n))
tmp.sort()
w1, w2 = tmp[-1][0], tmp[-2][0]
v1, v2 = v[:, tmp[-1][1]], v[:, tmp[-2][1]]
x = numpy.dot(C, v1)
y = numpy.dot(C, v2)
return x, y
@deprecated_members(
{"projectedDistances": "projected_distances",
"originalDistances": "original_distances",
"avgStress": "avg_stress",
"progressCallback": "progress_callback",
"getStress": "calc_stress",
"get_stress": "calc_stress",
"calcStress": "calc_stress",
"getDistance": "calc_distance",
"get_distance": "calc_distance",
"calcDistance": "calc_distance",
"Torgerson": "torgerson",
"SMACOFstep": "smacof_step",
"LSMT": "lsmt"})
class MDS(object):
"""
Main class for performing multidimensional scaling.
:param distances: original dissimilarity - a distance matrix to operate on.
:type distances: :class:`Orange.misc.SymMatrix`
:param dim: dimension of the projected space.
:type dim: int
:param points: an initial configuration of points (optional)
:type points: :class:`Orange.core.FloatListList`
An instance of MDS object has the following attributes and functions:
.. attribute:: points
Holds the current configuration of projected points in an
:class:`Orange.core.FloatListList` object.
.. attribute:: distances
An :class:`Orange.misc.SymMatrix` containing the distances that we
want to achieve (lsmt changes these).
.. attribute:: projected_distances
An :class:`Orange.misc.SymMatrix` containing the distances between
projected points.
.. attribute:: original_distances
An :class:`Orange.misc.SymMatrix` containing the original distances
between points.
.. attribute:: stress
An :class:`Orange.misc.SymMatrix` holding the stress.
.. attribute:: dim
An integer holding the dimension of the projected space.
.. attribute:: n
An integer holding the number of elements (points).
.. attribute:: avg_stress
A float holding the average stress in the :obj:`stress` matrix.
.. attribute:: progress_callback
A function that gets called after each optimization step in the
:func:`run` method.
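A minimal usage sketch (assuming ``distance`` is an
:class:`Orange.misc.SymMatrix` of pairwise dissimilarities built by the
caller)::

    mds = Orange.projection.mds.MDS(distance, dim=2)
    mds.torgerson()              # analytical starting placement
    mds.run(100)                 # SMACOF optimization, default stress
    for x, y in mds.points:      # projected coordinates
        print x, y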
"""
def __init__(self, distances=None, dim=2, **kwargs):
self.mds=orangemds.MDS(distances, dim, **kwargs)
self.original_distances=Orange.misc.SymMatrix([m for m in self.distances])
def __getattr__(self, name):
if name in ["points", "projected_distances", "distances" ,"stress",
"progress_callback", "n", "dim", "avg_stress"]:
#print "rec:",name
return self.__dict__["mds"].__dict__[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
#print "setattr"
if name=="points":
for i in range(len(value)):
for j in range(len(value[i])):
self.mds.points[i][j]=value[i][j]
return
if name in ["projected_distances", "distances" ,"stress",
"progress_callback"]:
self.mds.__setattr__(name, value)
else:
self.__dict__[name]=value
def __nonzero__(self):
return True
def smacof_step(self):
"""
Perform a single iteration of a Smacof algorithm that optimizes
:obj:`stress` and updates the :obj:`points`.
"""
self.mds.SMACOFstep()
def calc_distance(self):
"""
Compute the distances between points and update the
:obj:`projected_distances` matrix.
"""
self.mds.get_distance()
@deprecated_keywords({"stressFunc": "stress_func"})
def calc_stress(self, stress_func=SgnRelStress):
"""
Compute the stress between the current :obj:`projected_distances` and
:obj:`distances` matrix using *stress_func* and update the
:obj:`stress` matrix and :obj:`avgStress` accordingly.
"""
self.mds.getStress(stress_func)
@deprecated_keywords({"stressFunc": "stress_func"})
def optimize(self, iter, stress_func=SgnRelStress, eps=1e-3,
progress_callback=None):
self.mds.progress_callback=progress_callback
self.mds.optimize(iter, stress_func, eps)
@deprecated_keywords({"stressFunc": "stress_func"})
def run(self, iter, stress_func=SgnRelStress, eps=1e-3,
progress_callback=None):
"""
Perform optimization until stopping conditions are met.
Stopping conditions are:
* optimization runs for *iter* iterations of smacof_step function, or
* stress improvement (old stress minus new stress) is smaller than
eps * old stress.
:param iter: maximum number of optimization iterations.
:type iter: int
:param stress_func: stress function.
"""
self.optimize(iter, stress_func, eps, progress_callback)
def torgerson(self):
"""
Run the Torgerson algorithm that computes an initial analytical
solution of the problem.
"""
# Torgerson's initial approximation
O = numpy.array([m for m in self.distances])
## #B = matrixmultiply(O,O)
## # bug!? B = O**2
## B = dot(O,O)
## # double-center B
## cavg = sum(B, axis=0)/(self.n+0.0) # column sum
## ravg = sum(B, axis=1)/(self.n+0.0) # row sum
## tavg = sum(cavg)/(self.n+0.0) # total sum
## # B[row][column]
## for i in xrange(self.n):
## for j in xrange(self.n):
## B[i,j] += -cavg[j]-ravg[i]
## B = -0.5*(B+tavg)
# B = double-center O**2 !!!
J = numpy.identity(self.n) - (1/numpy.float(self.n))
B = -0.5 * numpy.dot(numpy.dot(J, O**2), J)
# SVD-solve B = ULU'
#(U,L,V) = singular_value_decomposition(B)
(U,L,V)=svd(B)
# X = U(L^0.5)
# # self.X = matrixmultiply(U,identity(self.n)*sqrt(L))
# X is n-dimensional, we take the two dimensions with the largest singular values
idx = numpy.argsort(L)[-self.dim:].tolist()
idx.reverse()
Lt = numpy.take(L,idx) # take those singular values
Ut = numpy.take(U,idx,axis=1) # take those columns that are enabled
Dt = numpy.identity(self.dim)*numpy.sqrt(Lt) # make a diagonal matrix with square-rooted singular values
self.points = Orange.core.FloatListList(numpy.dot(Ut,Dt))
self.freshD = 0
# D = identity(self.n)*sqrt(L) # make a diagonal matrix, with squarooted values
# X = matrixmultiply(U,D)
# self.X = take(X,idx,1)
# Kruskal's monotone transformation
def lsmt(self):
"""
Execute Kruskal monotone transformation.
"""
# optimize the distance transformation
# build vector o
effect = 0
self.getDistance()
o = []
for i in xrange(1,self.n):
for j in xrange(i):
o.append((self.original_distances[i,j],(i,j)))
o.sort(_mycompare)
# find the ties in o, and construct the d vector sorting in order within ties
d = []
td = []
uv = [] # numbers of consecutively tied o values
(i,j) = o[0][1]
distnorm = self.projected_distances[i,j]*self.projected_distances[i,j]
td = [self.projected_distances[i,j]] # fetch distance
for l in xrange(1,len(o)):
# copy now sorted distances in an array
# but sort distances within a tied o
(i,j) = o[l][1]
cd = self.projected_distances[i,j]
distnorm += self.projected_distances[i,j]*self.projected_distances[i,j]
if o[l][0] != o[l-1][0]:
# differing value, flush
sum = reduce(lambda x,y:x+y,td)+0.0
d.append([sum,len(td),sum/len(td),td])
td = []
td.append(cd)
sum = reduce(lambda x,y:x+y,td)+0.0
d.append([sum,len(td),sum/len(td),td])
####
# keep merging non-monotonous areas in d
monotony = 0
while not monotony and len(d) > 1:
monotony = 1
pi = 0 # index
n = 1 # n-areas
nd = []
r = d[0] # current area
for i in range(1,len(d)):
tr = d[i]
if r[2]>=tr[2]:
monotony = 0
effect = 1
r[0] += tr[0]
r[1] += tr[1]
r[2] = tr[0]/tr[1]
r[3] += tr[3]
else:
nd.append(r)
r = tr
nd.append(r)
d = nd
# normalizing multiplier
sum = 0.0
for i in d:
sum += i[2]*i[2]*i[1]
f = numpy.sqrt(distnorm/numpy.max(sum,1e-6))
# transform O
k = 0
for i in d:
for j in range(i[1]):
(ii,jj) = o[k][1]
self.distances[ii,jj] = f*i[2]
k += 1
assert(len(o) == k)
self.freshD = 0
return effect
| gpl-3.0 |
maryklayne/Funcao | examples/intermediate/mplot3d.py | 14 | 1261 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
rkmaddox/mne-python | examples/visualization/topo_compare_conditions.py | 20 | 1828 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
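# note: 'left' and 'right' match the '/'-separated tags in event_id, so each
# evoked averages the audio and visual conditions on that side together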
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
| bsd-3-clause |
esatel/ADCPy | doc/source/conf.py | 1 | 8929 | # -*- coding: utf-8 -*-
#
# ADCpy documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 07 11:54:34 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.ipython_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.doctest','numpydoc',
'sphinx.ext.autosummary']
#'numpydoc']
#'ipython_console_highlighting',
#'inheritance_diagram',
#'numpydoc']
autodoc_member_order = 'alphabetical'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ADCPy'
copyright = u'2014, California Department of Water Resources'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'dwrsmall.gif'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# This prevents the weird 2-index result if you use numpydoc
html_domain_indices = ['py-modindex']
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ADCPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ADCPy.tex', u'ADCPy Documentation',
u'Benjamin Saenz, David Ralston, Rusty Holleman,\nEd Gross, Eli Ateljevich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = ['py-modindex']
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'adcpy', u'ADCpy Documentation',
[u'Benjamin Saenz, David Ralston, Rusty Holleman, Ed Gross, Eli Ateljevich'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ADCpy', u'ADCpy Documentation',
u'Benjamin Saenz, David Ralston, Rusty Holleman, Ed Gross, Eli Ateljevich', 'ADCPy', 'Tools for ADCP analysis and visualization.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
vdt/SimpleCV | SimpleCV/examples/util/ColorCube.py | 13 | 1901 | from SimpleCV import Image, Camera, Display, Color
import pygame as pg
import numpy as np
from pylab import *
from mpl_toolkits.mplot3d import axes3d
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
bins = 8
#precompute
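# build, for every (x, y, z) histogram bin, its index triple together with the
# normalized RGB color at (approximately) the bin center, used to color the points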
idxs = []
colors = []
offset = bins/2
skip = 255/bins
for x in range(0,bins):
for y in range(0,bins):
for z in range(0,bins):
b = ((x*skip)+offset)/255.0
g = ((y*skip)+offset)/255.0
r = ((z*skip)+offset)/255.0
idxs.append((x,y,z,(r,g,b)))
# plot points in 3D
cam = Camera()
disp = Display((800,600))
fig = figure()
fig.set_size_inches( (10,7) )
canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
ax = fig.gca(projection='3d')
ax.set_xlabel('BLUE', color=(0,0,1) )
ax.set_ylabel('GREEN',color=(0,1,0))
ax.set_zlabel('RED',color=(1,0,0))
# Get the color histogram
img = cam.getImage().scale(0.3)
rgb = img.getNumpyCv2()
hist = cv2.calcHist([rgb],[0,1,2],None,[bins,bins,bins],[0,256,0,256,0,256])
hist = hist/np.max(hist)
# render everything
[ ax.plot([x],[y],[z],'.',markersize=max(hist[x,y,z]*100,6),color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
#[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
ax.set_xlim3d(0, bins-1)
ax.set_ylim3d(0, bins-1)
ax.set_zlim3d(0, bins-1)
azim = (azim+0.5)%360
ax.view_init(elev=35, azim=azim)
########### convert matplotlib to SimpleCV image
canvas.draw()
renderer = canvas.get_renderer()
raw_data = renderer.tostring_rgb()
size = canvas.get_width_height()
surf = pg.image.fromstring(raw_data, size, "RGB")
figure = Image(surf)
############ All done
figure = figure.floodFill((0,0), tolerance=5,color=Color.WHITE)
result = figure.blit(img, pos=(20,20))
result.save(disp)
fig.clf()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/mpl_toolkits/mplot3d/axis3d.py | 7 | 17489 | #!/usr/bin/python
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <[email protected]>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
from . import art3d
from . import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
if coord[index] == mins[index]:
return maxs[index]
else:
return mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
self._axinfo.update({'label' : {'va': 'center',
'ha': 'center'},
'tick' : {'inward_factor': 0.2,
'outward_factor': 0.1},
'axisline': {'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' : {'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
self.stale = True
def set_pane_color(self, color):
'''Set pane color to a RGBA tuple'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
self.stale = True
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
self.stale = True
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform([peparray[0:2, 1]]) -
self.axes.transAxes.transform([peparray[0:2, 0]]))[0]
lxyz = 0.5*(edgep1 + edgep2)
# A rough estimate; points are ambiguous since 3D plots rotate
ax_scale = self.axes.bbox.size / self.figure.bbox.size
ax_inches = np.multiply(ax_scale, self.figure.get_size_inches())
ax_points_estimate = sum(72. * ax_inches)
deltas_per_point = 48. / ax_points_estimate
default_offset = 21.
labeldeltas = (self.labelpad + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
# Note: the following statement for determining the proper alignment of
# the offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array
# of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually mean align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = list(zip(xyz1, xyz0, xyz2))
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
default_offset = 8. # A rough estimate
labeldeltas = (tick.get_pad() + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
self.stale = False
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
| mit |
scottpurdy/NAB | tests/integration/corpus_test.py | 10 | 4895 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import numpy as np
import os
import pandas
import shutil
import tempfile
import unittest
import nab.corpus
from nab.util import recur
class CorpusTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
depth = 3
cls.root = recur(os.path.dirname, os.path.realpath(__file__), depth)
cls.corpusSource = os.path.join(cls.root, "tests", "test_data")
def setUp(self):
self.corpus = nab.corpus.Corpus(self.corpusSource)
def testGetDataFiles(self):
"""
Test the getDataFiles() function, specifically check if corpus.dataFiles
is a dictionary containing DataFile objects containing pandas.DataFrame
objects to represent the underlying data.
"""
for df in self.corpus.dataFiles.values():
self.assertIsInstance(df, nab.corpus.DataFile)
self.assertIsInstance(df.data, pandas.DataFrame)
self.assertEqual(set(df.data.columns.values),
set(["timestamp", "value"]))
def testAddColumn(self):
"""
Test the addColumn() function, specificially check if a new column named
"test" is added.
"""
columnData = {}
for relativePath, df in self.corpus.dataFiles.iteritems():
rows, _ = df.data.shape
columnData[relativePath] = pandas.Series(np.zeros(rows))
self.corpus.addColumn("test", columnData, write=False)
for df in self.corpus.dataFiles.values():
self.assertEqual(set(df.data.columns.values),
set(["timestamp", "value", "test"]))
def testRemoveColumn(self):
"""
Test the removeColumn() function, specifically check if an added column
named "test" is removed.
"""
columnData = {}
for relativePath, df in self.corpus.dataFiles.iteritems():
rows, _ = df.data.shape
columnData[relativePath] = pandas.Series(np.zeros(rows))
self.corpus.addColumn("test", columnData, write=False)
self.corpus.removeColumn("test", write=False)
for df in self.corpus.dataFiles.values():
self.assertEqual(set(df.data.columns.values),
set(["timestamp", "value"]))
def testCopy(self):
"""
Test the copy() function, specifically check if it copies the whole corpus
to another directory and that the copied corpus is the exact same as the
original.
"""
copyLocation = os.path.join(tempfile.mkdtemp(), "test")
self.corpus.copy(copyLocation)
copyCorpus = nab.corpus.Corpus(copyLocation)
for relativePath in self.corpus.dataFiles.keys():
self.assertIn(relativePath, copyCorpus.dataFiles.keys())
self.assertTrue(
all(self.corpus.dataFiles[relativePath].data == \
copyCorpus.dataFiles[relativePath].data))
shutil.rmtree(copyLocation)
def testAddDataSet(self):
"""
Test the addDataSet() function, specifically check if it adds a new
data file in the correct location in directory and into the dataFiles
attribute.
"""
copyLocation = os.path.join(tempfile.mkdtemp(), "test")
copyCorpus = self.corpus.copy(copyLocation)
for relativePath, df in self.corpus.dataFiles.iteritems():
newPath = relativePath + "_copy"
copyCorpus.addDataSet(newPath, copy.deepcopy(df))
self.assertTrue(all(copyCorpus.dataFiles[newPath].data == df.data))
shutil.rmtree(copyLocation)
def testGetDataSubset(self):
"""
Test the getDataSubset() function, specifically check if it returns only
dataFiles with relativePaths that contain the query given.
"""
query1 = "realAWSCloudwatch"
subset1 = self.corpus.getDataSubset(query1)
self.assertEqual(len(subset1), 2)
for relativePath in subset1.keys():
self.assertIn(query1, relativePath)
query2 = "artificialWithAnomaly"
subset2 = self.corpus.getDataSubset(query2)
self.assertEqual(len(subset2), 1)
for relativePath in subset2.keys():
self.assertIn(query2, relativePath)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
NSLS-II-SRX/ipython_ophyd | profile_xf05id1-noX11/startup/85-bs_callbacks.py | 1 | 3670 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 12:30:06 2016
@author: xf05id1
"""
from bluesky.callbacks import CallbackBase,LivePlot
#import os
#import time as ttime
#from databroker import DataBroker as db, get_events
#from databroker.databroker import fill_event
import filestore.api as fsapi
#from metadatastore.commands import run_start_given_uid, descriptors_by_start
#import matplotlib.pyplot as plt
from xray_vision.backend.mpl.cross_section_2d import CrossSection
#from .callbacks import CallbackBase
#import numpy as np
#import doct
#from databroker import DataBroker as db
i0_baseline = 7.24e-10
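# constant baseline subtracted from the normalization signal before dividing
# (used in NormalizeLivePlot.event below)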
class NormalizeLivePlot(LivePlot):
def __init__(self, *args, norm_key=None, **kwargs):
super().__init__(*args, **kwargs)
if norm_key is None:
raise RuntimeError("norm key is required kwarg")
self._norm = norm_key
def event(self, doc):
"Update line with data from this Event."
try:
if self.x is not None:
# this try/except block is needed because multiple event streams
# will be emitted by the RunEngine and not all event streams will
# have the keys we want
new_x = doc['data'][self.x]
else:
new_x = doc['seq_num']
new_y = doc['data'][self.y]
new_norm = doc['data'][self._norm]
except KeyError:
# wrong event stream, skip it
return
self.y_data.append(new_y / abs(new_norm-i0_baseline))
self.x_data.append(new_x)
self.current_line.set_data(self.x_data, self.y_data)
# Rescale and redraw.
self.ax.relim(visible_only=True)
self.ax.autoscale_view(tight=True)
self.ax.figure.canvas.draw_idle()
#class LiveImagePiXi(CallbackBase):
"""
Stream 2D images in a cross-section viewer.
Parameters
----------
field : string
name of data field in an Event
Note
----
Requires a matplotlib fix that is not released as of this writing. The
relevant commit is a951b7.
"""
# def __init__(self, field):
# super().__init__()
# self.field = field
# fig = plt.figure()
# self.cs = CrossSection(fig)
# self.cs._fig.show()
# def event(self, doc):
# #uid = doc['data'][self.field]
# #data = fsapi.retrieve(uid)
# data = doc['data']['pixi_image']
# self.cs.update_image(data)
# self.cs._fig.canvas.draw()
# self.cs._fig.canvas.flush_events()
#
def make_live_image(image_axes, key):
"""
Example
p--------
fig, ax = plt.subplots()
image_axes = ax.imshow(np.zeros((476, 512)), vmin=0, vmax=2)
cb = make_live_image(image_axes, 'pixi_image_array_data')
RE(Count([pixi]), subs={'event': [cb]})
"""
def live_image(name, doc):
if name != 'event':
return
image_axes.set_data(doc['data'][key].reshape(476, 512))
return live_image
class SRXLiveImage(CallbackBase):
"""
Stream 2D images in a cross-section viewer.
Parameters
----------
field : string name of data field in an Event
Note
----
Requires a matplotlib fix that is not released as of this writing. The
relevant commit is a951b7.
"""
def __init__(self, field):
super().__init__()
self.field = field
fig = plt.figure()
self.cs = CrossSection(fig)
self.cs._fig.show()
def event(self, doc):
uid = doc['data'][self.field]
data = fsapi.retrieve(uid)
self.cs.update_image(data)
self.cs._fig.canvas.draw_idle()
| bsd-2-clause |
leon-adams/datascience | algorithms/hobfield.py | 1 | 5247 | #
# Leon Adams
#
# Python module for running a Hopfield network to recover a memorized pattern from a perturbed image.
# The raw data set is represented in png image format. This code takes the three color channels (rgb),
# converts them to a single-channel gray-scaled image, and then transforms the output to a [-1, 1] vector
# for use in the calculation of a Hopfield neural network.
#
# Dependencies: numpy; matplotlib
#
# Usage
# Can be used as a normal Python module or run as a script.
# When calling from the command line as a script, supply the corruption percentages at the end of the call.
#
# Example: python hopfield.py 2 3 4
# This will produce 2, 3, and 4 percent perturbation of the image and then
# attempt to locate the closest memorized pattern using a Hopfield network with the Hebb learning rule.
# If called without perturbation parameters, it defaults to [1, 5, 10, 15, 20, 25] corruption percentages.
# Output: the output of the execution is a series of images showing first the perturbed
# image with the corruption percentage in the title, and then the closest memorized
# image found by the Hopfield network.
# begin import needed libraries
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# end import libraries
def rgb_to_gray_array(rgb):
'''
Helper function to convert from rgb tensor to matrix gray-scaled image representation.
Input: rgb tensor matrix of the three rgb color channels.
output: numpy array of gray-scaled numeric values.
'''
return np.dot(rgb[...,:3], np.array([0.299, 0.587, 0.114]))
def read_images(filenames):
'''
Read images to set to memory. Convert from rgb tensor to gray scale representation.
Takes a list of filenames in directory containing pixel images. Returns a list
of numpy arrays converted to gray-scale.
'''
data = [( mpimg.imread(number) ) for number in filenames]
return data, data[0].shape
def create_vector_image(data_array):
'''
Converts a gray-scaled image to [-1, +1] vector representation for hopfield networks.
'''
data_array = np.where(data_array < 0.99, -1, 1)
return data_array.flatten()
def print_unique_cnts(array):
print( np.unique(array, return_counts=True ) )
def train(memories):
'''
Training function for hobfield neural network. Trained with Hebb update rule.
'''
rate, c = memories.shape
Weight = np.zeros((c, c))
for p in memories:
Weight = Weight + np.outer(p,p)
Weight[np.diag_indices(c)] = 0
return Weight/rate
def look_up(Weight_matrix, candidate_pattern, shape, percent_corrupted, steps=5):
'''
Given a candidate pattern, look up the closest memorized stable state. Return the
stable memorized state.
'''
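# synchronous update: s <- sgn(s W) (equivalently sgn(W s), since W is
# symmetric), repeated for `steps` iterations while displaying each state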
sgn = np.vectorize(lambda x: -1 if x<0 else 1)
img = None
for i in range(steps):
im = show_pattern(candidate_pattern, shape)
candidate_pattern = sgn(np.dot(candidate_pattern, Weight_matrix))
if img is None:
img = plt.imshow(im, cmap=plt.cm.binary, interpolation='nearest')
plt.title(str(percent_corrupted) + ' percent corrupted pixels')
else:
img.set_data(im)
plt.pause(.2)
plt.draw()
return candidate_pattern
def hopfield_energy(Weight, patterns):
'''
Calculates the current energy value for a given pattern and weight matrix.
'''
return np.array([-0.5*np.dot(np.dot(p.T, Weight), p) for p in patterns])
def show_img(image, shape):
'''
Helper function to produce visualization of an image.
'''
plt.imshow(image.reshape(shape), cmap=plt.cm.binary, interpolation='nearest')
plt.show()
def show_pattern(pattern, shape):
return np.where(pattern < 0, 0, 1).reshape(shape)
def corrupts(pattern, percentage):
'''
Helper function for deriving corrupted pattern images. Specify stable memory pattern
and the percentage of pixels to switch.
'''
counts = int( 2*np.ceil( len(pattern) * percentage / 200 ) )
neg_mask = np.where(pattern <= 0)[0]
pos_mask = np.where(pattern > 0)[0]
neg_corrupt_indices = np.random.choice(neg_mask, counts/2, replace = False)
pos_corrupt_indices = np.random.choice(pos_mask, counts/2, replace = False)
corrupt_pattern = np.copy(pattern)
corrupt_pattern[neg_corrupt_indices] = 1
corrupt_pattern[pos_corrupt_indices] = -1
return corrupt_pattern
data, shape = read_images(['datasets/C.png', 'datasets/D.png', 'datasets/J.png'])
stable_memories = np.array([create_vector_image(rgb_to_gray_array(array)) for array in data ])
norm_weight_matrix = train(stable_memories)
def test_stable_memories(stable_memory_patterns, corrupt_perentages):
for memory in stable_memory_patterns:
for percent in corrupt_perentages:
crpt_memory = corrupts(memory, percent)
look_up(norm_weight_matrix, crpt_memory, shape[0:2], percent_corrupted = percent, steps=5)
if __name__ == "__main__":
user_input = sys.argv
if len(user_input) > 1:
test_stable_memories(stable_memories, [float(i) for i in user_input[1:] ])
else:
test_stable_memories(stable_memories, [1, 5, 10, 15, 20, 25])
| mpl-2.0 |
lxsmnv/spark | examples/src/main/python/sql/arrow.py | 13 | 3997 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
# $example on:dataframe_with_arrow$
import numpy as np
import pandas as pd
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
# $example off:dataframe_with_arrow$
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
# $example on:scalar_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a, b):
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
# $example off:scalar_pandas_udf$
def grouped_map_pandas_udf_example(spark):
# $example on:grouped_map_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
def substract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").apply(substract_mean).show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
# $example off:grouped_map_pandas_udf$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf scalar example")
scalar_pandas_udf_example(spark)
print("Running pandas_udf grouped map example")
grouped_map_pandas_udf_example(spark)
spark.stop()
| apache-2.0 |
dancingdan/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
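# Note on the arguments above: num_epochs=None with shuffle=True suits training, where the
# estimator decides how many steps to run, while num_epochs=1 with shuffle=False (used below
# for evaluation and prediction) visits each row exactly once, in order.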
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
karvenka/sp17-i524 | project/S17-IR-P014/code/delay.py | 15 | 5276 | import sys
import csv
import sip
#import org.apache.log4j.{Level, Logger}
import matplotlib
#matplotlib.user('agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from pyspark import SparkContext, SparkConf
from datetime import datetime
from operator import add, itemgetter
from collections import namedtuple
from datetime import datetime
import os
import time
from StringIO import StringIO
#Defining the fields, Creating a Flights class with the following fields as a tuple
#Each row is converted into a list
timestarted = time.time()
fields = ('date', 'airline', 'flightnum', 'origin', 'dest', 'dep',
'dep_delay', 'arv', 'arv_delay', 'airtime', 'distance')
Flight = namedtuple('Flight', fields, verbose=True)
DATE_FMT = "%Y-%m-%d"
TIME_FMT = "%H%M"
# User Defined Functions
def toCSVLine(data):
return ','.join(str(d) for d in data)
def split(line):
reader = csv.reader(StringIO(line))
return reader.next()
def parse(row):
row[0] = datetime.strptime(row[0], DATE_FMT).time()
row[5] = datetime.strptime(row[5], TIME_FMT).time()
row[6] = float(row[6])
row[7] = datetime.strptime(row[7], TIME_FMT).time()
row[8] = float(row[8])
row[9] = float(row[9])
row[10] = float(row[10])
return Flight(*row[:11])
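# Illustrative example (made-up values): a CSV row such as
#   ['2014-04-01', 'AA', '1', 'JFK', 'LAX', '0900', '5.0', '1215', '12.0', '385.0', '2475.0']
# becomes a Flight namedtuple whose date/dep/arv fields are parsed time objects and whose
# delay, airtime and distance fields are floats.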
def notHeader(row):
return "Description" not in row
def plot(airlinesdelays):
airlines = [d[0] for d in airlinesdelays]
minutes = [d[1] for d in airlinesdelays]
index = list(xrange(len(airlines)))
#Above we retrieved the respective columns from the list
#Here we mention the plot as a horizontal bar plot
fig, axe = plt.subplots()
bars = axe.barh(index, minutes)
# Add the total minutes to the right
for idx, air, min in zip(index, airlines, minutes):
if min > 0:
bars[idx].set_color('#d9230f')
axe.annotate(" %0.0f min" % min, xy=(min+1, idx+0.5), va='center')
else:
bars[idx].set_color('#469408')
axe.annotate(" %0.0f min" % min, xy=(10, idx+0.5), va='center')
# Set the ticks
ticks = plt.yticks([idx+ 0.5 for idx in index], airlines)
xt = plt.xticks()[0]
plt.xticks(xt, [' '] * len(xt))
# minimize chart junk
plt.grid(axis = 'x', color ='white', linestyle='-')
plt.title('Total Minutes Delayed per Airline')
plt.savefig('airlines.png')
#airlines.filter(notHeader).take(10)
#main method is the entry point for the following program
if __name__ == "__main__":
conf = SparkConf().setAppName("average")
sc = SparkContext(conf=conf)
#setting log level to error
# val rootLogger = Logger.getRootLogger()
# rootLogger.setLevel(Level.ERROR)
#importing data from HDFS for performing analysis
airlines = sc.textFile(sys.argv[1])
# airlines = sc.textFile("hdfs://192.168.1.8:8020/fltdata/airlines.csv")
flights = sc.textFile(sys.argv[2])
airports =sc.textFile(sys.argv[3])
airlinesParsed = dict(airlines.map(split).collect())
airportsParsed= airports.filter(notHeader).map(split)
# print "without header and spliting up", airlines.take(10)
# print "without header and spliting up", airlines.take(10)
flightsParsed= flights.map(lambda x: x.split(',')).map(parse)
#print "The average delay is "+str(sumCount[0]/float(sumCount[1]))
airportDelays = flightsParsed.map(lambda x: (x.origin,x.dep_delay))
# First find the total delay per airport
airportTotalDelay=airportDelays.reduceByKey(lambda x,y:x+y)
# Find the count per airport
airportCount=airportDelays.mapValues(lambda x:1).reduceByKey(lambda x,y:x+y)
# Join to have the sum, count in 1 RDD
airportSumCount=airportTotalDelay.join(airportCount)
# Compute avg delay per airport
airportAvgDelay=airportSumCount.mapValues(lambda x : x[0]/float(x[1]))
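    # Shape example (made-up numbers): after the join, one element of airportSumCount looks like
    # ('JFK', (1200.0, 300)) -- total delay minutes and flight count -- and the mapValues above
    # turns it into ('JFK', 4.0), the average delay per departing flight.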
airportDelay = airportAvgDelay.sortBy(lambda x:-x[1])
print "", airportDelay.take(10)
airportLookup=airportsParsed.collectAsMap()
#airlineLookup=airlinesParsed.collectAsMap()
airline_lookup = sc.broadcast(airlinesParsed)
    # Build (airline name, total delay) pairs, then reduce and collect them
    delays = flightsParsed.map(lambda f: (airline_lookup.value[f.airline], add(f.dep_delay, f.arv_delay)))
    delays = delays.reduceByKey(add).collect()
    airlinesdelays = sorted(delays, key=itemgetter(1))
#tenairlines = delays.map(toCSVLine)
ten = airportAvgDelay.map(lambda x: (airportLookup[x[0]],x[1]))
#print "", ten.take(10)
for d in airlinesdelays:
print "%0.0f minutes delayed\t%s" % (d[1], d[0])
airportBC=sc.broadcast(airportLookup)
topTenAirportsWithDelays = airportAvgDelay.map(lambda x: (airportBC.value[x[0]],x[1])).sortBy(lambda x:-x[1])
lines = topTenAirportsWithDelays.take(10)
topten = "/home/hadoop/"
tenairlines = "/home/hadoop/"
#For collecting the outputs into csv files
with open('topten', "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in lines:
writer.writerows([val])
with open('tenairlines',"w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in delays:
writer.writerows([val])
plot(airlinesdelays)
#Final time taken will be calculated here
timetaken = time.time()-timestarted
print "", timetaken
| apache-2.0 |
Carnon/nlp | TextClassify/textclassify/textdata.py | 1 | 2565 | import os
import codecs
import re
import jieba
import numpy as np
from tqdm import tqdm
from tensorflow.contrib import learn
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
class TextData(object):
def __init__(self,args):
self.args = args
corpus_dir = self.args.corpus_dir
self.load_data(corpus_dir)
def load_data(self,corpus_dir):
self.text = []
self.label = []
self.label_name = {}
self.max_doc_len = 0
self.label_num = 0
self.vocab_size = 0
raw_text = []
raw_label = []
tag_list = os.listdir(corpus_dir)
for tag in tqdm(tag_list,desc='load_data',leave=False):
data_path = os.path.join(corpus_dir,tag)
for data_file in os.listdir(data_path):
file_name = os.path.join(data_path,data_file)
with codecs.open(file_name,'r',encoding='utf-8') as fr_raw:
raw_content = fr_raw.read()
text_word = [word for word in jieba.cut(raw_content) if re.match(u".*[\u4e00-\u9fa5]+", word)]
if text_word.__len__() < self.args.max_doc_len:
raw_text.append(text_word)
raw_label.append(tag)
labelEncode = LabelEncoder()
num_label = labelEncode.fit_transform(raw_label)
self.label = OneHotEncoder(sparse=False).fit_transform(np.reshape(num_label,[-1,1]))
self.label_num = len(labelEncode.classes_)
#self.max_doc_len = max([len(doc) for doc in raw_text])
self.max_doc_len = self.args.max_doc_len
vocab_processor = learn.preprocessing.VocabularyProcessor(self.max_doc_len,tokenizer_fn=tokenizer_fn)
self.text = np.array(list(vocab_processor.fit_transform(raw_text)))
self.vocab_size = len(vocab_processor.vocabulary_)
def shuffle_data(self):
np.random.seed(3)
shuffled = np.random.permutation(np.arange(len(self.label)))
self.text = self.text[shuffled]
self.label = self.label[shuffled]
def get_batches(self):
self.shuffle_data()
sample_size = len(self.text)
batch_size = self.args.batch_size
for i in range(0,sample_size,batch_size):
yield self.text[i:min(i+batch_size,sample_size)],self.label[i:min(i+batch_size,sample_size)]
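# Usage sketch (assumes an `args` object carrying corpus_dir, max_doc_len and batch_size, as
# used by TextData above):
#   data = TextData(args)
#   for batch_x, batch_y in data.get_batches():
#       pass  # feed batch_x / batch_y to the model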
# Chinese word segmentation is already done with jieba above, because TensorFlow's built-in tokenizer cannot split Chinese text; this tokenizer therefore just passes the token lists through.
def tokenizer_fn(iterator):
for value in iterator:
yield value
| apache-2.0 |
yarikoptic/NiPy-OLD | examples/neurospin/demo_dmtx.py | 1 | 2005 | """ test code to make a design matrix
"""
import numpy as np
from nipy.neurospin.utils.design_matrix import dmtx_light
tr = 1.0
frametimes = np.linspace(0,127*tr,128)
conditions = [0,0,0,1,1,1,3,3,3]
onsets=[30,70,100,10,30,90,30,40,60]
hrf_model = 'Canonical'
motion = np.cumsum(np.random.randn(128,6),0)
add_reg_names = ['tx','ty','tz','rx','ry','rz']
#event-related design matrix
paradigm = np.vstack(([conditions, onsets])).T
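# paradigm has shape (9, 2): one row per event, with columns (condition index, onset time)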
x1,name1 = dmtx_light(frametimes, paradigm, drift_model='Polynomial',
drift_order=3, add_regs=motion, add_reg_names=add_reg_names)
# block design matrix
duration = 7*np.ones(9)
paradigm = np.vstack(([conditions, onsets, duration])).T
x2,name2 = dmtx_light(frametimes, paradigm, drift_model='Polynomial', drift_order=3)
# FIR model
paradigm = np.vstack(([conditions, onsets])).T
hrf_model = 'FIR'
x3,name3 = dmtx_light(frametimes, paradigm, hrf_model = 'FIR',
drift_model='Polynomial', drift_order=3,
fir_delays = range(1,6))
import matplotlib.pylab as mp
mp.figure()
mp.imshow(x1/np.sqrt(np.sum(x1**2,0)),interpolation='Nearest', aspect='auto')
mp.xlabel('conditions')
mp.ylabel('scan number')
if name1!=None:
mp.xticks(np.arange(len(name1)),name1,rotation=60,ha='right')
mp.subplots_adjust(top=0.95,bottom=0.25)
mp.title('Example of event-related design matrix')
mp.figure()
mp.imshow(x2/np.sqrt(np.sum(x2**2,0)),interpolation='Nearest', aspect='auto')
mp.xlabel('conditions')
mp.ylabel('scan number')
if name2!=None:
mp.xticks(np.arange(len(name2)),name2,rotation=60,ha='right')
mp.subplots_adjust(top=0.95,bottom=0.25)
mp.title('Example of block design matrix')
mp.figure()
mp.imshow(x3/np.sqrt(np.sum(x3**2,0)),interpolation='Nearest', aspect='auto')
mp.xlabel('conditions')
mp.ylabel('scan number')
if name3!=None:
mp.xticks(np.arange(len(name3)),name3,rotation=60,ha='right')
mp.subplots_adjust(top=0.95,bottom=0.25)
mp.title('Example of FIR design matrix')
mp.show()
| bsd-3-clause |
sarathid/Learning | Intro_to_ML/pca/eigenfaces.py | 9 | 4989 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
original source: http://scikit-learn.org/stable/auto_examples/applications/face_recognition.html
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
# for sklearn version 0.16 or prior, the class_weight parameter value is 'auto'
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator_
###############################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
print confusion_matrix(y_test, y_pred, labels=range(n_classes))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
| gpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/tools/tests/test_util.py | 7 | 16721 | import os
import locale
import codecs
import nose
import numpy as np
from numpy import iinfo
import pandas as pd
from pandas import (date_range, Index, _np_version_under1p9)
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result1, result2 = cartesian_product([x, y])
expected1 = np.array(['A', 'A', 'B', 'B', 'C', 'C'])
expected2 = np.array([1, 22, 1, 22, 1, 22])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
expected1 = np.array([1, 1, 2, 2], dtype=np.int32)
expected2 = np.array([1, 2, 1, 2], dtype=np.int32)
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_empty(self):
# product of empty factors
X = [[], [0, 1], []]
Y = [[], [], ['a', 'b', 'c']]
for x, y in zip(X, Y):
expected1 = np.array([], dtype=np.asarray(x).dtype)
expected2 = np.array([], dtype=np.asarray(y).dtype)
result1, result2 = cartesian_product([x, y])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
# empty product (empty input):
result = cartesian_product([])
expected = []
tm.assert_equal(result, expected)
def test_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
msg = "Input must be a list-like of list-likes"
for X in invalid_inputs:
tm.assertRaisesRegexp(TypeError, msg, cartesian_product, X=X)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is None:
lang, enc = 'it_CH', 'UTF-8'
elif LOCALE_OVERRIDE == 'C':
lang, enc = 'en_US', 'ascii'
else:
lang, enc = LOCALE_OVERRIDE.split('.')
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with self.assertRaisesRegexp(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
# support below np.float32 is rare and far between
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with self.assertRaisesRegexp(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = ['1.1', 2, 3]
expected = np.array([1.1, 2, 3], dtype=np.float64)
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
# Check to make sure numpy is new enough to run this test.
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
# Test will be skipped until there is more uint64 support.
# ('uint64', u, [iinfo(uint64).min, iinfo(uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
# Test will be skipped until there is more uint64 support.
# ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
tm.assert_equal(series.dtype, dtype)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
rboyes/KerasScripts | CSVTrainer.py | 1 | 5321 | import os
import datetime
import sys
import time
import string
import random
import pandas as pd
import numpy as np
import gc
if(len(sys.argv) < 5):
print('Usage: CSVTrainer.py train.csv validation.csv model.h5 log.txt')
sys.exit(1)
trainingName = sys.argv[1]
validationName = sys.argv[2]
modelName = sys.argv[3]
logName = sys.argv[4]
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import *
import keras.preprocessing.image as image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.models import Model, load_model
from keras import regularizers
from keras import backend as K
from keras.utils.data_utils import get_file
from sklearn.metrics import accuracy_score
from keras.applications import resnet50
def readCSV(fileList):
namesDataFrame = pd.read_csv(fileList)
flatten = lambda l: [item for sublist in l for item in sublist]
labels = sorted(list(set(flatten([l.split(' ') for l in namesDataFrame['tags'].values]))))
labelMap = {l: i for i, l in enumerate(labels)}
numberOfLabels = len(labels)
numberOfImages = len(namesDataFrame)
fileNames = []
y = np.zeros((numberOfImages, numberOfLabels), np.float32)
for index in range(0, numberOfImages):
inputImage = image.img_to_array(image.load_img(namesDataFrame.iloc[index][0]))
fileNames.append(namesDataFrame.iloc[index][0])
tags = namesDataFrame.iloc[index][1]
for t in tags.split(' '):
y[index, labelMap[t]] = 1.0
return (fileNames, y, labelMap)
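# Expected CSV layout (illustrative): the first column holds the image path and the 'tags'
# column holds a space-separated label string, e.g.
#   image_name,tags
#   train/img_001.jpg,clear primary
#   train/img_002.jpg,haze water
# readCSV() one-hot encodes those tags into y, one row per image.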
print('Loading images..........', end = '',flush = True)
(trainingFileNames, trainY, trainingLabelMap) = readCSV(trainingName)
(validationFileNames, validationY, validationLabelMap) = readCSV(validationName)
print('done.', flush = True)
if len(trainingLabelMap) != len(validationLabelMap):
print("Label maps for training and validation are not equal")
sys.exit(1)
numberOfTrainingImages = len(trainingFileNames)
numberOfValidationImages = len(validationFileNames)
numberOfChannels = 3
nx = 256
ny = 256
batchSize = 25
lossName = 'binary_crossentropy'
activationName = 'sigmoid'
resnetModel = resnet50.ResNet50(include_top=False, weights='imagenet', input_shape=(numberOfChannels, nx, ny))
print('The number of layers in the resnet model = %d' % (len(resnetModel.layers)))
bottleneckTrainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckValidationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckTrainingGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(trainingFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckValidationGenerator = bottleneckValidationDataGenerator.flow_from_filenames(validationFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckTrainingFeatures = resnetModel.predict_generator(bottleneckTrainingGenerator, numberOfTrainingImages)
bottleneckValidationFeatures = resnetModel.predict_generator(bottleneckValidationGenerator, numberOfValidationImages)
newTop = Sequential()
newTop.add(Flatten(input_shape = bottleneckTrainingFeatures.shape[1:]))
newTop.add(Dense(512, activation='relu'))
newTop.add(Dropout(0.5))
newTop.add(Dense(len(trainingLabelMap), activation=activationName, name='predictions'))
newTop.compile(loss=lossName, optimizer=Adam(lr=1.0E-3))
print('Fitting predicted features...', flush = True)
newTop.fit(bottleneckTrainingFeatures, trainY, validation_data = (bottleneckValidationFeatures, validationY), verbose = 1, batch_size = batchSize, nb_epoch = 25)
print('Done.', flush = True)
finalModel = Model(input = resnetModel.input, output = newTop(resnetModel.output))
print('The number of layers in the final model = %d' % (len(finalModel.layers)))
for layer in finalModel.layers[:(len(resnetModel.layers) - 21)]:
layer.trainable = False
finalModel.compile(loss=lossName,optimizer=SGD(lr=1e-4, momentum=0.9))
print(finalModel.summary())
# Could add vertical_flip = True
trainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0, rotation_range = 40, zoom_range = 0.15, horizontal_flip = True,
width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.1)
validationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
trainingGenerator = trainingDataGenerator.flow_from_filenames(trainingFileNames, trainY, batch_size = batchSize, target_size = (nx, ny))
validationGenerator = validationDataGenerator.flow_from_filenames(validationFileNames, validationY, batch_size = batchSize, target_size = (nx, ny))
csvLogger = CSVLogger(logName, append=True)
checkPointer = ModelCheckpoint(filepath=modelName, verbose = 1, save_best_only = True)
finalModel.fit_generator(trainingGenerator, numberOfTrainingImages, 50, validation_data = validationGenerator,
nb_val_samples = numberOfValidationImages, callbacks = [checkPointer, csvLogger])
| apache-2.0 |
DonBeo/statsmodels | statsmodels/graphics/tests/test_gofplots.py | 27 | 6814 | import numpy as np
from numpy.testing import dec
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot, qqline, ProbPlot
from scipy import stats
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
class BaseProbplotMixin(object):
def base_setup(self):
if have_matplotlib:
self.fig, self.ax = plt.subplots()
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_ppplot(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_probplot(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_array(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_array(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_array(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_prbplt(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_prbplt(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_prbplt(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_qqplot_custom_labels(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_ppplot_custom_labels(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_probplot_custom_labels(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_qqplot_pltkwargs(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_ppplot_pltkwargs(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_probplot_pltkwargs(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
class TestProbPlotLongely(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.line = 'r'
self.base_setup()
class TestProbPlotRandomNormalMinimal(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data)
self.line = None
self.base_setup()
class TestProbPlotRandomNormalWithFit(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, fit=True)
self.line = 'q'
self.base_setup()
class TestProbPlotRandomNormalLocScale(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, loc=8.25, scale=3.25)
self.line = '45'
self.base_setup()
class TestTopLevel(object):
def setup(self):
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.res = self.mod_fit.resid
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
fig = sm.qqplot(self.res, line='r')
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_ProbPlotObjects(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with `ProbPlot` instances
fig = sm.qqplot_2samples(self.prbplt, self.other_prbplot,
line=line)
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_arrays(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with arrays
fig = sm.qqplot_2samples(self.res, self.other_array, line=line)
| bsd-3-clause |
jolove/monmale | machineLearningLibrary.py | 1 | 14267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.cross_validation import train_test_split
from sklearn import linear_model
from sklearn import mixture
from sklearn import metrics
import logging
import sys, traceback, os
import uuid
import psutil
import getpass
import usefulLibraryFiles # app's own helper library
import usefulLibrary # app's own helper library
import numpy as np
from datetime import datetime
log = logging.getLogger()
log.setLevel('INFO')
def applyRegression(dicWrongValues,L2,dicL,L_train,coefVaration,dicAlarmsRegr,score,testSize,analysis,procID):
    # This function has two goals:
    # 1. Replace the "wrong" values in list L2 with values predicted from the remaining features.
    #    NOTE: be careful when the same sample has more than one "wrong" feature.
    # 2. Validate every value in list L2 one by one, i.e. check that the real value is the "same" as the one we get
    #    when predicting it. If it is different enough, an alarm is generated.
    #
    # Ideally the array taken from the DB is joined with the one from the data set under analysis (dropping the samples
    # whose coordinates hold odd values), and that joined list is then split into train and test; this only applies to STEP 1.
    # NOTE: a "regressionMinScore" variable could be defined so the regression is only applied when the score exceeds that value.
features=[]
for col in sorted(dicL.keys()):
features.append(dicL[col])
feature_names = np.array(features)
log.info(procID+" <applyRegression> Features: "+str(feature_names))
if (len(dicWrongValues.keys())>0):
        L_full = usefulLibrary.unionListsWrong(dicWrongValues,L2,L_train,procID) # L_full holds the union of the training array
                                     # and the file array, without the values zeroed because they held strings
log.info(procID+" <applyRegression> Num columns L_full: "+str(len(L_full[0])))
        # Time to walk the array: STEP 1 --> looking for the "0" placeholders we wrote over the bad values
        #    - this time we do not iterate record by record but column by column, which matches how the algorithm is applied
        # The linear regression can only be applied if the size of the L_full array is:
        #   1. Greater than the value defined by the test_size variable
        #   2. If it is greater, we split it until the array matches the exact "test_size" value, taking random samples
percentSizeTrain=(len(L_full)*100)/(len(L_full)+len(dicWrongValues.keys()))
if int(percentSizeTrain) >= int(testSize):
log.info(procID+" <applyRegression> STEP 1: Train array is upper to test_size "+str(testSize)+", the Lineal Regression will be executed.")
analysis=True
values_X, values_Y = [], []
columns=[]
for wrong in sorted(dicWrongValues.keys()):
log.info(procID+" <applyRegression> WrongDict key= "+str(wrong))
if dicWrongValues[wrong]["y"] not in columns:
columns.append(dicWrongValues[wrong]["y"])
log.info(procID+" <applyRegression> Num columns of wrong values= "+str(len(columns)))
for col in columns:
log.info(procID+" <applyRegression> Col= "+str(col))
values_Y, values_X = usefulLibrary.extractColumnArray(L_full,col,procID)
log.info(procID+" <applyRegression> Num rows values_Y: "+str(len(values_Y)))
log.info(procID+" <applyRegression> "+str(values_Y))
log.info(procID+" <applyRegression> Num columns values_X: "+str(len(values_X[0])))
values_X = np.array(values_X)
values_Y = np.array(values_Y)
# Antes de dividir tenemos que calcular el % del tamaño del test_size
#
perCsplit=(percentSizeTrain-int(testSize))/100 # EJ: 91% - 80% = 11% / 100 --> 0.11 del array a usar como test y 0.89 para train
# haciendo justo el 80% (definido por la variable)
log.info(procID+" <applyRegression> test_size= "+str(perCsplit))
X_train, X_test, y_train, y_test =train_test_split(values_X, values_Y, test_size=perCsplit,random_state=33)
log.debug(procID+" <applyRegression> X_train size: before= "+str(len(values_X))+" | now= "+str(len(X_train)))
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, y_train)
# Explained variance score: 1 is perfect prediction
log.info(procID+" <applyRegression> Variance score: "+str(regr.score(X_test,y_test)))
                # Now it is time to predict and substitute
for reg in dicWrongValues.keys():
x=dicWrongValues[reg]["x"]
y=dicWrongValues[reg]["y"]
                    if L2[x][y] == 0 and y == col: # we set this value ourselves, so this check keeps us safe
y_pred=regr.predict(usefulLibrary.extractColumnList(L2[x],y,procID))
log.info(procID+" <applyRegression> Value predict for wrong value in coordinates "+str(x)+","+str(y)+": "+str(y_pred))
aprox=round(y_pred,4)
log.info(procID+" <applyRegression> Aproximate predict value: "+str(aprox))
                        # Now substitute it into the final list
L2[x][y]=aprox
else:
log.info(procID+" <applyRegression> STEP1: Train array is lower to test_size "+str(testSize)+", the Lineal Regression will not be executed.")
    # For STEP 2 we cannot join the list taken from the DB with the one from the file, because we are going to predict
    # every value of the file array and we must not use the values it already contains.
log.info(procID+" <applyRegression> Num columns L_train: "+str(len(L_train[0])))
percentSizeTrain=(len(L_train)*100)/(len(L_train)+len(L2))
if int(percentSizeTrain) >= int(testSize):
log.info(procID+" <applyRegression> STEP 2: Train array is upper to test_size "+str(testSize)+", the Lineal Regression will be executed.")
analysis=True
values_X, values_Y = [], []
        # Walk the whole file array, predicting every value one by one, column by column
for colum in range(len(feature_names)):
log.info(procID+" <applyRegression> Predict values of Colum= "+str(colum))
values_Y, values_X = usefulLibrary.extractColumnArray(L_train,colum,procID)
values_X = np.array(values_X)
values_Y = np.array(values_Y)
            # Before splitting we have to work out the percentage that corresponds to test_size
            #
            perCsplit=(percentSizeTrain-int(testSize))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 as train,
            #                                                  which leaves exactly the 80% defined by the variable
log.info(procID+" <applyRegression> test_size= "+str(perCsplit))
X_train, X_test, y_train, y_test =train_test_split(values_X, values_Y, test_size=perCsplit,random_state=33)
log.debug(procID+" <applyRegression> X_train size: before= "+str(len(values_X))+" | now= "+str(len(X_train)))
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, y_train)
# Explained variance score: 1 is perfect prediction
score=regr.score(X_test,y_test)
log.info(procID+" - Variance score: "+str(score))
            # Once the estimator is trained, start predicting
for row in range(len(L2)):
subDalarm={}
newL = usefulLibrary.extractColumnList(L2[row],colum,procID)
log.info(procID+" <applyRegression> List of features to predict: "+str(newL))
y_pred=regr.predict(newL)
log.info(procID+" <applyRegression> Value predict for coordinates row,colum "+str(row)+","+str(colum)+" -> REAL: "+str(L2[row][colum])+" PRED: "+str(y_pred))
aprox=round(y_pred,4)
log.info(procID+" <applyRegression> Aproximate predict value: "+str(aprox))
coefV = usefulLibrary.desviacionTipica(L2[row][colum],aprox,procID)
if coefV > int(coefVaration):
                    # The coefficient of variation exceeds the allowed threshold, so raise an alarm as a possible anomaly
subDalarm["x"]=row
subDalarm["y"]=colum
dicAlarmsRegr[len(dicAlarmsRegr.keys())]=subDalarm
log.info(procID+" <applyRegression> Alarm generated...[coefV= "+str(coefV)+"]")
else:
                    # The coefficient of variation is below the allowed threshold, so the real and the predicted value are
                    # considered similar and no alarm is generated
log.info(procID+" <applyRegression> Element with value between interval...[coefV= "+str(coefV)+"]")
else:
log.info(procID+" <applyRegression> STEP2: Train array is lower to test_size "+str(testSize)+", the Lineal Regression will not be executed.")
    # Once the array has been walked and every possible alarm collected, we are done.
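# A minimal sketch of the coefficient-of-variation check used above, assuming that
# usefulLibrary.desviacionTipica() compares the real and predicted values roughly as
# 100 * std([real, pred]) / mean([real, pred]). The actual helper lives in usefulLibrary and
# its exact formula may differ; this sketch is only illustrative.
def _coef_variation_sketch(real, pred):
    vals = np.array([real, pred], dtype=float)
    return 100.0 * vals.std() / vals.mean()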
def applyClusteringTotal(L_predict,features,L_train,dicDBvariables,dicAlarmsClus,score,procID):
    # The goal of this procedure is to run the Gaussian Mixture Models (GMM) clustering algorithm a number of times
    # (set by the proofGroup variable), doubling the number of groups to obtain (2^x) on each iteration. Each iteration
    # yields the group that every sample of the array to predict (L_predict) belongs to.
    # It is applied per sample (row).
log.info(procID+" <applyClusteringTotal> Features: "+str(features))
percentSizeTrain=(len(L_train)*100)/(len(L_train)+len(L_predict))
    # Before splitting we have to work out the percentage that corresponds to test_size
    #
    perCsplit=(percentSizeTrain-int(dicDBvariables["test_size"]))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 as train,
    #                                                  which leaves exactly the 80% defined by the variable
log.debug(procID+" <applyClusteringTotal> test_size= "+str(perCsplit))
X_train, X_test, y_train, y_test =train_test_split(L_train, L_train, test_size=perCsplit,random_state=33)
log.debug(procID+" <applyClusteringTotal> X_train size: before= "+str(len(L_train))+" | now= "+str(len(X_train)))
nComp=2
dicResultClusSample={}
dicResultClusGroup={}
for proof in range(int(dicDBvariables['proofGroup'])):
log.info(procID+" <applyClusteringTotal> Proof level:"+str(proof)+" - n_components: "+str(nComp))
gm = mixture.GMM(n_components=nComp,covariance_type='tied', random_state=42)
gm.fit(X_train)
y_pred = gm.predict(L_predict)
usefulLibrary.saveResult(y_pred,dicResultClusSample,dicResultClusGroup,'I'+str(proof),procID)
nComp=nComp*2
log.debug(dicResultClusSample)
log.debug(dicResultClusGroup)
usefulLibrary.applyClusteringAlarm(dicResultClusSample,dicResultClusGroup,dicAlarmsClus,dicDBvariables['clustGroup'],procID)
for alarm in sorted(dicAlarmsClus.keys()):
log.info(procID+" <applyClusteringTotal> Row:"+str(L_predict[alarm])+" - level: "+str(dicAlarmsClus[alarm]))
def applyClusteringPartial(L_predict,features,L_train,dicDBvariables,dicAlarmsClusTotal,score,procID):
    # The goal of this procedure is to run the Gaussian Mixture Models (GMM) clustering algorithm a number of times
    # (set by the proofGroup variable), doubling the number of groups to obtain (2^x) on each iteration. Each iteration
    # yields the group that every sample of the array to predict (L_predict) belongs to.
    # It is applied per column.
log.info(procID+" <applyClusteringPartial> Features: "+str(features))
percentSizeTrain=(len(L_train)*100)/(len(L_train)+len(L_predict))
    # Before splitting we have to work out the percentage that corresponds to test_size
    #
    perCsplit=(percentSizeTrain-int(dicDBvariables["test_size"]))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 as train,
    #                                                  which leaves exactly the 80% defined by the variable
log.debug(procID+" <applyClusteringPartial> test_size= "+str(perCsplit))
X_train, X_test, y_train, y_test =train_test_split(L_train, L_train, test_size=perCsplit,random_state=33)
log.debug(procID+" <applyClusteringPartial> X_train size: before= "+str(len(L_train))+" | now= "+str(len(X_train)))
for col in range(len(features)):
dicAlarmsClus={}
nComp=2
dicResultClusSample={}
dicResultClusGroup={}
L_1colTR, L_restColTR = usefulLibrary.extractColumnArray(L_train,col,procID)
L_1colPR, L_restColPR = usefulLibrary.extractColumnArray(L_predict,col,procID)
for proof in range(int(dicDBvariables['proofGroup'])):
log.info(procID+" <applyClusteringPartial> Proof level:"+str(proof)+" - n_components: "+str(nComp)+" - COLUMN= "+str(col))
gm = mixture.GMM(n_components=nComp,covariance_type='tied', random_state=42)
gm.fit(L_1colTR)
y_pred = gm.predict(L_1colPR)
usefulLibrary.saveResult(y_pred,dicResultClusSample,dicResultClusGroup,'I'+str(proof),procID)
nComp=nComp*2
log.debug(dicResultClusSample)
log.debug(dicResultClusGroup)
usefulLibrary.applyClusteringAlarm(dicResultClusSample,dicResultClusGroup,dicAlarmsClus,dicDBvariables['clustGroup'],procID)
dicAlarmsClusTotal[col]=dicAlarmsClus
for alarm in sorted(dicAlarmsClus.keys()):
log.info(procID+" <applyClusteringPartial> COLUMN= "+str(col)+" Value:"+str(L_predict[alarm][col])+" - level: "+str(dicAlarmsClus[alarm])) | apache-2.0 |
jimsrc/seatos | mixed/figs/sheaths.paper/src/together4.py | 1 | 11024 | #!/usr/bin/env ipython
from pylab import *
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
import os, sys
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from numpy import array
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
class gral:
def __init__(self):
self.name='name'
TS = 11
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def makefig(ax, mc, sh, TEXT, TEXT_LOC, YLIMS, varname):
LW = 0.3 # linewidth
MS = 1.5
    fmc,fsh = 3.0, 1.0 # time scaling factors (MC and sheath widths on the normalized time axis)
if(varname == 'Temp'):
mc.med /= 1.0e4; sh.med /= 1.0e4
mc.avr /= 1.0e4; sh.avr /= 1.0e4
mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
TEXT_LOC['mc'][1] /= 1.0e4
TEXT_LOC['sh'][1] /= 1.0e4
    # MC curves
time = fsh+fmc*mc.tnorm
cc = time>=fsh
ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=MS, label='mean', lw=LW)
ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', label='median', lw=LW)
    # MC shaded error band
inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
# sheath curves
time = fsh*sh.tnorm
cc = time<=fsh
ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=MS, lw=LW)
ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', lw=LW)
# sheath shaded band
inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
#trans = transforms.blended_transform_factory(
# ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
#ax.legend(loc='best', fontsize=10)
ax.tick_params(labelsize=TS)
ax.grid()
ax.set_xlim(-2.0, 7.0)
ax.set_ylim(YLIMS)
ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=7)
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=7)
if(varname in ('beta','Temp', 'rmsB', 'rmsBoB')):
ax.set_yscale('log')
else:
ax.set_yscale('linear')
return ax
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
stf = {}
stf['B'] = {
'label': 'B [nT]',
'ylims': [5., 29.],
'text_loc_1': {'mc':[4.5, 15.0], 'sh':[-1.95, 12.0]},
'text_loc_2': {'mc':[4.5, 18.0], 'sh':[-1.95, 12.0]},
'text_loc_3': {'mc':[4.5, 12.0], 'sh':[-1.95, 12.0]},
'nrow': 1
}
stf['V'] = {
'label': 'Vsw [km/s]',
'ylims': [350., 800.],
'text_loc_1': {'mc':[4.5, 500.0], 'sh':[-1.95, 520.0]},
'text_loc_2': {'mc':[4.5, 600.0], 'sh':[-1.95, 600.0]},
'text_loc_3': {'mc':[4.5, 410.0], 'sh':[-1.95, 600.0]},
'nrow': 2
}
stf['rmsBoB'] = {
'label': 'rmsBoB [1]',
'ylims': [0.015, 0.21],
'text_loc_1': {'mc':[4.5, 0.020], 'sh':[-1.95, 0.02]},
'text_loc_2': {'mc':[4.5, 0.095], 'sh':[-1.95, 0.02]},
'text_loc_3': {'mc':[4.5, 0.099], 'sh':[-1.95, 0.02]},
'nrow': 6
}
stf['rmsB'] = {
'label': 'rmsB [nT]',
'ylims': [0.1, 4.0],
'text_loc_1': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'text_loc_2': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'text_loc_3': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'nrow': 1
}
stf['beta'] = {
'label': '$\\beta$ [1]',
'ylims': [0.02, 10.0],
'text_loc_1': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'text_loc_2': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'text_loc_3': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'nrow': 5
}
stf['Pcc'] = {
'label': '$n_p$ [$cm^{-3}$]',
'ylims': [1, 23],
'text_loc_1': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
'text_loc_2': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
'text_loc_3': {'mc':[4.5, 11], 'sh':[-1.95, 18.0]},
'nrow': 3
}
stf['Temp'] = {
'label': 'T ($\\times 10^4$) [K]',
'ylims': [1e4, 100e4],
'text_loc_1': {'mc':[4.5, 18.0e4], 'sh':[-1.95, 20.0e4]},
'text_loc_2': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
'text_loc_3': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
'nrow': 4
}
stf['AlphaRatio'] = {
'label': 'alpha ratio [1]',
'ylims': [0.02, 0.09],
'text_loc_1': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
'text_loc_2': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
'text_loc_3': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]}
}
stf['CRs'] = {
'label': '$n_{GCR}$ [%]',
'ylims': [-8.0, 2.0],
'text_loc_1': {'mc':[4.5, -4.0], 'sh':[-1.95, -4.5]},
'text_loc_2': {'mc':[4.5, -7.0], 'sh':[-1.95, -4.5]},
'text_loc_3': {'mc':[4.5, -7.5], 'sh':[-1.95, -4.5]},
'nrow': 2
}
TEXT = {}
dir_figs = sys.argv[1] #'../figs'
#dir_inp_mc = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dir_inp_sh = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = os.environ['RIGHT']
dir_inp_sh = os.environ['LEFT']
vlo = [100.0, 450.0, 550.0]
vhi = [450.0, 550.0, 3000.0]
nvars = len(stf.keys())
print " input: "
print " %s " % dir_inp_mc
print " %s \n" % dir_inp_sh
print " vlo, vhi: ", (vlo, vhi), '\n'
print " nvars: ", nvars
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
i=2
#fig = figure(1, figsize=(12, 15))
f = plt.figure(1, figsize=(7, 5.8))
nr = 1 # scale for row size
gs = GridSpec(nrows=3*nr, ncols=2*3)
gs.update(left=0.1, right=0.98, hspace=0.13, wspace=0.15)
for i in range(3):
fname_inp = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%3.1f.vhi.%3.1f' % (vlo[i], vhi[i])
fname_inp_nro_mc = dir_inp_mc + '/n.events_' + fname_inp + '.txt'
fname_inp_nro_sh = dir_inp_sh + '/n.events_' + fname_inp + '.txt'
#n = 1 # number of row
print " ______ col %d ______" % i
for varname in ('rmsB', 'CRs'):
# open the file to find out the number of events
fnro_mc = open(fname_inp_nro_mc, 'r')
fnro_sh = open(fname_inp_nro_sh, 'r')
for lmc, lsh in zip(fnro_mc, fnro_sh):
l_mc = lmc.split()
l_sh = lsh.split()
if varname==l_mc[0]: # variable name
n = stf[varname]['nrow']
ax = plt.subplot(gs[(n-1)*nr:n*nr, (2*i):(2*(i+1))])
Nfinal_mc, Nfinal_sh = int(l_mc[1]), int(l_sh[1]) # number of events
fnro_mc.close(); fnro_sh.close()
break
print " %s"%varname, ' Nfinal_mc:%d' % Nfinal_mc, 'Nfinal_sh:%d' % Nfinal_sh
mc, sh = gral(), gral()
fname_inp_mc = dir_inp_mc + '/' + fname_inp + '_%s.txt' % varname
fname_inp_sh = dir_inp_sh + '/' + fname_inp + '_%s.txt' % varname
mc.tnorm, mc.med, mc.avr, mc.std_err, mc.nValues = np.loadtxt(fname_inp_mc).T
sh.tnorm, sh.med, sh.avr, sh.std_err, sh.nValues = np.loadtxt(fname_inp_sh).T
# number of events with more than 80% non-gap data
TEXT['mc'] = ' N: %d' % Nfinal_mc
TEXT['sh'] = ' N: %d' % Nfinal_sh
if(vlo[i]==100.0):
TEXT_LOC = stf[varname]['text_loc_1'] #1.7, 12.0
elif(vlo[i]==450.0):
TEXT_LOC = stf[varname]['text_loc_2'] #1.7, 12.0
elif(vlo[i]==550.0):
TEXT_LOC = stf[varname]['text_loc_3'] #1.7, 12.0
else:
print " ----> ERROR con 'v_lo'!"
raise SystemExit
ylims = array(stf[varname]['ylims']) #[4., 17.]
ylabel = stf[varname]['label'] #'B [nT]'
ax = makefig(ax, mc, sh, TEXT, TEXT_LOC, ylims, varname)
# ticks & labels x
ax.tick_params(labelsize=TS)
if n==2: #n==nvars-1:
ax.set_xlabel('time normalized to\nsheath/MC passage [1]', fontsize=11)
#ax.xaxis.set_ticklabels([-1,0,1,2,3])
xticks = [-2,-1,0,1,2,3,4,5,6,7]
ax.set_xticks(xticks)
ax.set_xticklabels(xticks)
else:
ax.set_xlabel('')
#ax.get_xaxis().set_ticks([])
ax.xaxis.set_ticklabels([])
# ticks & labels y
if i==0:
ax.set_ylabel(ylabel, fontsize=15)
else:
ax.set_ylabel('')
#ax.get_yaxis().set_ticks([])
ax.yaxis.set_ticklabels([])
#+++++++++++++++++++++++++ nCR & model-fit
#dirs = {}
#dirs['sheath'] = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dirs['mc'] = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dirs['fname_inputs'] = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
#dirs['figs'] = dir_figs
#
#par = {}
#par['lo'] = {
# 'vlo': 100.0,
# 'vhi': 450.0,
# 'tau': 2.36,
# 'bp' : 0.0,
# 'q' : -9.373,
# 'off': 0.89,
# 'bo' : 16.15
#}
#par['mid'] = {
# 'vlo': 450.0,
# 'vhi': 550.0,
# 'tau': 4.18,
# 'bp' : -0.9,
# 'q' : -6.02,
# 'off': 0.0,
# 'bo' : 11.87
#}
#par['hi'] = {
# 'vlo': 550.0,
# 'vhi': 3000.0,
# 'tau': 5.78,
# 'bp' : -0.18,
# 'q' : -5.53,
# 'off': 1.01,
# 'bo' : 14.48
#}
#
#from funcs import build_plot
#n = 3; i=0
#for i, name in zip(range(3), ('lo', 'mid', 'hi')):
# ax = plt.subplot(gs[(n-1)*nr:n*nr, (2*i):(2*(i+1))])
# build_plot(dirs, par[name], ax)
# if i==0:
# ax.set_ylabel('$n_{GCR}$ [%]', fontsize=15)
# else:
# ax.set_ylabel('')
# ax.yaxis.set_ticklabels([])
#+++++++++++++++++++++++++++++++++++++++++
#fig.tight_layout()
#fname_fig = dir_figs + '/fig_vlo.%3.1f_vhi.%3.1f_%s.png'%(vlo, vhi, varname)
fname_fig = '%s/figs_splitted_3.png' % dir_figs
savefig(fname_fig, dpi=150, bbox_inches='tight')
close()
print "\n output en:\n %s\n" % fname_fig
#EOF
| mit |
CleverChuk/ices | Python/multijob_module.py | 1 | 3479 | """
Author: Chukwubuikem Ume-Ugwa
Email: [email protected]
MIT License
Copyright (c) 2017 CleverChuk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from dataparser import *
from multiprocessing import Pool, Manager, Process
import os
import time
manager = Manager()
heightD = manager.dict() # holds values for minimum height of each particle
TSIZE = 8 # data type size in bytes
N_OFFSETS = 44 # number of data
FCOLOR = genColor(N_OFFSETS, manager)
# Dimension of the simulation bed
xsize = 78
ysize = 112
zsize = 104
hOut = "HeightData"
def startChild(fname):
# NOTE: leftover/broken helper -- "iam" (apparently a module self-reference),
# producer and consumer are not defined in this module.
iam.fn = fname
dictn = iam.manager.dict()
mylist = iam.manager.list()
pool = Pool()
# passing offset multiplier to the producer task
pool.map(iam.producer, [i for i in range(1 , iam.N_OFFSETS)], 1)
# Feeds task from producers into the list
for i, j in dictn.items():
mylist.append(j[0])
# single process to handle plotting
proc = Process(target=iam.consumer, args=(mylist, ))
proc.start()
proc.join()
def multijob(fname):
"""
Handles reading and plotting of data in file with name fname
"""
print("Starting multijob from process: %d" % os.getpid())
fig = plt.figure()
axis = Axes3D(fig)
heightL = manager.list()
axis.set_xlim([0,ysize])
axis.set_ylim([0,ysize])
axis.set_zlim([0,ysize])
axis.view_init(elev = 40, azim = 50)
coords = manager.list()
rho = readsingle(fname)
for i in range(1, N_OFFSETS):
eta_s = readsingle(fname, i * TSIZE)
# eta_s = process(rho, filter_eta(eta_s))
coords.append(getcoords(eta_s, xsize, ysize, zsize))
heightL.append(max(coords[-1][-2]) - min(coords[-1][-2]))
writtable(hOut,str(heightL).strip('[]'))
plot(coords, fig, axis, count = "ALL", fcolor = FCOLOR, multijob = (True,fname))
print("Finished multijob from process: %d" % os.getpid())
if __name__ == "__main__":
print("Starting mutiple jobs in a process task")
import timeit, sys
start_time = timeit.default_timer()
if(os.path.exists(hOut)):
os.remove(hOut)
pool = Pool()
files = list()
MAXCOUNT = 4
STEP = 2
START = 0
FNAME = "fullT{0}.dat"
## file with filesname to work on
for i in range(START, MAXCOUNT, STEP):
files.append(FNAME.format(i))
pool.map(multijob, files, 1)
elapsed = timeit.default_timer() - start_time
print("total time %d seconds" % elapsed)
print("Finished multiple job in a process task")
| mit |
ZenDevelopmentSystems/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
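Concretely, for an image vector ``w``, projection operator ``X`` and measured
projections ``y``, the objective minimized by :class:`sklearn.linear_model.Lasso`
can be written (with scikit-learn's scaling convention) as::

    (1 / (2 * n_samples)) * ||y - X w||^2_2 + alpha * ||w||_1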
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir * l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
TheGhostHuCodes/spy_dir | spy_dir.py | 1 | 2182 | #!/usr/bin/env python
import os
import os.path as pt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
#TODO: take decimal places as parameter for printing.
def sizeof_pp(num):
for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']:
if abs(num) < 1024.0:
return "%3.2f %s" % (num, unit)
num /= 1024.0
return "%.2f %s" % (num, 'Yi')
def xtic_formatter(num, tick_index):
return sizeof_pp(num)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='.')
parser.add_argument('dir_path', metavar='Path', type=str, help='')
parser.add_argument('-p', '--plot', action='store_true')
args = parser.parse_args()
sizes = []
symlink_count = 0
for root, dirs, files in os.walk(args.dir_path, followlinks=False):
for name in files:
fullpath = pt.join(root, name)
if not os.path.islink(fullpath):
sizes.append(pt.getsize(fullpath))
else:
symlink_count += 1
sizes.sort()
print("Searching in directory: {0}".format(args.dir_path))
print("Files Inspected: {0}".format(len(sizes)))
print("Maxfilesize: " + sizeof_pp(sizes[-1]))
print("Symlinks found: {0}".format(symlink_count))
percentile = 95
index = len(sizes) * (percentile / 100.)
print("{0}% of files smaller than: ~".format(percentile) + sizeof_pp(
sizes[int(index)]))
sizesArray = np.asarray(sizes)
if (args.plot):
bins = min(len(sizes) / 10, 200)
plt.figure(figsize=(8, 8))
ax = plt.subplot(111)
# Adjust y-axis to show bins of height 1 and max bin height.
n, _, _ = plt.hist(sizesArray, bins, log=True)
plt.ylim(0.5, max(n) * 1.1)
plt.xlabel("File Size (bytes)")
plt.ylabel("Log(Number of Files)")
plt.title("File size histogram for: {0}".format(args.dir_path))
x_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
x_formatter.set_scientific(False)
x_format = mpl.ticker.FuncFormatter(xtic_formatter)
ax.xaxis.set_major_formatter(x_format)
plt.show()
| apache-2.0 |
deepesch/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
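# Example (illustrative): with fetch_lfw_people's default
# slice_=(slice(70, 195), slice(78, 172)) and resize=0.5, this gives
# h = int(0.5 * 125) = 62 and w = int(0.5 * 94) = 47, i.e. the 62 x 47 images
# mentioned in the fetch_* docstrings.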
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representaion
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
HolgerPeters/scikit-learn | sklearn/ensemble/gradient_boosting.py | 5 | 73159 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
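# Illustrative usage sketch (not part of the library source); parameter values
# below are arbitrary examples of the public API defined in this module:
#
#     from sklearn.ensemble import GradientBoostingClassifier
#     clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
#                                      max_depth=3, random_state=0)
#     clf.fit(X_train, y_train)
#     y_pred = clf.predict(X_test)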
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils import deprecated
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight,
self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
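# prob = y - residual (from the identity noted above), so the Hessian term
# w * prob * (1 - prob) is written as w * (y - residual) * (1 - y + residual):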
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, criterion,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_depth, min_impurity_split, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_split = min_impurity_split
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_split=self.min_impurity_split,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
# is regression
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features *
self.n_features_), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
        # self.n_estimators is the total number of estimators to fit
        # (including any that were already fitted when warm-starting)
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
            raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
check_is_fitted(self, 'estimators_')
@property
@deprecated("Attribute n_features was deprecated in version 0.19 and "
"will be removed in 0.21.")
def n_features(self):
return self.n_features_
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting (a hedged example callable is sketched after this
            class).
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features_ = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort == True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features_, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features_, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
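# A hedged sketch (added for illustration; not part of the original module) of
# a ``monitor`` callable as accepted by ``BaseGradientBoosting.fit``. The
# helper name and the choice of ``oob_improvement_`` as the stopping signal
# are illustrative assumptions, not the library's own early-stopping API.
def _example_oob_monitor(patience=5):
    """Build a monitor that stops boosting after ``patience`` stages without
    out-of-bag improvement (requires ``subsample < 1.0``)."""
    history = []

    def monitor(i, est, locals_):
        # oob_improvement_[i] is filled in before the monitor is invoked
        history.append(est.oob_improvement_[i])
        recent = history[-patience:]
        # returning True stops the fitting procedure early
        return len(recent) == patience and all(imp <= 0.0 for imp in recent)

    return monitor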
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
criterion : string, optional (default="friedman_mse")
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_split=1e-7, init=None,
random_state=None, max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
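# A minimal, hedged usage sketch (added for illustration; not part of the
# original module). It assumes ``sklearn.datasets.make_hastie_10_2`` is
# importable; the helper name is hypothetical and it is never called at
# import time.
def _example_gradient_boosting_classifier():
    """Fit a small classifier and report held-out accuracy per stage."""
    from sklearn.datasets import make_hastie_10_2
    X, y = make_hastie_10_2(n_samples=2000, random_state=0)
    X_train, y_train, X_test, y_test = X[:1600], y[:1600], X[1600:], y[1600:]
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X_train, y_train)
    # staged_predict yields class predictions after each boosting stage
    staged_accuracy = [np.mean(y_pred == y_test)
                       for y_pred in clf.staged_predict(X_test)]
    return clf.score(X_test, y_test), staged_accuracy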
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
criterion : string, optional (default="friedman_mse")
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_split=1e-7, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features, min_impurity_split=min_impurity_split,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
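# A minimal, hedged usage sketch (added for illustration; not part of the
# original module). It assumes ``sklearn.datasets.make_friedman1`` is
# importable; the helper name is hypothetical and it is never called at
# import time.
def _example_gradient_boosting_regressor():
    """Fit a small regressor and track held-out squared error per stage."""
    from sklearn.datasets import make_friedman1
    X, y = make_friedman1(n_samples=1200, noise=1.0, random_state=0)
    X_train, y_train, X_test, y_test = X[:200], y[:200], X[200:], y[200:]
    est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
                                    max_depth=1, loss='ls', random_state=0)
    est.fit(X_train, y_train)
    # staged_predict yields predictions after each boosting stage
    staged_mse = [np.mean((y_test - y_pred) ** 2)
                  for y_pred in est.staged_predict(X_test)]
    return staged_mse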
| bsd-3-clause |
0x0all/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed-form formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimum and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
abimannans/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
            # covariances before fitting. There is no way of fixing this
            # due to the variational parameters being more expressive than
            # covariance matrices.
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to trigger a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed
    mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
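# A hedged numerical sketch (added for illustration; not collected as a test):
# it contrasts the two mathematically equivalent covariance formulas quoted in
# the docstring above on a small, uniformly weighted sample. The helper name
# is hypothetical.
def _demo_covariance_formulas():
    local_rng = np.random.RandomState(3)
    X = local_rng.randn(50, 2)
    w = np.ones(50) / 50.                         # uniform weights
    mu = np.dot(w, X)
    # C = (sum_i w_i x_i x_i^T) - mu mu^T  (prone to round-off cancellation)
    c_subtract = np.dot(w * X.T, X) - np.outer(mu, mu)
    # C = sum_i w_i (x_i - mu)(x_i - mu)^T  (numerically safer form)
    diff = X - mu
    c_centered = np.dot(w * diff.T, diff)
    assert_array_almost_equal(c_subtract, c_centered)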
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
dcastro9/patternrec_ps2 | code/alcohol_script.py | 1 | 5623 | from Dataset import Dataset
from WTA_Hasher import WTAHasher
from kNN_Classifier import kNNClassifier
import numpy as np
import matplotlib.pyplot as plt
import copy
ds_train_dir = "../datasets/alcohol/alcoholism_training.csv"
ds_test_dir = "../datasets/alcohol/alcoholism_test.csv"
results_dir = "../final_results/alcohol/"
num_k_values = 10
weights = [1,1,1,1,1,3]
ds_orig = Dataset(ds_train_dir, name='Original Data')
ds_norm = Dataset(ds_train_dir, normalize=True, name='Normalized Data')
ds_norm_weigh = Dataset(ds_train_dir, normalize=True, weights=weights,
name='Norm & Weighted Data')
ds_whiten = Dataset(ds_train_dir, whiten=True, name='Whitened Data')
ds_orig_t = Dataset(ds_test_dir)
ds_norm_t = Dataset(ds_test_dir, normalize=True)
ds_norm_weigh_t = Dataset(ds_test_dir, normalize=True, weights=weights)
ds_whiten_t = Dataset(ds_test_dir, whiten=True)
alcohol_datasets = [[ds_orig, ds_orig_t],
[ds_norm, ds_norm_t],
[ds_norm_weigh, ds_norm_weigh_t],
[ds_whiten, ds_whiten_t]]
k_values = range(1,num_k_values*2,2)
color=['red','blue','green','black']
labels=['20%', '50%', '80%', '100%']
folds=['2-fold', '5-fold', 'N-fold']
for ds in alcohol_datasets:
train_data_all = ds[0].data
test_data = ds[1].data
    # Accuracy for 20%, 50%, 80% and 100% of the data.
    # Each subset holds one accuracy array per cross-validation scheme
    # (2-fold, 5-fold and N-fold), indexed by k value.
train_accuracy = [[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)],
[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)],
[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)],
[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)]]
best_k_and_ds = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
for it in range(5):
train_data_20, t = Dataset.getRandomPercent(train_data_all, 0.2)
train_data_50, t = Dataset.getRandomPercent(train_data_all, 0.5)
train_data_80, t = Dataset.getRandomPercent(train_data_all, 0.8)
all_training_data = [train_data_20,
train_data_50,
train_data_80,
train_data_all]
# Only run on train_data_all once.
if it > 0:
all_training_data = all_training_data[:-1]
for val in range(len(all_training_data)):
for k in k_values:
                print str(it) + ": Training on: " + labels[val] + " for k value: " + str(k) + " for " + ds[0].name
# Do 2-5-N Fold Cross Validation.
cv_2 = Dataset.getkPartitions(all_training_data[val], 2)
cv_5 = Dataset.getkPartitions(all_training_data[val], 5)
cv_n = Dataset.getkPartitions(all_training_data[val],
len(all_training_data[val]))
cvs = [cv_2, cv_5, cv_n]
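                # cv_n has one partition per sample, so the "N-fold" case
                # amounts to leave-one-out cross validation.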
cross_val_accuracy = [0, 0, 0]
for cv_c in range(len(cvs)):
# Does f-Fold cross validation.
accuracy = 0
for fold in range(len(cvs[cv_c])):
td = copy.deepcopy(cvs[cv_c]) # Copy the cross validation dataset.
del td[fold] # Delete the item we're using for testing.
td_reshaped = []
for elem in td:
for item in elem:
td_reshaped.append(item)
knn = kNNClassifier(td_reshaped, k) # Initialize kNN.
accuracy += knn.test(cvs[cv_c][fold]) # Test.
accuracy /= len(cvs[cv_c])
if best_k_and_ds[val][cv_c] == 0:
best_k_and_ds[val][cv_c] = [k, td_reshaped, accuracy]
elif best_k_and_ds[val][cv_c][2] < accuracy:
best_k_and_ds[val][cv_c] = [k, td_reshaped, accuracy]
train_accuracy[val][cv_c][k/2] += accuracy
# Write results to file.
out_f = open(results_dir + ds[0].name + ".txt", 'w')
for cnt in range(len(train_accuracy)):
# Setup plot.
plt.xlabel('k Values')
plt.ylabel('Accuracy')
plt.title(ds[0].name)
average = True
if cnt == len(train_accuracy) - 1:
average = False
for fold in range(len(train_accuracy[cnt])):
if (average):
train_accuracy[cnt][fold] /= 5
plt.plot(k_values, train_accuracy[cnt][fold], color=color[fold],
label=folds[fold])
out_f.write(labels[cnt] + ":" + folds[fold] + ":" +
str(train_accuracy[cnt][fold]) + "\n")
# Save plot.
plt.legend()
plt.savefig(results_dir + ds[0].name + labels[cnt] + ".pdf")
plt.clf()
plt.cla()
# Now we test with the original test data provided.
out_f.write("\n\n Testing for best k & DS for:" + ds[0].name +"\n")
for val in range(len(best_k_and_ds)):
for fold in range(len(best_k_and_ds[val])):
knn = kNNClassifier(best_k_and_ds[val][fold][1],
best_k_and_ds[val][fold][0]) # Initialize kNN.
out = knn.test(test_data) # Test.
out_f.write(labels[val] + " with k:" +
str(best_k_and_ds[val][fold][0]) + " at " + folds[fold] +
" original accuracy:" + str(best_k_and_ds[val][fold][2]) +
" vs accuracy:" + str(out) + "\n")
# Close file.
out_f.close() | mit |
dhruv13J/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
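    # With alpha = 1 this is the Laplace-smoothed estimate
    #     P(x_i = 1 | c) = (N_ci + 1) / (N_c + 2),
    # where N_ci counts documents of class c containing feature i and N_c counts
    # documents of class c; the +2 adds one pseudo-count per outcome of the
    # binary feature.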
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
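    # The unnormalised values below follow directly from the Bernoulli model,
    # multiplying P(c) by p for each present feature and (1 - p) for each
    # absent one:
    #   China: 0.75 * 0.6 * 0.8 * 0.2 * 0.6 * 0.6 * 0.2 = 0.005184
    #   Japan: 0.25 * (2/3)**6 = 0.02194787379972565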
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
hagabbar/pycbc_copy | examples/distributions/spin_spatial_distr_example.py | 14 | 1973 | import numpy
import matplotlib.pyplot as plt
import pycbc.coordinates as co
from mpl_toolkits.mplot3d import Axes3D
from pycbc import distributions
# We can choose any polar-angle bounds between 0 and pi for this distribution,
# but the bounds are given in units of pi, so we use values between 0 and 1.
theta_low = 0.
theta_high = 1.
# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi.
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py
# Here we are using the UniformSolidAngle distribution, which takes
# polar_bounds=(theta_lower_bound, theta_upper_bound) and
# azimuthal_bounds=(phi_lower_bound, phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can take a random variable sample from that distribution.
# In this case we want 10000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000)
# Give every sample a spin magnitude of 1, since the solid angle only provides
# two angles and we need a third coordinate (the radius) for the 3D plot below.
spin_mag = numpy.ones(10000, dtype=float)
# Use the spherical_to_cartesian function from pycbc.coordinates (imported as
# co) to convert from spherical polar coordinates to cartesian coordinates.
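# Under the usual physics convention -- which pycbc.coordinates is assumed to
# follow here -- the conversion is
#   x = r * sin(theta) * cos(phi)
#   y = r * sin(theta) * sin(phi)
#   z = r * cos(theta)
# with theta the polar angle measured from the +z axis and phi the azimuth.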
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Plot the spherical distribution of spins to make sure that the samples are
# distributed across the surface of a sphere.
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spinx, spiny, spinz, s=1)
ax.set_xlabel('Spin X Axis')
ax.set_ylabel('Spin Y Axis')
ax.set_zlabel('Spin Z Axis')
plt.show()
| gpl-3.0 |
zaxliu/deepnap | experiments/kdd-exps/experiment_DynaQNN_130_Feb10_2317.py | 1 | 5180 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgentNN(DynaMixin, QAgentNN):
def __init__(self, **kwargs):
super(Dyna_QAgentNN, self).__init__(**kwargs)
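# Note on the composition above: listing DynaMixin before QAgentNN puts it
# first in the method resolution order, so the super() call in Dyna_QAgentNN
# reaches DynaMixin.__init__ first, which presumably delegates to QAgentNN via
# its own super() call. What DynaMixin adds internally (model learning and
# simulated experience, judging by its name) is an assumption, not shown here.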
# Parameters
# |- Data
location = 'dmW'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
# phi_length = 5
# dim_state = (1, phi_length, 3+2)
# range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
# range_state = [[range_state_slice]*phi_length]
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 0, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 2
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgentNN(
env_model=env_model, num_sim=num_sim,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
kedz/sumpy | sumpy/io.py | 1 | 4037 | import os
import re
import pandas as pd
def load_duc_docset(input_source):
docs = DucSgmlReader().read(input_source)
return docs
def load_duc_abstractive_summaries(input_source):
models = DucAbstractSgmlReader().read(input_source)
return models
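# A minimal usage sketch (illustrative, not part of the original module; the
# paths below are hypothetical):
#
#   docs = load_duc_docset("duc2004/docs")          # a directory of SGML files
#   models = load_duc_abstractive_summaries(["s1.sgml", "s2.sgml"])  # explicit paths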
class FileInput(object):
def gather_paths(self, source):
"""Determines the type of source and return an iterator over input
document paths. If source is a str or unicode
object, determine if it is also a directory and return an iterator
for all directory files; otherwise treat as a single document input.
If source is any other iterable, treat as an iterable of file
paths."""
if isinstance(source, str) or isinstance(source, unicode):
if os.path.isdir(source):
paths = [os.path.join(source, fname)
for fname in os.listdir(source)]
for path in paths:
yield path
else:
yield source
else:
try:
for path in source:
yield path
except TypeError:
print source, 'is not iterable'
class DucSgmlReader(FileInput):
def read(self, input_source):
docs = []
for path in self.gather_paths(input_source):
with open(path, u"r") as f:
sgml = "".join(f.readlines())
m = re.search(r"<TEXT>(.*?)</TEXT>", sgml, flags=re.DOTALL)
if m is None:
raise Exception("TEXT not found in " + path)
text = m.group(1).strip()
text_clean = re.sub(r"<[^>]*?>", r"", text)
docs.append(text_clean)
return docs
class DucAbstractSgmlReader(FileInput):
def read(self, input_source):
docs = []
for path in self.gather_paths(input_source):
with open(path, u"r") as f:
sgml = "".join(f.readlines())
m = re.search(r"<SUM[^>]+>(.*?)</SUM>", sgml, flags=re.DOTALL)
if m is None:
raise Exception("SUM not found in " + path)
text = m.group(1).strip()
docs.append(text)
return docs
class MeadDocSentReader(FileInput):
docsent_patt = (r"<DOCSENT DID='([^']+)'\s+DOCNO='([^']+)'\s+"
r"LANG='([^']+)'\s+CORR-DOC='([^']+)'>")
sent_patt = (r"<S PAR=['\"]([^']+)['\"]\s+"
r"RSNT=['\"]([^']+)['\"]\s+"
r"SNO=['\"]([^']+)['\"]>(.*?)</S>")
def read(self, input_source):
docs = []
for path in self.gather_paths(input_source):
sents = []
with open(path, u"r") as f:
xml = "".join(f.readlines())
m = re.search(self.docsent_patt, xml, flags=re.DOTALL)
if m is None:
raise Exception("DOCSENT not found in " + path)
doc_id = m.group(1)
lang = m.group(3)
for s in re.finditer(self.sent_patt, xml, flags=re.DOTALL):
par = int(s.group(1))
rsnt = s.group(2)
sno = s.group(3)
text = s.group(4).strip()
if par > 1:
sents.append(text)
#sents.append({u"doc id": doc_id, u"sent id": int(rsnt),
# u"type": u"body" if par > 1 else u"headline",
# u"text": text.decode("utf-8")})
docs.append("\n".join(sents).decode("utf-8"))
#df = pd.DataFrame(
# sents, columns=[u"doc id", u"type", u"sent id", u"text"])
#df.set_index([u"doc id", u"sent id"], inplace=True)
return docs
def load_demo_docs():
import pkg_resources
input_source = pkg_resources.resource_filename(
"sumpy",
os.path.join("data", "mead_example_docs"))
return MeadDocSentReader().read(input_source)
| apache-2.0 |
zzcclp/spark | python/pyspark/pandas/tests/test_groupby.py | 14 | 118068 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError, DataError
from pyspark.pandas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from pyspark.pandas.groupby import is_multi_agg_with_relabel
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class GroupByTest(PandasOnSparkTestCase, TestUtils):
def test_groupby_simple(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("a").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index).sum()),
sort(pdf.groupby("a", as_index=as_index).sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index).b.sum()),
sort(pdf.groupby("a", as_index=as_index).b.sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)["b"].sum()),
sort(pdf.groupby("a", as_index=as_index)["b"].sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
sort(pdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)[[]].sum()),
sort(pdf.groupby("a", as_index=as_index)[[]].sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)["c"].sum()),
sort(pdf.groupby("a", as_index=as_index)["c"].sum()),
)
self.assert_eq(
psdf.groupby("a").a.sum().sort_index(), pdf.groupby("a").a.sum().sort_index()
)
self.assert_eq(
psdf.groupby("a")["a"].sum().sort_index(), pdf.groupby("a")["a"].sum().sort_index()
)
self.assert_eq(
psdf.groupby("a")[["a"]].sum().sort_index(), pdf.groupby("a")[["a"]].sum().sort_index()
)
self.assert_eq(
psdf.groupby("a")[["a", "c"]].sum().sort_index(),
pdf.groupby("a")[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()
)
for axis in [0, "index"]:
self.assert_eq(
psdf.groupby("a", axis=axis).a.sum().sort_index(),
pdf.groupby("a", axis=axis).a.sum().sort_index(),
)
self.assert_eq(
psdf.groupby("a", axis=axis)["a"].sum().sort_index(),
pdf.groupby("a", axis=axis)["a"].sum().sort_index(),
)
self.assert_eq(
psdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
)
self.assert_eq(
psdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b, axis=axis).sum().sort_index(),
pdf.a.groupby(pdf.b, axis=axis).sum().sort_index(),
)
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False).a)
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False)["a"])
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False)[["a"]])
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: psdf.groupby("z", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: psdf.groupby(["z"], as_index=False)[["a", "c"]])
self.assertRaises(TypeError, lambda: psdf.a.groupby(psdf.b, as_index=False))
self.assertRaises(NotImplementedError, lambda: psdf.groupby("a", axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.groupby("a", axis="columns"))
self.assertRaises(ValueError, lambda: psdf.groupby("a", "b"))
self.assertRaises(TypeError, lambda: psdf.a.groupby(psdf.a, psdf.b))
# we can't use column name/names as a parameter `by` for `SeriesGroupBy`.
self.assertRaises(KeyError, lambda: psdf.a.groupby(by="a"))
self.assertRaises(KeyError, lambda: psdf.a.groupby(by=["a", "b"]))
self.assertRaises(KeyError, lambda: psdf.a.groupby(by=("a", "b")))
# we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`.
self.assertRaises(ValueError, lambda: psdf.groupby(psdf))
self.assertRaises(ValueError, lambda: psdf.a.groupby(psdf))
self.assertRaises(ValueError, lambda: psdf.a.groupby((psdf,)))
# non-string names
pdf = pd.DataFrame(
{
10: [1, 2, 6, 4, 4, 6, 4, 3, 7],
20: [4, 2, 7, 3, 3, 1, 1, 1, 2],
30: [4, 2, 7, 3, None, 1, 1, 1, 2],
40: list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(10).reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby(10, as_index=as_index).sum()),
sort(pdf.groupby(10, as_index=as_index).sum()),
)
self.assert_eq(
sort(psdf.groupby(10, as_index=as_index)[20].sum()),
sort(pdf.groupby(10, as_index=as_index)[20].sum()),
)
self.assert_eq(
sort(psdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
sort(pdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
)
def test_groupby_multiindex_columns(self):
pdf = pd.DataFrame(
{
(10, "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7],
(10, "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2],
(20, "c"): [4, 2, 7, 3, None, 1, 1, 1, 2],
(30, "d"): list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby((10, "a")).sum().sort_index(), pdf.groupby((10, "a")).sum().sort_index()
)
self.assert_eq(
psdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
pdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
)
self.assert_eq(
psdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
pdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
)
# TODO: a pandas bug?
# expected = pdf.groupby((10, "a"))[(20, "c")].sum().sort_index()
expected = pd.Series(
[4.0, 2.0, 1.0, 4.0, 8.0, 2.0],
name=(20, "c"),
index=pd.Index([1, 2, 3, 4, 6, 7], name=(10, "a")),
)
self.assert_eq(psdf.groupby((10, "a"))[(20, "c")].sum().sort_index(), expected)
if (
LooseVersion(pd.__version__) >= LooseVersion("1.0.4")
and LooseVersion(pd.__version__) != LooseVersion("1.1.3")
and LooseVersion(pd.__version__) != LooseVersion("1.1.4")
):
self.assert_eq(
psdf[(20, "c")].groupby(psdf[(10, "a")]).sum().sort_index(),
pdf[(20, "c")].groupby(pdf[(10, "a")]).sum().sort_index(),
)
else:
# Due to pandas bugs resolved in 1.0.4, re-introduced in 1.1.3 and resolved in 1.1.5
self.assert_eq(psdf[(20, "c")].groupby(psdf[(10, "a")]).sum().sort_index(), expected)
def test_split_apply_combine_on_series(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
funcs = [
((True, False), ["sum", "min", "max", "count", "first", "last"]),
((True, True), ["mean"]),
((False, False), ["var", "std"]),
]
funcs = [(check_exact, almost, f) for (check_exact, almost), fs in funcs for f in fs]
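        # ``funcs`` is now a flat list of (check_exact, almost, func_name)
        # triples, e.g. (True, False, "sum") or (False, False, "var"),
        # consumed by the loops below.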
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for check_exact, almost, func in funcs:
for kkey, pkey in [("b", "b"), (psdf.b, pdf.b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
if as_index is True or func != "std":
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
else:
                            # seems like a pandas bug for as_index=False and func == "std"?
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index).a, func)()),
sort(pdf.groupby(pkey, as_index=True).a.std().reset_index()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index), func)()),
sort(pdf.groupby(pkey, as_index=True).std().reset_index()),
check_exact=check_exact,
almost=almost,
)
for kkey, pkey in [(psdf.b + 1, pdf.b + 1), (psdf.copy().b, pdf.copy().b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for i in [0, 4, 7]:
with self.subTest(as_index=as_index, func=func, i=i):
self.assert_eq(
sort(getattr(psdf.groupby(psdf.b > i, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(psdf.b > i, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for kkey, pkey in [
(psdf.b, pdf.b),
(psdf.b + 1, pdf.b + 1),
(psdf.copy().b, pdf.copy().b),
(psdf.b.rename(), pdf.b.rename()),
]:
with self.subTest(func=func, key=pkey):
self.assert_eq(
getattr(psdf.a.groupby(kkey), func)().sort_index(),
getattr(pdf.a.groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((psdf.a + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.a + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((psdf.b + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.b + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr(psdf.a.rename().groupby(kkey), func)().sort_index(),
getattr(pdf.a.rename().groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
def test_aggregate(self):
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for kkey, pkey in [("A", "A"), (psdf.A, pdf.A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
psdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
if as_index:
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
else:
                        # seems like a pandas bug for as_index=False and func_or_funcs is a list?
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=True).agg(["sum"]).reset_index()),
)
for kkey, pkey in [(psdf.A + 1, pdf.A + 1), (psdf.copy().A, pdf.copy().A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
psdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
expected_error_message = (
r"aggs must be a dict mapping from column name to aggregate functions "
r"\(string or list of strings\)."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.groupby("A", as_index=as_index).agg(0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([(10, "A"), (10, "B"), (20, "C")])
pdf.columns = columns
psdf.columns = columns
for as_index in [True, False]:
stats_psdf = psdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
stats_pdf = pdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
self.assert_eq(
stats_psdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
stats_pdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
)
stats_psdf = psdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
stats_pdf = pdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
self.assert_eq(
stats_psdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
stats_pdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
)
# non-string names
pdf.columns = [10, 20, 30]
psdf.columns = [10, 20, 30]
for as_index in [True, False]:
stats_psdf = psdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
stats_pdf = pdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
self.assert_eq(
stats_psdf.sort_values(by=[20, 30]).reset_index(drop=True),
stats_pdf.sort_values(by=[20, 30]).reset_index(drop=True),
)
stats_psdf = psdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
stats_pdf = pdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
self.assert_eq(
stats_psdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
stats_pdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
)
def test_aggregate_func_str_list(self):
# this is test for cases where only string or list is assigned
pdf = pd.DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
psdf = ps.from_pandas(pdf)
agg_funcs = ["max", "min", ["min", "max"]]
for aggfunc in agg_funcs:
            # Since the row order from a Koalas groupby might differ from pandas,
            # sort on the index to ensure both have the same output
sorted_agg_psdf = psdf.groupby("kind").agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby("kind").agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_psdf, sorted_agg_pdf)
# test on multi index column case
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
for aggfunc in agg_funcs:
sorted_agg_psdf = psdf.groupby(("X", "A")).agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby(("X", "A")).agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_psdf, sorted_agg_pdf)
@unittest.skipIf(pd.__version__ < "0.25.0", "not supported before pandas 0.25.0")
def test_aggregate_relabel(self):
# this is to test named aggregation in groupby
pdf = pd.DataFrame({"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]})
psdf = ps.from_pandas(pdf)
# different agg column, same function
agg_pdf = pdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
agg_psdf = psdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
self.assert_eq(agg_pdf, agg_psdf)
# same agg column, different functions
agg_pdf = pdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
agg_psdf = psdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
self.assert_eq(agg_pdf, agg_psdf)
# test on NamedAgg
agg_pdf = (
pdf.groupby("group").agg(b_max=pd.NamedAgg(column="B", aggfunc="max")).sort_index()
)
agg_psdf = (
psdf.groupby("group").agg(b_max=ps.NamedAgg(column="B", aggfunc="max")).sort_index()
)
self.assert_eq(agg_psdf, agg_pdf)
# test on NamedAgg multi columns aggregation
agg_pdf = (
pdf.groupby("group")
.agg(
b_max=pd.NamedAgg(column="B", aggfunc="max"),
b_min=pd.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
agg_psdf = (
psdf.groupby("group")
.agg(
b_max=ps.NamedAgg(column="B", aggfunc="max"),
b_min=ps.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
self.assert_eq(agg_psdf, agg_pdf)
def test_dropna(self):
pdf = pd.DataFrame(
{"A": [None, 1, None, 1, 2], "B": [1, 2, 3, None, None], "C": [4, 5, 6, 7, None]}
)
psdf = ps.from_pandas(pdf)
# pd.DataFrame.groupby with dropna parameter is implemented since pandas 1.1.0
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=dropna).std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).std()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
)
self.assert_eq(
sort(
psdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
sort(
pdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
)
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
psdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
sort(
pdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
sorted_stats_psdf = sort(
psdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
sorted_stats_pdf = sort(
pdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
self.assert_eq(sorted_stats_psdf, sorted_stats_pdf)
else:
# Testing dropna=True (pandas default behavior)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=True)["B"].min()),
sort(pdf.groupby("A", as_index=as_index)["B"].min()),
)
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
psdf.groupby(["A", "B"], as_index=as_index, dropna=True).agg(
{"C": ["min", "std"]}
)
),
sort(pdf.groupby(["A", "B"], as_index=as_index).agg({"C": ["min", "std"]})),
almost=True,
)
# Testing dropna=False
index = pd.Index([1.0, 2.0, np.nan], name="A")
expected = pd.Series([2.0, np.nan, 1.0], index=index, name="B")
result = psdf.groupby("A", as_index=True, dropna=False)["B"].min().sort_index()
self.assert_eq(expected, result)
expected = pd.DataFrame({"A": [1.0, 2.0, np.nan], "B": [2.0, np.nan, 1.0]})
result = (
psdf.groupby("A", as_index=False, dropna=False)["B"]
.min()
.sort_values("A")
.reset_index(drop=True)
)
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[(1.0, 2.0), (1.0, None), (2.0, None), (None, 1.0), (None, 3.0)], names=["A", "B"]
)
expected = pd.DataFrame(
{
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
},
index=index,
)
result = (
psdf.groupby(["A", "B"], as_index=True, dropna=False)
.agg({"C": ["min", "std"]})
.sort_index()
)
self.assert_eq(expected, result)
expected = pd.DataFrame(
{
("A", ""): [1.0, 1.0, 2.0, np.nan, np.nan],
("B", ""): [2.0, np.nan, np.nan, 1.0, 3.0],
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
result = (
psdf.groupby(["A", "B"], as_index=False, dropna=False)
.agg({"C": ["min", "std"]})
.sort_values(["A", "B"])
.reset_index(drop=True)
)
self.assert_eq(expected, result)
def test_describe(self):
# support for numeric type, not support for string type yet
datas = []
datas.append({"a": [1, 1, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
datas.append({"a": [-1, -1, -3], "b": [-4, -5, -6], "c": [-7, -8, -9]})
datas.append({"a": [0, 0, 0], "b": [0, 0, 0], "c": [0, 8, 0]})
# it is okay if string type column as a group key
datas.append({"a": ["a", "a", "c"], "b": [4, 5, 6], "c": [7, 8, 9]})
percentiles = [0.25, 0.5, 0.75]
formatted_percentiles = ["25%", "50%", "75%"]
non_percentile_stats = ["count", "mean", "std", "min", "max"]
for data in datas:
pdf = pd.DataFrame(data)
psdf = ps.from_pandas(pdf)
describe_pdf = pdf.groupby("a").describe().sort_index()
describe_psdf = psdf.groupby("a").describe().sort_index()
            # since the results of the percentile columns differ slightly from pandas,
            # we check them separately: non-percentile columns & percentile columns
# 1. Check that non-percentile columns are equal.
agg_cols = [col.name for col in psdf.groupby("a")._agg_columns]
self.assert_eq(
describe_psdf.drop(list(product(agg_cols, formatted_percentiles))),
describe_pdf.drop(columns=formatted_percentiles, level=1),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby("a").quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
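            # "{:.0%}".format maps 0.25/0.5/0.75 to "25%"/"50%"/"75%", so the
            # renamed quantile columns line up with describe()'s column labels.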
self.assert_eq(
describe_psdf.drop(list(product(agg_cols, non_percentile_stats))),
quantile_pdf.rename(columns="{:.0%}".format, level=1),
)
# not support for string type yet
datas = []
datas.append({"a": ["a", "a", "c"], "b": ["d", "e", "f"], "c": ["g", "h", "i"]})
datas.append({"a": ["a", "a", "c"], "b": [4, 0, 1], "c": ["g", "h", "i"]})
for data in datas:
pdf = pd.DataFrame(data)
psdf = ps.from_pandas(pdf)
self.assertRaises(
NotImplementedError, lambda: psdf.groupby("a").describe().sort_index()
)
# multi-index columns
pdf = pd.DataFrame({("x", "a"): [1, 1, 3], ("x", "b"): [4, 5, 6], ("y", "c"): [7, 8, 9]})
psdf = ps.from_pandas(pdf)
describe_pdf = pdf.groupby(("x", "a")).describe().sort_index()
describe_psdf = psdf.groupby(("x", "a")).describe().sort_index()
# 1. Check that non-percentile columns are equal.
agg_column_labels = [col._column_label for col in psdf.groupby(("x", "a"))._agg_columns]
self.assert_eq(
describe_psdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, formatted_percentiles)
]
),
describe_pdf.drop(columns=formatted_percentiles, level=2),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby(("x", "a")).quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_psdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, non_percentile_stats)
]
),
quantile_pdf.rename(columns="{:.0%}".format, level=2),
)
def test_aggregate_relabel_multiindex(self):
pdf = pd.DataFrame({"A": [0, 1, 2, 3], "B": [5, 6, 7, 8], "group": ["a", "a", "b", "b"]})
pdf.columns = pd.MultiIndex.from_tuples([("y", "A"), ("y", "B"), ("x", "group")])
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = pdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
agg_psdf = psdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
self.assert_eq(agg_pdf, agg_psdf)
# same column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_psdf = (
psdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_psdf)
# different column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [6, 8], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_psdf = (
psdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_psdf)
def test_all_any(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
"B": [True, True, True, False, False, False, None, True, None, False],
}
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).all()),
sort(pdf.groupby("A", as_index=as_index).all()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).any()),
sort(pdf.groupby("A", as_index=as_index).any()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).all()).B,
sort(pdf.groupby("A", as_index=as_index).all()).B,
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).any()).B,
sort(pdf.groupby("A", as_index=as_index).any()).B,
)
self.assert_eq(
psdf.B.groupby(psdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()
)
self.assert_eq(
psdf.B.groupby(psdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
psdf.columns = columns
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby(("X", "A"), as_index=as_index).all()),
sort(pdf.groupby(("X", "A"), as_index=as_index).all()),
)
self.assert_eq(
sort(psdf.groupby(("X", "A"), as_index=as_index).any()),
sort(pdf.groupby(("X", "A"), as_index=as_index).any()),
)
def test_raises(self):
psdf = ps.DataFrame(
{"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
# test raises with incorrect key
self.assertRaises(ValueError, lambda: psdf.groupby([]))
self.assertRaises(KeyError, lambda: psdf.groupby("x"))
self.assertRaises(KeyError, lambda: psdf.groupby(["a", "x"]))
self.assertRaises(KeyError, lambda: psdf.groupby("a")["x"])
self.assertRaises(KeyError, lambda: psdf.groupby("a")["b", "x"])
self.assertRaises(KeyError, lambda: psdf.groupby("a")[["b", "x"]])
def test_nunique(self):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("a").agg({"b": "nunique"}).sort_index(),
pdf.groupby("a").agg({"b": "nunique"}).sort_index(),
)
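        # Older pandas (< 1.1.0) returns a different groupby nunique result, so the
        # test compares against hard-coded expected frames instead of pandas' output.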
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ps.DataFrame({"b": [2, 2]}, index=pd.Index([0, 1], name="a"))
self.assert_eq(psdf.groupby("a").nunique().sort_index(), expected)
self.assert_eq(
psdf.groupby("a").nunique(dropna=False).sort_index(),
expected,
)
else:
self.assert_eq(
psdf.groupby("a").nunique().sort_index(), pdf.groupby("a").nunique().sort_index()
)
self.assert_eq(
psdf.groupby("a").nunique(dropna=False).sort_index(),
pdf.groupby("a").nunique(dropna=False).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].nunique().sort_index(),
pdf.groupby("a")["b"].nunique().sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
pdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
)
nunique_psdf = psdf.groupby("a", as_index=False).agg({"b": "nunique"})
nunique_pdf = pdf.groupby("a", as_index=False).agg({"b": "nunique"})
self.assert_eq(
nunique_psdf.sort_values(["a", "b"]).reset_index(drop=True),
nunique_pdf.sort_values(["a", "b"]).reset_index(drop=True),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ps.DataFrame({("y", "b"): [2, 2]}, index=pd.Index([0, 1], name=("x", "a")))
self.assert_eq(
psdf.groupby(("x", "a")).nunique().sort_index(),
expected,
)
self.assert_eq(
psdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
expected,
)
else:
self.assert_eq(
psdf.groupby(("x", "a")).nunique().sort_index(),
pdf.groupby(("x", "a")).nunique().sort_index(),
)
self.assert_eq(
psdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
pdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
)
def test_unique(self):
for pdf in [
pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
),
pd.DataFrame(
{
"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
"b": ["w", "w", "w", "x", "x", "y", "y", "z", "z", "z"],
}
),
]:
with self.subTest(pdf=pdf):
psdf = ps.from_pandas(pdf)
actual = psdf.groupby("a")["b"].unique().sort_index().to_pandas()
expect = pdf.groupby("a")["b"].unique().sort_index()
self.assert_eq(len(actual), len(expect))
for act, exp in zip(actual, expect):
self.assertTrue(sorted(act) == sorted(exp))
def test_value_counts(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]}, columns=["A", "B"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("A")["B"].value_counts().sort_index(),
pdf.groupby("A")["B"].value_counts().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
)
self.assert_eq(
psdf.B.rename().groupby(psdf.A).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A).value_counts().sort_index(),
)
self.assert_eq(
psdf.B.groupby(psdf.A.rename()).value_counts().sort_index(),
pdf.B.groupby(pdf.A.rename()).value_counts().sort_index(),
)
self.assert_eq(
psdf.B.rename().groupby(psdf.A.rename()).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A.rename()).value_counts().sort_index(),
)
def test_size(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.groupby("A").size().sort_index(), pdf.groupby("A").size().sort_index())
self.assert_eq(
psdf.groupby("A")["B"].size().sort_index(), pdf.groupby("A")["B"].size().sort_index()
)
self.assert_eq(
psdf.groupby("A")[["B"]].size().sort_index(),
pdf.groupby("A")[["B"]].size().sort_index(),
)
self.assert_eq(
psdf.groupby(["A", "B"]).size().sort_index(),
pdf.groupby(["A", "B"]).size().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("X", "A")).size().sort_index(),
pdf.groupby(("X", "A")).size().sort_index(),
)
self.assert_eq(
psdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
pdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
)
def test_diff(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.groupby("b").diff().sort_index(), pdf.groupby("b").diff().sort_index())
self.assert_eq(
psdf.groupby(["a", "b"]).diff().sort_index(),
pdf.groupby(["a", "b"]).diff().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].diff().sort_index(),
pdf.groupby(["b"])["a"].diff().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
pdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).diff().sort_index(),
pdf.groupby(pdf.b // 5).diff().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].diff().sort_index(),
pdf.groupby(pdf.b // 5)["a"].diff().sort_index(),
)
self.assert_eq(psdf.groupby("b").diff().sum(), pdf.groupby("b").diff().sum().astype(int))
self.assert_eq(psdf.groupby(["b"])["a"].diff().sum(), pdf.groupby(["b"])["a"].diff().sum())
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).diff().sort_index(),
pdf.groupby(("x", "b")).diff().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
)
def test_rank(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.groupby("b").rank().sort_index(), pdf.groupby("b").rank().sort_index())
self.assert_eq(
psdf.groupby(["a", "b"]).rank().sort_index(),
pdf.groupby(["a", "b"]).rank().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].rank().sort_index(),
pdf.groupby(["b"])["a"].rank().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
pdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).rank().sort_index(),
pdf.groupby(pdf.b // 5).rank().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].rank().sort_index(),
pdf.groupby(pdf.b // 5)["a"].rank().sort_index(),
)
self.assert_eq(psdf.groupby("b").rank().sum(), pdf.groupby("b").rank().sum())
self.assert_eq(psdf.groupby(["b"])["a"].rank().sum(), pdf.groupby(["b"])["a"].rank().sum())
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).rank().sort_index(),
pdf.groupby(("x", "b")).rank().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
)
def test_cumcount(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
for ascending in [True, False]:
self.assert_eq(
psdf.groupby("b").cumcount(ascending=ascending).sort_index(),
pdf.groupby("b").cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
pdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby("b").cumcount(ascending=ascending).sum(),
pdf.groupby("b").cumcount(ascending=ascending).sum(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
for ascending in [True, False]:
self.assert_eq(
psdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
pdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
)
def test_cummin(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cummin().sort_index(), pdf.groupby("b").cummin().sort_index()
)
self.assert_eq(
psdf.groupby(["a", "b"]).cummin().sort_index(),
pdf.groupby(["a", "b"]).cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cummin().sort_index(),
pdf.groupby(["b"])["a"].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cummin().sort_index(),
pdf.groupby(pdf.b // 5).cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cummin().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby("b").cummin().sum().sort_index(),
pdf.groupby("b").cummin().sum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b).cummin().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cummin().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummin().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummin().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cummin().sort_index(),
pdf.groupby(("x", "b")).cummin().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cummin())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cummin())
def test_cummax(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cummax().sort_index(), pdf.groupby("b").cummax().sort_index()
)
self.assert_eq(
psdf.groupby(["a", "b"]).cummax().sort_index(),
pdf.groupby(["a", "b"]).cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cummax().sort_index(),
pdf.groupby(["b"])["a"].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cummax().sort_index(),
pdf.groupby(pdf.b // 5).cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cummax().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby("b").cummax().sum().sort_index(),
pdf.groupby("b").cummax().sum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b).cummax().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cummax().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummax().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummax().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cummax().sort_index(),
pdf.groupby(("x", "b")).cummax().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cummax())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cummax())
def test_cumsum(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cumsum().sort_index(), pdf.groupby("b").cumsum().sort_index()
)
self.assert_eq(
psdf.groupby(["a", "b"]).cumsum().sort_index(),
pdf.groupby(["a", "b"]).cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cumsum().sort_index(),
pdf.groupby(["b"])["a"].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cumsum().sort_index(),
pdf.groupby(pdf.b // 5).cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cumsum().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby("b").cumsum().sum().sort_index(),
pdf.groupby("b").cumsum().sum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b).cumsum().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cumsum().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumsum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumsum().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cumsum().sort_index(),
pdf.groupby(("x", "b")).cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cumsum())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cumsum())
def test_cumprod(self):
pdf = pd.DataFrame(
{
"a": [1, 2, -3, 4, -5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 0, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cumprod().sort_index(),
pdf.groupby("b").cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(["a", "b"]).cumprod().sort_index(),
pdf.groupby(["a", "b"]).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(["b"])["a"].cumprod().sort_index(),
pdf.groupby(["b"])["a"].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(psdf.b // 3).cumprod().sort_index(),
pdf.groupby(pdf.b // 3).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(psdf.b // 3)["a"].cumprod().sort_index(),
pdf.groupby(pdf.b // 3)["a"].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby("b").cumprod().sum().sort_index(),
pdf.groupby("b").cumprod().sum().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cumprod().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumprod().sort_index(),
check_exact=False,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cumprod().sort_index(),
pdf.groupby(("x", "b")).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
check_exact=False,
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cumprod())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cumprod())
def test_nsmallest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
pdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
)
self.assert_eq(
psdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
pdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
)
self.assert_eq(
(psdf.b * 10).groupby(psdf.a).nsmallest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
psdf.b.groupby(psdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nsmallest do not support multi-index now"):
psdf.set_index(["a", "b"]).groupby(["c"])["d"].nsmallest(1)
def test_nlargest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby(["a"])["b"].nlargest(1).sort_values(),
pdf.groupby(["a"])["b"].nlargest(1).sort_values(),
)
self.assert_eq(
psdf.groupby(["a"])["b"].nlargest(2).sort_index(),
pdf.groupby(["a"])["b"].nlargest(2).sort_index(),
)
self.assert_eq(
(psdf.b * 10).groupby(psdf.a).nlargest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
psdf.b.groupby(psdf.a.rename()).nlargest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a.rename()).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nlargest do not support multi-index now"):
psdf.set_index(["a", "b"]).groupby(["c"])["d"].nlargest(1)
def test_fillna(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("A").fillna(0).sort_index(), pdf.groupby("A").fillna(0).sort_index()
)
self.assert_eq(
psdf.groupby("A")["C"].fillna(0).sort_index(),
pdf.groupby("A")["C"].fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby("A")[["C"]].fillna(0).sort_index(),
pdf.groupby("A")[["C"]].fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby("A").fillna(method="bfill").sort_index(),
pdf.groupby("A").fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
pdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby("A").fillna(method="ffill").sort_index(),
pdf.groupby("A").fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
pdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5).fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)["C"].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5).fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)["C"].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.C.rename().groupby(psdf.A).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A).fillna(0).sort_index(),
)
self.assert_eq(
psdf.C.groupby(psdf.A.rename()).fillna(0).sort_index(),
pdf.C.groupby(pdf.A.rename()).fillna(0).sort_index(),
)
self.assert_eq(
psdf.C.rename().groupby(psdf.A.rename()).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A.rename()).fillna(0).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("X", "A")).fillna(0).sort_index(),
pdf.groupby(("X", "A")).fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
)
def test_ffill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
psdf = ps.from_pandas(pdf)
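        # pandas <= 0.24.2 keeps the grouping column "A" in the ffill result, so it
        # is dropped from the pandas side before comparing.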
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby("A").ffill().sort_index(),
pdf.groupby("A").ffill().sort_index().drop("A", 1),
)
self.assert_eq(
psdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
psdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index()
)
self.assert_eq(
psdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].ffill().sort_index(), pdf.groupby("A")["B"].ffill().sort_index()
)
self.assert_eq(
psdf.groupby("A")["B"].ffill()[idx[6]], pdf.groupby("A")["B"].ffill()[idx[6]]
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
psdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index(),
)
def test_bfill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby("A").bfill().sort_index(),
pdf.groupby("A").bfill().sort_index().drop("A", 1),
)
self.assert_eq(
psdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
psdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index()
)
self.assert_eq(
psdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].bfill().sort_index(),
pdf.groupby("A")["B"].bfill().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].bfill()[idx[6]], pdf.groupby("A")["B"].bfill()[idx[6]]
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
psdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index(),
)
    @unittest.skipIf(
        LooseVersion(pd.__version__) < LooseVersion("0.24.0"),
        "not supported before pandas 0.24.0",
    )
def test_shift(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3, 3] * 3,
"b": [1, 1, 2, 2, 3, 4] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("a").shift().sort_index(), pdf.groupby("a").shift().sort_index()
)
        # TODO: seems like a pandas bug when fill_value is not None?
# self.assert_eq(psdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(),
# pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index())
self.assert_eq(
psdf.groupby(["b"])["a"].shift().sort_index(),
pdf.groupby(["b"])["a"].shift().sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"])["c"].shift().sort_index(),
pdf.groupby(["a", "b"])["c"].shift().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).shift().sort_index(),
pdf.groupby(pdf.b // 5).shift().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].shift().sort_index(),
pdf.groupby(pdf.b // 5)["a"].shift().sort_index(),
)
        # TODO: known pandas bug when fill_value is not None for pandas >= 1.0.0
# https://github.com/pandas-dev/pandas/issues/31971#issue-565171762
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
pdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).shift().sort_index(),
pdf.a.rename().groupby(pdf.b).shift().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).shift().sort_index(),
pdf.a.groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).shift().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(psdf.groupby("a").shift().sum(), pdf.groupby("a").shift().sum().astype(int))
self.assert_eq(
psdf.a.rename().groupby(psdf.b).shift().sum(),
pdf.a.rename().groupby(pdf.b).shift().sum(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "a")).shift().sort_index(),
pdf.groupby(("x", "a")).shift().sort_index(),
)
        # TODO: seems like a pandas bug when fill_value is not None?
# self.assert_eq(psdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index(),
# pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index())
def test_apply(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(len).sort_index(),
pdf.groupby("b").apply(len).sort_index(),
)
self.assert_eq(
psdf.groupby("b")["a"]
.apply(lambda x, y, z: x + x.min() + y * z, 10, z=20)
.sort_index(),
pdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
)
self.assert_eq(
psdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"])
.apply(lambda x, y, z: x + x.min() + y + z, 1, z=2)
.sort_index(),
pdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
pdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["c"].apply(len).sort_index(),
pdf.groupby(["b"])["c"].apply(len).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)[["a"]].apply(len).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(len).sort_index(),
almost=True,
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
psdf.groupby("b").apply(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
pdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(("x", "b")).apply(len).sort_index(),
pdf.groupby(("x", "b")).apply(len).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
)
def test_apply_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply()
def test_apply_negative(self):
def func(_) -> ps.Series[int]:
return pd.Series([1])
with self.assertRaisesRegex(TypeError, "Series as a return type hint at frame groupby"):
ps.range(10).groupby("id").apply(func)
def test_apply_with_new_dataframe(self):
pdf = pd.DataFrame(
{"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5], "car_id": ["A", "A", "A", "B", "B"]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
psdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
# dataframe with 1000+ records
pdf = pd.DataFrame(
{
"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5] * 300,
"car_id": ["A", "A", "A", "B", "B"] * 300,
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
psdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
def test_apply_with_new_dataframe_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply_with_new_dataframe()
def test_apply_key_handling(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
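        # Repeat with a tiny compute.shortcut_limit so the non-shortcut code path
        # (see test_apply_without_shortcut above) is exercised as well.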
with ps.option_context("compute.shortcut_limit", 1):
self.assert_eq(
psdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
def test_apply_with_side_effect(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
psdf = ps.from_pandas(pdf)
acc = ps.utils.default_session().sparkContext.accumulator(0)
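        # The Spark accumulator counts how many times the UDFs below are actually
        # invoked on the executors (once per group).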
def sum_with_acc_frame(x) -> ps.DataFrame[np.float64, np.float64]:
nonlocal acc
acc += 1
return np.sum(x)
actual = psdf.groupby("d").apply(sum_with_acc_frame).sort_index()
actual.columns = ["d", "v"]
self.assert_eq(actual, pdf.groupby("d").apply(sum).sort_index().reset_index(drop=True))
self.assert_eq(acc.value, 2)
def sum_with_acc_series(x) -> np.float64:
nonlocal acc
acc += 1
return np.sum(x)
self.assert_eq(
psdf.groupby("d")["v"].apply(sum_with_acc_series).sort_index(),
pdf.groupby("d")["v"].apply(sum).sort_index().reset_index(drop=True),
)
self.assert_eq(acc.value, 4)
def test_transform(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
)
def test_transform_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_transform()
def test_filter(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
psdf.groupby("b").filter(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
pdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
pdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
)
def test_idxmax(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmax().sort_index(), psdf.groupby(["a"]).idxmax().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
psdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmax().sort_index(),
psdf.groupby(["a"])["b"].idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmax().sort_index(),
psdf.b.rename().groupby(psdf.a).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmax().sort_index(),
psdf.b.groupby(psdf.a.rename()).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmax().sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).idxmax().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmax only support one-level index now"):
psdf.set_index(["a", "b"]).groupby(["c"]).idxmax()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmax().sort_index(),
psdf.groupby(("x", "a")).idxmax().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
psdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
)
def test_idxmin(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmin().sort_index(), psdf.groupby(["a"]).idxmin().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
psdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmin().sort_index(),
psdf.groupby(["a"])["b"].idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmin().sort_index(),
psdf.b.rename().groupby(psdf.a).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmin().sort_index(),
psdf.b.groupby(psdf.a.rename()).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmin().sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).idxmin().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmin only support one-level index now"):
psdf.set_index(["a", "b"]).groupby(["c"]).idxmin()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmin().sort_index(),
psdf.groupby(("x", "a")).idxmin().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
psdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
)
def test_head(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").head(2).sort_index(), psdf.groupby("a").head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), psdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), psdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), psdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(),
psdf.groupby("a")["b"].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
psdf.groupby("a")["b"].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(2).sort_index(),
psdf.groupby("a")[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(-2).sort_index(),
psdf.groupby("a")[["b"]].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(100000).sort_index(),
psdf.groupby("a")[["b"]].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).head(2).sort_index(),
psdf.groupby(psdf.a // 2).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].head(2).sort_index(),
psdf.groupby(psdf.a // 2)["b"].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].head(2).sort_index(),
psdf.groupby(psdf.a // 2)[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).head(2).sort_index(),
psdf.b.rename().groupby(psdf.a).head(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).head(2).sort_index(),
psdf.b.groupby(psdf.a.rename()).head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).head(2).sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).head(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").head(2).sort_index(), psdf.groupby("a").head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), psdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), psdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), psdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(),
psdf.groupby("a")["b"].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
psdf.groupby("a")["b"].head(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).head(2).sort_index(),
psdf.groupby(("x", "a")).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(-2).sort_index(),
psdf.groupby(("x", "a")).head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(100000).sort_index(),
psdf.groupby(("x", "a")).head(100000).sort_index(),
)
def test_missing(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# DataFrameGroupBy functions
missing_functions = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a"), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.groupby("a"), name)()
# SeriesGroupBy functions
missing_functions = inspect.getmembers(MissingPandasLikeSeriesGroupBy, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.a.groupby(psdf.a), name)()
# DataFrameGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a"), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.groupby("a"), name)
# SeriesGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeSeriesGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.a.groupby(psdf.a), name)
@staticmethod
def test_is_multi_agg_with_relabel():
assert is_multi_agg_with_relabel(a="max") is False
assert is_multi_agg_with_relabel(a_min=("a", "max"), a_max=("a", "min")) is True
def test_get_group(self):
pdf = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("class").get_group("bird"),
pdf.groupby("class").get_group("bird"),
)
self.assert_eq(
psdf.groupby("class")["name"].get_group("mammal"),
pdf.groupby("class")["name"].get_group("mammal"),
)
self.assert_eq(
psdf.groupby("class")[["name"]].get_group("mammal"),
pdf.groupby("class")[["name"]].get_group("mammal"),
)
self.assert_eq(
psdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
pdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
)
self.assert_eq(
psdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
)
self.assert_eq(
psdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
)
self.assert_eq(
(psdf.max_speed + 1).groupby(psdf["class"]).get_group("mammal"),
(pdf.max_speed + 1).groupby(pdf["class"]).get_group("mammal"),
)
self.assert_eq(
psdf.groupby("max_speed").get_group(80.5),
pdf.groupby("max_speed").get_group(80.5),
)
self.assertRaises(KeyError, lambda: psdf.groupby("class").get_group("fish"))
self.assertRaises(TypeError, lambda: psdf.groupby("class").get_group(["bird", "mammal"]))
self.assertRaises(KeyError, lambda: psdf.groupby("class")["name"].get_group("fish"))
self.assertRaises(
TypeError, lambda: psdf.groupby("class")["name"].get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: psdf.groupby(["class", "name"]).get_group(("lion", "mammal"))
)
self.assertRaises(ValueError, lambda: psdf.groupby(["class", "name"]).get_group(("lion",)))
self.assertRaises(
ValueError, lambda: psdf.groupby(["class", "name"]).get_group(("mammal",))
)
self.assertRaises(ValueError, lambda: psdf.groupby(["class", "name"]).get_group("mammal"))
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("A", "name"), ("B", "class"), ("C", "max_speed")])
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby(("B", "class")).get_group("bird"),
pdf.groupby(("B", "class")).get_group("bird"),
)
self.assert_eq(
psdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
pdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
)
self.assert_eq(
psdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
pdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
)
self.assert_eq(
psdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
pdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
)
self.assert_eq(
(psdf[("C", "max_speed")] + 1).groupby(psdf[("B", "class")]).get_group("mammal"),
(pdf[("C", "max_speed")] + 1).groupby(pdf[("B", "class")]).get_group("mammal"),
)
self.assert_eq(
psdf.groupby(("C", "max_speed")).get_group(80.5),
pdf.groupby(("C", "max_speed")).get_group(80.5),
)
self.assertRaises(KeyError, lambda: psdf.groupby(("B", "class")).get_group("fish"))
self.assertRaises(
TypeError, lambda: psdf.groupby(("B", "class")).get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: psdf.groupby(("B", "class"))[("A", "name")].get_group("fish")
)
self.assertRaises(
KeyError,
lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion", "mammal")),
)
self.assertRaises(
ValueError,
lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion",)),
)
self.assertRaises(
ValueError, lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal",))
)
self.assertRaises(
ValueError, lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group("mammal")
)
def test_median(self):
psdf = ps.DataFrame(
{
"a": [1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
"b": [2.0, 3.0, 1.0, 4.0, 6.0, 9.0, 8.0, 10.0, 7.0, 5.0],
"c": [3.0, 5.0, 2.0, 5.0, 1.0, 2.0, 6.0, 4.0, 3.0, 6.0],
},
columns=["a", "b", "c"],
index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6],
)
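        # pandas-on-Spark computes the groupby median via an approximate percentile
        # (note the "accuracy" argument checked below), so the test compares against
        # hard-coded expected values rather than pandas' exact median.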
# DataFrame
expected_result = ps.DataFrame(
{"b": [2.0, 8.0, 7.0], "c": [3.0, 2.0, 4.0]}, index=pd.Index([1.0, 2.0, 3.0], name="a")
)
self.assert_eq(expected_result, psdf.groupby("a").median().sort_index())
# Series
expected_result = ps.Series(
[2.0, 8.0, 7.0], name="b", index=pd.Index([1.0, 2.0, 3.0], name="a")
)
self.assert_eq(expected_result, psdf.groupby("a")["b"].median().sort_index())
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.groupby("a").median(accuracy="a")
def test_tail(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").tail(2).sort_index(), psdf.groupby("a").tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(-2).sort_index(), psdf.groupby("a").tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(100000).sort_index(), psdf.groupby("a").tail(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(2).sort_index(), psdf.groupby("a")["b"].tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(-2).sort_index(),
psdf.groupby("a")["b"].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].tail(100000).sort_index(),
psdf.groupby("a")["b"].tail(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(2).sort_index(),
psdf.groupby("a")[["b"]].tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(-2).sort_index(),
psdf.groupby("a")[["b"]].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(100000).sort_index(),
psdf.groupby("a")[["b"]].tail(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).tail(2).sort_index(),
psdf.groupby(psdf.a // 2).tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].tail(2).sort_index(),
psdf.groupby(psdf.a // 2)["b"].tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].tail(2).sort_index(),
psdf.groupby(psdf.a // 2)[["b"]].tail(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).tail(2).sort_index(),
psdf.b.rename().groupby(psdf.a).tail(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).tail(2).sort_index(),
psdf.b.groupby(psdf.a.rename()).tail(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).tail(2).sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).tail(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").tail(2).sort_index(), psdf.groupby("a").tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(-2).sort_index(), psdf.groupby("a").tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(100000).sort_index(), psdf.groupby("a").tail(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(2).sort_index(), psdf.groupby("a")["b"].tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(-2).sort_index(),
psdf.groupby("a")["b"].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].tail(100000).sort_index(),
psdf.groupby("a")["b"].tail(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).tail(2).sort_index(),
psdf.groupby(("x", "a")).tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).tail(-2).sort_index(),
psdf.groupby(("x", "a")).tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).tail(100000).sort_index(),
psdf.groupby(("x", "a")).tail(100000).sort_index(),
)
def test_ddof(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
psdf = ps.from_pandas(pdf)
for ddof in (0, 1):
# std
self.assert_eq(
pdf.groupby("a").std(ddof=ddof).sort_index(),
psdf.groupby("a").std(ddof=ddof).sort_index(),
check_exact=False,
)
self.assert_eq(
pdf.groupby("a")["b"].std(ddof=ddof).sort_index(),
psdf.groupby("a")["b"].std(ddof=ddof).sort_index(),
check_exact=False,
)
# var
self.assert_eq(
pdf.groupby("a").var(ddof=ddof).sort_index(),
psdf.groupby("a").var(ddof=ddof).sort_index(),
check_exact=False,
)
self.assert_eq(
pdf.groupby("a")["b"].var(ddof=ddof).sort_index(),
psdf.groupby("a")["b"].var(ddof=ddof).sort_index(),
check_exact=False,
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_groupby import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
AndrewBMartin/pygurobi | pygurobi/pygurobi.py | 1 | 31972 | """
Functions to support rapid interactive modification of Gurobi models.
For reference on Gurobi objects such as Models, Variables, and Constraints, see
http://www.gurobi.com/documentation/7.0/refman/py_python_api_overview.html.
"""
import csv
import json
try:
import gurobipy as gp
except ImportError:
raise ImportError("gurobipy not installed. Please see {0} to download".format(
"https://www.gurobi.com/documentation/6.5/quickstart_mac/the_gurobi_python_interfac.html"))
# Assuming that constraints are of the form:
# constraintName(index1,index2,...,indexN).
# Assuming that variables are of the form:
# variableName[index1,index2,...,indexN]
CON_BRACKET_L = "("
CON_BRACKET_R = ")"
VAR_BRACKET_L = "["
VAR_BRACKET_R = "]"
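# Example of the naming convention assumed throughout this module (the names
# are hypothetical): a variable "x[3,plant_A]" belongs to variable set "x"
# and a constraint "demand(3,plant_A)" belongs to constraint set "demand";
# in both cases the index values are 3 and "plant_A".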
# 13 July 2016 - Need to sort out capitalization here for attributes
# Attributes of a Gurobi variable
VAR_ATTRS = ["LB", "UB", "Obj", "VType", "VarName", "X", "Xn", "RC",
"BarX", "Start", "VarHintVal", "VarHintPri", "BranchPriority",
"VBasis", "PStart", "IISLB", "IISUB", "PWLObjCvx",
"SAObjLow", "SAObjUp", "SALBLow", "SALBUp",
"SAUBLow", "SAUBUp", "UnbdRay"]
# Attributes of a Gurobi constraint
CON_ATTRS = ["Sense", "RHS", "ConstrName", "Pi", "Slack",
"CBasis", "DStart", "Lazy", "IISConstr",
"SARHSLow", "SARHSUp", "FarkasDual"]
def read_model(filename):
"""
Read a model using gurobipy.
"""
m = gp.read(filename)
return m
def reoptimize(m):
"""
Update, reset, and optimize
a model.
"""
m.update()
m.reset()
m.optimize()
def get_variable_attrs():
"""
Return a list of variable attributes.
Details of attributes found at the Gurobi
website:
http://www.gurobi.com/documentation/6.5/refman/attributes.html
"""
return VAR_ATTRS
def get_constraint_attrs():
"""
Return a list of constraint attributes.
Details of attributes found at the Gurobi
website:
http://www.gurobi.com/documentation/6.5/refman/attributes.html
"""
return CON_ATTRS
def list_constraints(model):
"""
Print to screen the constraint sets in the model.
Show the name of each constraint set along with the
number of constraints in that set.
A constraint set is composed of all constraints
sharing the same string identifier before the indices:
A(2,3,4) and A(1,2,3) are in the same constraint set, A;
A(2,3,4) and B(2,3,4) are in constraint sets A and B, respectively
"""
sets = {}
constraints = model.getConstrs()
    # Assuming constraint set name is separated from indices by CON_BRACKET_L
for c in constraints:
name = c.constrName
split_name = name.split(CON_BRACKET_L)
set_name = split_name[0]
if set_name not in sets:
sets[set_name] = 1
else:
sets[set_name] += 1
print "Constraint set, Number of constraints"
print "\n".join(["{0}, {1}".format(name, number) for name, number
in sorted(sets.items())])
def list_variables(model):
"""
Print to screen the variable sets in the model.
Show the name of each variable set along with the
number of variables in that set.
A variable set is composed of all variables
sharing the same string identifier before the indices:
A[2,3,4] and A[1,2,3] are in the same variable set, A;
A[2,3,4] and B[2,3,4] are in variable sets A and B, respectively
"""
sets = {}
variables = model.getVars()
    # Assuming variable set name is separated from indices by VAR_BRACKET_L
for v in variables:
name = v.varName
split_name = name.split(VAR_BRACKET_L)
set_name = split_name[0]
if set_name not in sets:
sets[set_name] = 1
else:
sets[set_name] += 1
print "Variable set, Number of variables"
print "\n".join(["{0}, {1}".format(name, number) for name, number
in sorted(sets.items())])
def get_variables(model, name="", approx=False, filter_values={}, exclude=False):
"""
Return a list of variables from the model
selected by variable set name.
A variable set is composed of all variables
sharing the same string identifier before the indices:
A[2,3,4] and A[1,2,3] are in the same variable set, A;
    A[2,3,4] and B[2,3,4] are in variable sets A and B, respectively
    PyGurobi by default assumes that *variable names* are separated
    from indices by square brackets "[" and "]".
    For example, variables look like x[i,j] - "x" is the variable set name,
    and "i" and "j" are the variable's index values.
See the source code for more details.
"""
variables = []
if not name:
variables = model.getVars()
    elif not approx:
variables = [v for v in model.getVars()
if v.varName.split(VAR_BRACKET_L)[0] == name]
else:
variables = [v for v in model.getVars()
if name in v.varName.split(VAR_BRACKET_L)[0]]
if filter_values:
variables = filter_variables(variables, filter_values,
exclude=exclude)
return variables
def check_attr(attr, attributes):
"""
Check if the attr string case-insensitively corresponds to a
Gurobi attribute.
"""
for a in attributes:
if attr == a:
return True
if attr.lower() == a.lower():
return True
return False
def check_variable_attr(attr):
"""
Check if a string corresponds to a variable attribute.
Case-insensitive.
"""
var_attrs = get_variable_attrs()
return check_attr(attr, var_attrs)
def check_constraint_attr(attr):
"""
Check if a string corresponds to a constraint attribute.
Attributes are case-insensitive.
"""
con_attrs = get_constraint_attrs()
return check_attr(attr, con_attrs)
def get_variables_attr(attr, model="", name="", variables=""):
"""
Return a dictionary of variables names and their
corresponding attribute value.
    Specify either model and name parameters or supply a list of variables
"""
if not attr:
raise AttributeError("No attributes specified")
if not check_variable_attr(attr):
raise AttributeError("{0}\n{1}\n{2}".format(
"Attribute: {0} not a variable attribute.".format(attr),
"Get list of all variables attributes with the",
"get_variable_attrs() method."))
# Make a list of attributes at the top and check against
# them to make sure that the specified attribute belongs.
if not model and not variables:
raise ValueError("No model or variable list given")
variables = variables_check(model, name, variables)
return {v.varName: getattr(v, attr) for v in variables}
def print_variables_attr(attr, model="", name="", variables=""):
"""
Print to screen a dictionary of variables names and their
corresponding attribute value.
    Specify either model and name parameters or supply a list of variables
"""
var_dict = get_variables_attr(attr, model=model,
name=name, variables=variables)
print "\n".join(["{0}, {1}".format(v, k) for v, k in
sorted(var_dict.items())])
def set_variables_attr(attr, val, model="", name="", variables=""):
"""
Set an attribute of a model variable set.
    Specify either model and name parameters or supply a list of variables
"""
if not attr or not val:
raise AttributeError("No attribute or value specified")
if not check_variable_attr(attr):
raise AttributeError("{0}\n{1}\n{2}".format(
"Attribute: {0} not a variable attribute.".format(attr),
"Get list of all variables attributes with the",
"get_variable_attrs() method."))
if not model and not variables:
raise ValueError("No model or variables specified")
variables = variables_check(model, name, variables)
for v in variables:
setattr(v, attr, val)
def zero_all_objective_coeffs(model):
"""
Set all objective coefficients in a model to zero.
"""
if not model:
raise ValueError("No model given")
for v in model.getVars():
v.Obj = 0
def set_variables_bounds(lb="", ub="", model="", name="", variables=""):
"""
Set the lower bound and/or upper bound for a variables set.
    Specify either model and name parameters or supply a list of variables
"""
if lb:
set_variables_attr("lb", val=lb, model=model,
name=name, variables=variables)
if ub:
set_variables_attr("ub", val=ub, model=model,
name=name, variables=variables)
def remove_variables_from_model(model, name="", variables=""):
"""
Remove the given variables from the model.
    Specify either model and name parameters or supply a list of variables
"""
if not model and not variables:
raise ValueError("No model or variables given")
if not model:
raise ValueError("No model given")
variables = variables_check(model, name, variables)
for v in variables:
model.remove(v)
def variables_check(model, name, variables):
"""
Return the appropriate
variables based on the information supplied.
"""
if variables:
return variables
if model and name:
variables = get_variables(model, name)
if model and not name:
variables = model.getVars()
if not variables:
print "No variables found for\nmodel: {0},\nname: {1}".format(
model, name)
return variables
def get_variable_index_value(variable, index):
"""
Return the value of the given index
for a given variable.
Variable names are assumed to be given
as A[a,c,d, ....,f]
"""
value = variable.varName.split(",")[index].strip()
if VAR_BRACKET_R in value:
value = value[:-1]
elif VAR_BRACKET_L in value:
value = value.split(VAR_BRACKET_L)[1]
    # Not expecting many variable index values to be floats;
    # fall back to the string value if it is not an integer.
    try:
        value = int(value)
    except ValueError:
        pass
return value
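# A hedged worked example of the parsing above, using a stand-in object
# instead of a real Gurobi variable (the names are hypothetical):
#
#     class _Fake(object):
#         varName = "x[3,plant_A]"
#
#     get_variable_index_value(_Fake(), 0)  # -> 3 (bracket stripped, cast to int)
#     get_variable_index_value(_Fake(), 1)  # -> "plant_A"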
def get_linexp_from_variables(variables):
"""
Return a linear expression from the supplied list
of variables.
"""
linexp = gp.LinExpr()
for v in variables:
linexp += v
return linexp
def sum_variables_by_index(index, model="", name="", variables=""):
"""
Return a dictionary mapping index values to the sum
of the solution values of all matching variables.
    Specify either model and name parameters or supply a list of variables
"""
var_dict = get_variables_by_index(index, model=model, name=name,
variables=variables)
if not var_dict:
        raise ValueError("No variables found for index {0}".format(index))
new_dict = {index_name: sum([v.X for v in index_vars])
for index_name, index_vars in
sorted(var_dict.items())}
return new_dict
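# Shape of the result, on hypothetical data: for variables x[r,t], summing by
# index 1 (the "t" position) returns something like {1: 42.0, 2: 17.5, ...},
# i.e. one entry per distinct index value mapped to the sum of the solution
# values (v.X), so the model must already have been optimized.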
def print_dict(dictionary):
"""
Print a dictionary to screen.
"""
print "\n".join(["{0}, {1}".format(index_name, index_value)
for index_name, index_value in
sorted(dictionary.items())])
def print_variables_sum_by_index(index, model="", name="", variables=""):
"""
Print a dictionary of variables, summed by index.
"""
var_dict = sum_variables_by_index(index, model=model,
name=name, variables=variables)
print_dict(var_dict)
def get_variables_by_index(index, model="", name="", variables=""):
"""
Return a dictionary mapping index values to lists of
matching variables.
    Specify either model and name parameters or supply a list of variables
"""
if index != 0 and not index:
raise IndexError("No index given")
if not model and not variables:
raise ValueError("No model or variables given")
if not (name and model) and not variables:
raise ValueError("No variables specified")
variables = variables_check(model, name, variables)
var_dict = {}
for v in variables:
value = get_variable_index_value(v, index)
if value not in var_dict:
var_dict[value] = [v]
else:
var_dict[value].append(v)
return var_dict
def filter_variables(variables, filter_values, exclude=False):
"""
Return a new list of variables that match the filter values
from the given variables list.
"""
if not variables:
raise ValueError("variables not given")
if not filter_values:
raise ValueError("Dictionary of filter values not given")
new_vars = []
for v in variables:
add = True
for index, value in filter_values.iteritems():
key = get_variable_index_value(v, index)
if key != value:
add = False
break
if add:
new_vars.append(v)
if exclude:
new_vars = [v for v in (set(variables)-set(new_vars))]
return new_vars
def get_variables_by_index_values(model, name, index_values, exclude=False):
    """
    Return a list of variables filtered by index values.
    If exclude is False then return variables that match the filters.
    If exclude is True then return variables that do not match the filters.
    """
    variables = get_variables(model, name, filter_values=index_values,
                              exclude=exclude)
    return variables
def get_variables_by_two_indices(index1, index2, model="", name="", variables=""):
"""
Return a dictionary of variables mapping index1 values
to dictionaries mapping
index2 values to matching variables.
    Specify either model and name parameters or supply a list of variables
"""
two_indices_dict = {}
index1_dict = get_variables_by_index(index1, model=model, name=name,
variables=variables)
for key, value in index1_dict.iteritems():
two_indices_dict[key] = get_variables_by_index(index2, variables=value)
return two_indices_dict
def print_variables(variables):
"""
    Print a list of variables, one per line.
"""
print "\n".join([v.varName for v in variables])
def sum_variables_by_two_indices(index1, index2, model="", name="", variables=""):
"""
Return a dictionary mapping index1 values
to dictionaries of the given variables summed over index2.
"""
two_indices_dict = get_variables_by_two_indices(index1, index2,
model=model, name=name, variables=variables)
if not two_indices_dict:
raise ValueError("Inputs did not match with model variables")
new_dict = {}
for key, var_dict in two_indices_dict.iteritems():
new_dict[key] = {index_name: sum([v.X for v in index_vars])
for index_name, index_vars in
sorted(var_dict.items())}
return new_dict
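# Shape of the result, on hypothetical data: for variables x[r,t] with
# index1=0 (the "r" position) and index2=1 (the "t" position) this returns a
# nested dict such as {"r1": {1: 10.0, 2: 3.0}, "r2": {1: 7.5, 2: 0.0}}.
# As above, the sums use solution values, so the model must be optimized first.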
def print_two_indices_dict(indices_dict):
"""
Print to screen a two level nested dictionary.
"""
for key, value in indices_dict.iteritems():
print "\n{0}".format(key)
print_dict(value)
def get_linexp_by_index(index, model="", name="", variables=""):
"""
Return a dictionary of index values to Gurobi linear expressions
corresponding to the summation of variables that match the index
value for the given index number.
    Specify either model and name parameters or supply a list of variables.
"""
linexps = {}
variables = variables_check(model, name, variables)
for v in variables:
value = get_variable_index_value(v, index)
if value not in linexps:
linexps[value] = gp.LinExpr(v)
else:
linexps[value] += v
return linexps
def print_constraints(constraints):
"""
Print constraints in an aesthetically pleasing way.
"""
print "\n".join([c.constrName for c in constraints])
def get_constraints_multiple(model, names_list, approx=False):
"""
Return a list of constraints given by the constraint
set names in names_list.
"""
cons_list = []
for name in names_list:
cons_list.extend(get_constraints(model, name, approx))
return cons_list
def filter_constraints(constraints, filter_values, exclude=False):
"""
Return a new list of constraints that match the filter values from
the given constraints list.
"""
if not constraints:
raise ValueError("constraints not given")
if not filter_values:
raise ValueError("Dictionary of filter values not given")
new_cons = []
for c in constraints:
add = True
for index, value in filter_values.iteritems():
key = get_constraint_index_value(c, index)
try:
                key = key.replace('"', "")
except AttributeError:
pass
if key != value:
add = False
break
if add:
new_cons.append(c)
if exclude:
        # May want to add sorting by constrName here
new_cons = [c for c in (set(constraints)-set(new_cons))]
return new_cons
def get_constraints(model, name="", approx=False, filter_values={},
exclude=False):
"""
Return a list of constraints from the model
selected by constraint set name.
A constraint set is composed of all constraints
sharing the same string identifier before the indices:
A(2,3,4) and A(1,2,3) are in the same constraint set, A;
A(2,3,4) and B(2,3,4) are in constraint sets A and B, respectively
PyGurobi by default assumes that constraint set names are
separated from indices by round brackets
"(" and ")". For example, constraints look like env(r,t) - where "env"
    is the constraint set name
and "r" and "t" are the index values. See the source for more details.
"""
if not name:
return model.getConstrs()
constraints = []
if not approx:
constraints = [c for c in model.getConstrs()
if c.constrName.split(CON_BRACKET_L)[0] == name]
else:
constraints = [c for c in model.getConstrs()
if name in c.constrName.split(CON_BRACKET_L)[0]]
if filter_values:
constraints = filter_constraints(constraints, filter_values, exclude)
return constraints
def constraints_check(model, name, constraints):
"""
Check to see whether the user specified a list
of constraints or expects them to be retrieved
from the model.
"""
if constraints:
return constraints
if model and name:
constraints = get_constraints(model, name)
elif model and not name:
constraints = model.getConstrs()
return constraints
def get_constraints_attr(attr, model="", name="", constraints=""):
"""
Return a dictionary of constraint names and their
corresponding attribute value.
    Specify either model and name parameters or supply a list of constraints
"""
if not attr:
raise AttributeError("No attributes specified")
if not check_constraint_attr(attr):
        raise AttributeError("{0}\n{1}\n{2}".format(
            "Attribute: {0} not a constraint attribute.".format(attr),
            "Get list of all constraint attributes with the",
            "get_constraint_attrs() method."))
# Check if the attr supplied is not a viable model attribute
if not model and not constraints:
raise ValueError("No model or constraint list given")
constraints = constraints_check(model, name, constraints)
return {c.constrName: getattr(c, attr) for c in constraints}
def print_constraints_attr(attr, model="", name="", constraints=""):
"""
Print to screen a list of constraint attribute values
given by the constraints specified in the names parameter.
    Specify either model and name parameters or supply a list of constraints
"""
constraints = get_constraints_attr(attr, model=model,
name=name, constraints=constraints)
print "\n".join(["{0}, {1}".format(c, k)
for c, k in sorted(constraints.items())])
def set_constraints_attr(attr, val, model="", name="", constraints=""):
"""
Set an attribute of a model constraint set.
    Specify either model and name parameters or supply a list of constraints
"""
if not attr or not val:
raise AttributeError("No attribute or value specified")
if not check_constraint_attr(attr):
        raise AttributeError("{0}\n{1}\n{2}".format(
            "Attribute: {0} not a constraint attribute.".format(attr),
            "Get list of all constraint attributes with the",
            "get_constraint_attrs() method."))
if not model and not constraints:
raise ValueError("No model or constraints specified")
constraints = constraints_check(model, name, constraints)
for c in constraints:
setattr(c, attr, val)
def set_constraints_rhs_as_percent(percent, model="", name="", constraints=""):
"""
Set the right hand side (rhs) of a constraint set as a percentage of its current rhs.
    Specify either model and name parameters or supply a list of constraints
"""
if percent != 0 and not percent:
print "Error: No percent specified."
return
try:
percent = float(percent)
except ValueError:
raise ValueError("Percent must be a number. Percent: {}".format(percent))
if not model and not constraints:
raise ValueError("No model or constraints specified.")
constraints = constraints_check(model, name, constraints)
for c in constraints:
cur_rhs = getattr(c, "rhs")
setattr(c, "rhs", percent*cur_rhs)
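# Hedged usage sketch (the constraint set name "env" is hypothetical):
#
#     set_constraints_rhs_as_percent(0.9, model=m, name="env")
#
# scales the rhs of every constraint env(...) to 90% of its current value;
# the change typically takes effect on the next m.update() or optimize()
# call (e.g. via reoptimize(m) above).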
def remove_constraints_from_model(model, name="", constraints=""):
"""
Remove the given constraints from the model.
    Specify either model and name parameters or supply a list of constraints
"""
if not model and not constraints:
raise ValueError("No model or constraints given")
if not model:
raise ValueError("No model given")
    # If no list of constraints is given, look them up on the
    # model by name (a model object must be provided in that case).
if not constraints:
constraints = constraints_check(model, name, constraints)
for c in constraints:
model.remove(c)
def get_constraint_index_value(constraint, index):
"""
Return the value of the given index
for a given constraint.
Constraint names are assumed to be given
as A(a,c,d, ....,f)
"""
value = constraint.constrName.split(",")[index].strip()
if CON_BRACKET_R in value:
value = value[:-1]
elif CON_BRACKET_L in value:
value = value.split(CON_BRACKET_L)[1]
    # Not expecting many constraint index values to be floats;
    # fall back to the string value if it is not an integer.
    try:
        value = int(value)
    except ValueError:
        pass
return value
def get_constraints_by_index(index, model="", name="", constraints=""):
"""
Return a dictionary mapping index values to lists of
constraints having that index value.
    Specify either model and name parameters or supply a list of constraints
"""
if index != 0 and not index:
raise IndexError("No index given")
if not model and not constraints:
raise ValueError("No model or constraints given")
if not (name and model) and not constraints:
raise ValueError("No constraints specified")
constraints = constraints_check(model, name, constraints)
con_dict = {}
for c in constraints:
value = get_constraint_index_value(c, index)
if value not in con_dict:
con_dict[value] = [c]
else:
con_dict[value].append(c)
return con_dict
def get_constraints_by_index_values(model, name, index_values, exclude=False):
"""
Return a list of constraints filtered by index values.
    If exclude is False then return constraints that match the filters.
    If exclude is True then return constraints that do not match the filters.
"""
    constraints = get_constraints(model, name, filter_values=index_values,
                                  exclude=exclude)
return constraints
def get_grb_sense_from_string(sense):
"""
Return the GRB constraint sense object
corresponding to the supplied string.
Convention follows the Gurobi docs:
https://www.gurobi.com/documentation/6.5/refman/sense.html#attr:Sense
"""
if sense == "<":
return gp.GRB.LESS_EQUAL
elif sense == ">":
return gp.GRB.GREATER_EQUAL
elif sense == "=":
return gp.GRB.EQUAL
else:
raise ValueError("Constraint sense is not '<', '>', '='")
def add_constraint_constant(model, variables, constant, sense="<",
con_name=""):
"""
    Add a constraint to the model saying that the sum of the variables
    must be equal to, less than or equal to, or greater than or equal to, a constant.
"""
if not variables:
raise ValueError("variables list not provided")
linexp = get_linexp_from_variables(variables)
sense = get_grb_sense_from_string(sense)
if not con_name:
model.addConstr(linexp, sense, constant)
else:
model.addConstr(linexp, sense, constant, con_name)
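# Hedged usage sketch (the variable set "x" and the bound 100 are hypothetical):
#
#     x_vars = get_variables(m, "x")
#     add_constraint_constant(m, x_vars, 100, sense="<", con_name="total_cap")
#
# adds the single constraint sum(x_vars) <= 100 to the model m.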
def check_if_name_a_variable(name, model):
"""
Check if the supplied name corresponds to
a variable set name in the given model.
"""
variables = get_variables(model, name)
if not variables:
return False
return True
def check_if_name_a_constraint(name, model):
"""
    Check if the supplied name corresponds to
a constraint set name in the given model.
"""
constraints = get_constraints(model, name)
if not constraints:
return False
return True
def add_constraint_variables(model, variables1, variables2,
sense="=", con_name=""):
"""
    Add a constraint to the model saying that the sum of
    one list of variables must be equal to, less than or equal to,
    or greater than or equal to, the sum of another list of variables.
"""
if not variables1 or not variables2:
        raise ValueError("Variables list not provided")
linexp1 = get_linexp_from_variables(variables1)
linexp2 = get_linexp_from_variables(variables2)
sense = get_grb_sense_from_string(sense)
if not con_name:
model.addConstr(linexp1, sense, linexp2)
else:
model.addConstr(linexp1, sense, linexp2, con_name)
def graph_by_index(model, variables, index, title="", y_axis="", x_axis=""):
"""
Display a graph of the variable against the specified index
using matplotlib.
Matplotlib must already be installed to use this.
See: http://matplotlib.org/faq/installing_faq.html
"""
try:
import matplotlib.pyplot as plot
except ImportError:
raise ImportError("{0}\n{1}".format(
"Module Matplotlib not found.",
"Please download and install Matplotlib to use this function."))
fig = plot.figure()
ax = fig.add_subplot(111)
variables_sum = sum_variables_by_index(index, variables=variables)
keys, values = zip(*variables_sum.items())
y = range(len(variables_sum))
if title:
ax.set_title(title)
if y_axis:
ax.set_ylabel(y_axis)
if x_axis:
ax.set_xlabel(x_axis)
ax.bar(y, values)
#ax.legend(keys)
plot.show()
def graph_by_two_indices(model, variables, index1, index2, title="",
y_axis="", x_axis=""):
"""
Display a graph of the variable summed over index2
given by index1.
Matplotlib must already be installed to use this.
See: http://matplotlib.org/faq/installing_faq.html
"""
try:
import matplotlib.pyplot as plot
except ImportError:
raise ImportError("{0}\n{1}".format(
"Module Matplotlib not found.",
"Please download and install Matplotlib to use this function."))
fig = plot.figure()
ax = fig.add_subplot(111)
# We need to do this in reverse order to prepare it for graphing
variables_sum = sum_variables_by_two_indices(index2, index1,
variables=variables)
keys, values = zip(*variables_sum.items())
colours = ["b", "g", "r", "c", "y", "m", "k", "w"]
y = range(len(values[0]))
print y
if title:
ax.set_title(title)
if y_axis:
ax.set_ylabel(y_axis)
if x_axis:
ax.set_xlabel(x_axis)
bars = []
prev_bars = [0 for bar in y]
colour_count = 0
for key, value in variables_sum.iteritems():
cur_bars = [k[1] for k in sorted(value.items(), key=lambda x: x[0])]
bars.append(ax.bar(y, cur_bars, bottom=prev_bars,
color=colours[colour_count]))
prev_bars = cur_bars
colour_count += 1
if colour_count == len(colours) - 1:
colour_count = 0
ax.legend(keys)
plot.show()
def print_variables_to_csv(file_name, model="", name="", variables=""):
"""
Print the specified variables to a csv file
given by the file_name parameter.
    If no variables are specified then all model
    variables are written.
"""
if ".csv" not in file_name:
raise ValueError("Non csv file specified")
with open(file_name, "wb+") as write_file:
writer = csv.writer(write_file)
headers = ["Variable name", "Value"]
writer.writerow(headers)
variables = variables_check(model, name, variables)
# This will put quotes around strings, because the variable
# names have commas in them.
writer.writerows([ [v.varName, v.X] for v in variables])
def print_variables_to_csv_by_index(file_name, index,
model="", name="", variables=""):
"""
Print the sums of variables by the specified index
to a csv file.
Default behaviour of the function is to overwrite
the given file_name.
"""
if ".csv" not in file_name:
raise ValueError("Non csv file specified")
with open(file_name, "wb+") as write_file:
writer = csv.writer(write_file)
headers = ["Index", "Value"]
writer.writerow(headers)
variables_dict = sum_variables_by_index(index, model=model,
name=name, variables=variables)
if not variables_dict:
raise ValueError("No variables found")
writer.writerows([ [key, value]
for key, value in sorted(variables_dict.items())])
def print_variables_to_json_by_index(file_name, index, model="",
name="", variables="", index_alias=""):
"""
Print the specified variables to a json file given by file_name
organized by the specified index.
Formatted for reading into nvD3 applications.
Default behaviour is to overwrite file if one exists in
file_name's location.
"""
if ".json" not in file_name:
raise ValueError("Non json file specified")
index_name = index
if index_alias:
index_name = index_alias
var_dict = sum_variables_by_index(index, model=model,
name=name, variables=variables)
data = {index_name: [{ index_name: var_dict }] }
json.dump(data, open(file_name, "wb"))
| mit |
gtoonstra/airflow | airflow/hooks/base_hook.py | 14 | 3184 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
from airflow.models import Connection
from airflow.exceptions import AirflowException
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
@provide_session
def _get_connections_from_db(cls, conn_id, session=None):
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
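    # Example with hypothetical values: a connection with conn_id
    # "my_postgres" can be supplied through the environment instead of the
    # metadata database, e.g.
    #   AIRFLOW_CONN_MY_POSTGRES=postgres://user:pass@host:5432/schema
    # in which case this method returns a Connection built from that URI.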
@classmethod
def get_connections(cls, conn_id):
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
log = LoggingMixin().log
log.info("Using connection to: %s", conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
bootphon/crossitlearn | simple_dnn.py | 1 | 32993 | """
A deep neural network with or w/o dropout in one file.
"""
import numpy
import theano
import sys
import math
from theano import tensor as T
from theano import shared
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
BATCH_SIZE = 100
STACKSIZE = 69
def relu_f(vec):
""" Wrapper to quickly change the rectified linear unit function """
return (vec + abs(vec)) / 2.
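# Note: (vec + abs(vec)) / 2 equals the elementwise max(0, vec), i.e. the
# standard rectified linear unit.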
def softplus_f(v):
return T.nnet.softplus(v)
def dropout(rng, x, p=0.5):
""" Zero-out random values in x with probability p using rng """
if p > 0. and p < 1.:
seed = rng.randint(2 ** 30)
srng = theano.tensor.shared_randomstreams.RandomStreams(seed)
mask = srng.binomial(n=1, p=1.-p, size=x.shape,
dtype=theano.config.floatX)
return x * mask
return x
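# Note that this helper only zeroes activations; it does not rescale the kept
# units by 1/(1-p). In this file the compensation is handled when the dropout
# layers are built (DropoutNet scales W and b by 1/(1-dr) when fast_drop is
# False).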
def fast_dropout(rng, x):
""" Multiply activations by N(1,1) """
seed = rng.randint(2 ** 30)
srng = RandomStreams(seed)
mask = srng.normal(size=x.shape, avg=1., dtype=theano.config.floatX)
return x * mask
def build_shared_zeros(shape, name):
""" Builds a theano shared variable filled with a zeros numpy array """
return shared(value=numpy.zeros(shape, dtype=theano.config.floatX),
name=name, borrow=True)
class Linear(object):
""" Basic linear transformation layer (W.X + b) """
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
W_values *= 4 # This works for sigmoid activated networks!
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b = build_shared_zeros((n_out,), 'b')
self.input = input
self.W = W
self.b = b
self.params = [self.W, self.b]
self.output = T.dot(self.input, self.W) + self.b
if fdrop:
self.output = fast_dropout(rng, self.output)
def __repr__(self):
return "Linear"
class SigmoidLayer(Linear):
""" Sigmoid activation layer (sigmoid(W.X + b)) """
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
super(SigmoidLayer, self).__init__(rng, input, n_in, n_out, W, b)
self.pre_activation = self.output
if fdrop:
self.pre_activation = fast_dropout(rng, self.pre_activation)
self.output = T.nnet.sigmoid(self.pre_activation)
class ReLU(Linear):
""" Rectified Linear Unit activation layer (max(0, W.X + b)) """
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
if b is None:
b = build_shared_zeros((n_out,), 'b')
super(ReLU, self).__init__(rng, input, n_in, n_out, W, b)
self.pre_activation = self.output
if fdrop:
self.pre_activation = fast_dropout(rng, self.pre_activation)
self.output = relu_f(self.pre_activation)
class SoftPlus(Linear):
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=0.):
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
super(SoftPlus, self).__init__(rng, input, n_in, n_out, W, b)
self.pre_activation = self.output
if fdrop:
self.pre_activation = fast_dropout(rng, self.pre_activation, fdrop)
self.output = softplus_f(self.pre_activation)
class DatasetMiniBatchIterator(object):
""" Basic mini-batch iterator """
def __init__(self, x, y, batch_size=BATCH_SIZE, randomize=False):
self.x = x
self.y = y
self.batch_size = batch_size
self.randomize = randomize
from sklearn.utils import check_random_state
self.rng = check_random_state(42)
def __iter__(self):
n_samples = self.x.shape[0]
if self.randomize:
for _ in xrange(n_samples / BATCH_SIZE):
if BATCH_SIZE > 1:
i = int(self.rng.rand(1) * ((n_samples+BATCH_SIZE-1) / BATCH_SIZE))
else:
i = int(math.floor(self.rng.rand(1) * n_samples))
yield (i, self.x[i*self.batch_size:(i+1)*self.batch_size],
self.y[i*self.batch_size:(i+1)*self.batch_size])
else:
for i in xrange((n_samples + self.batch_size - 1)
/ self.batch_size):
yield (self.x[i*self.batch_size:(i+1)*self.batch_size],
self.y[i*self.batch_size:(i+1)*self.batch_size])
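# Minimal usage sketch (x and y are numpy arrays; the names are hypothetical):
#
#     for batch_x, batch_y in DatasetMiniBatchIterator(x, y, batch_size=100):
#         pass  # each batch_x holds at most 100 consecutive rows of x
#
# With randomize=True the iterator instead yields (i, batch_x, batch_y)
# tuples for randomly chosen batch indices i.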
class LogisticRegression:
"""Multi-class Logistic Regression
"""
def __init__(self, rng, input, n_in, n_out, W=None, b=None):
        if W is not None:
self.W = W
else:
self.W = build_shared_zeros((n_in, n_out), 'W')
        if b is not None:
self.b = b
else:
self.b = build_shared_zeros((n_out,), 'b')
# P(Y|X) = softmax(W.X + b)
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.output = self.y_pred
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def negative_log_likelihood_sum(self, y):
return -T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def log_loss(self, y):
# TODO
log_y_hat = T.log(self.p_y_given_x)
#ll = log_y_hat[T.arange(y.shape[0]), y] + log_y_hat[T.arange(y.shape[0]), 1-y]
#return -T.mean(ll)
def training_cost(self, y):
""" Wrapper for standard name """
return self.negative_log_likelihood_sum(y)
#return self.log_loss(y) TODO
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError("y should have the same shape as self.y_pred",
("y", y.type, "y_pred", self.y_pred.type))
if y.dtype.startswith('int'):
return T.mean(T.neq(self.y_pred, y))
else:
print("!!! y should be of int type")
return T.mean(T.neq(self.y_pred, numpy.asarray(y, dtype='int')))
class NeuralNet(object):
""" Neural network (not regularized, without dropout) """
def __init__(self, numpy_rng, theano_rng=None,
n_ins=40*3,
layers_types=[Linear, ReLU, ReLU, ReLU, LogisticRegression],
layers_sizes=[1024, 1024, 1024, 1024],
n_outs=62 * 3,
rho=0.95, eps=1.E-6,
max_norm=0.,
debugprint=False):
"""
TODO
"""
self.layers = []
self.params = []
self.n_layers = len(layers_types)
self.layers_types = layers_types
assert self.n_layers > 0
self.max_norm = max_norm
self._rho = rho # ``momentum'' for adadelta
self._eps = eps # epsilon for adadelta
self._accugrads = [] # for adadelta
self._accudeltas = [] # for adadelta
self._old_dxs = [] # for adadelta with Nesterov
        if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
self.x = T.fmatrix('x')
self.y = T.ivector('y')
self.layers_ins = [n_ins] + layers_sizes
self.layers_outs = layers_sizes + [n_outs]
layer_input = self.x
for layer_type, n_in, n_out in zip(layers_types,
self.layers_ins, self.layers_outs):
this_layer = layer_type(rng=numpy_rng,
input=layer_input, n_in=n_in, n_out=n_out)
assert hasattr(this_layer, 'output')
self.params.extend(this_layer.params)
self._accugrads.extend([build_shared_zeros(t.shape.eval(),
'accugrad') for t in this_layer.params])
self._accudeltas.extend([build_shared_zeros(t.shape.eval(),
'accudelta') for t in this_layer.params])
self._old_dxs.extend([build_shared_zeros(t.shape.eval(),
'old_dxs') for t in this_layer.params])
self.layers.append(this_layer)
layer_input = this_layer.output
assert hasattr(self.layers[-1], 'training_cost')
assert hasattr(self.layers[-1], 'errors')
# TODO standardize cost
self.mean_cost = self.layers[-1].negative_log_likelihood(self.y)
self.cost = self.layers[-1].training_cost(self.y)
#self.mean_cost = self.layers[-1].training_cost(self.y) # TODO
if debugprint:
theano.printing.debugprint(self.cost)
self.errors = self.layers[-1].errors(self.y)
def __repr__(self):
dimensions_layers_str = map(lambda x: "x".join(map(str, x)),
zip(self.layers_ins, self.layers_outs))
return "_".join(map(lambda x: "_".join((x[0].__name__, x[1])),
zip(self.layers_types, dimensions_layers_str)))
def get_SGD_trainer(self):
""" Returns a plain SGD minibatch trainer with learning rate as param.
"""
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
learning_rate = T.fscalar('lr') # learning rate to use
# compute the gradients with respect to the model parameters
# using mean_cost so that the learning rate is not too dependent
# on the batch size
gparams = T.grad(self.mean_cost, self.params)
# compute list of weights updates
updates = OrderedDict()
for param, gparam in zip(self.params, gparams):
if self.max_norm:
W = param - gparam * learning_rate
col_norms = W.norm(2, axis=0)
desired_norms = T.clip(col_norms, 0, self.max_norm)
updates[param] = W * (desired_norms / (1e-6 + col_norms))
else:
updates[param] = param - gparam * learning_rate
train_fn = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y),
theano.Param(learning_rate)],
outputs=self.mean_cost,
updates=updates,
givens={self.x: batch_x, self.y: batch_y})
return train_fn
def get_adagrad_trainer(self):
""" Returns an Adagrad (Duchi et al. 2010) trainer using a learning rate.
"""
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
learning_rate = T.fscalar('lr') # learning rate to use
# compute the gradients with respect to the model parameters
gparams = T.grad(self.mean_cost, self.params)
# compute list of weights updates
updates = OrderedDict()
for accugrad, param, gparam in zip(self._accugrads, self.params, gparams):
# c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
agrad = accugrad + gparam * gparam
dx = - (learning_rate / T.sqrt(agrad + self._eps)) * gparam
if self.max_norm:
W = param + dx
col_norms = W.norm(2, axis=0)
desired_norms = T.clip(col_norms, 0, self.max_norm)
updates[param] = W * (desired_norms / (1e-6 + col_norms))
else:
updates[param] = param + dx
updates[accugrad] = agrad
train_fn = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y),
theano.Param(learning_rate)],
outputs=self.mean_cost,
updates=updates,
givens={self.x: batch_x, self.y: batch_y})
return train_fn
def get_adadelta_trainer(self):
""" Returns an Adadelta (Zeiler 2012) trainer using self._rho and
self._eps params.
"""
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
# compute the gradients with respect to the model parameters
gparams = T.grad(self.mean_cost, self.params)
# compute list of weights updates
updates = OrderedDict()
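        # Adadelta (Zeiler 2012) keeps decaying averages of squared gradients
        # and squared updates and uses their ratio as a per-parameter step:
        #   E[g^2]_t  = rho * E[g^2]_{t-1}  + (1 - rho) * g_t^2
        #   dx_t      = - sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
        #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
        # which is what the loop below computes with accugrad and accudelta.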
for accugrad, accudelta, param, gparam in zip(self._accugrads,
self._accudeltas, self.params, gparams):
# c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
agrad = self._rho * accugrad + (1 - self._rho) * gparam * gparam
dx = - T.sqrt((accudelta + self._eps)
/ (agrad + self._eps)) * gparam
updates[accudelta] = (self._rho * accudelta
+ (1 - self._rho) * dx * dx)
if self.max_norm:
W = param + dx
col_norms = W.norm(2, axis=0)
desired_norms = T.clip(col_norms, 0, self.max_norm)
updates[param] = W * (desired_norms / (1e-6 + col_norms))
else:
updates[param] = param + dx
updates[accugrad] = agrad
train_fn = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y)],
outputs=self.mean_cost,
updates=updates,
givens={self.x: batch_x, self.y: batch_y})
return train_fn
def score_classif(self, given_set):
""" Returns functions to get current classification errors. """
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
score = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y)],
outputs=self.errors,
givens={self.x: batch_x, self.y: batch_y})
def scoref():
""" returned function that scans the entire set given as input """
return [score(batch_x, batch_y) for batch_x, batch_y in given_set]
return scoref
class RegularizedNet(NeuralNet):
""" Neural net with L1 and L2 regularization """
def __init__(self, numpy_rng, theano_rng=None,
n_ins=100,
layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
layers_sizes=[1024, 1024, 1024],
n_outs=2,
rho=0.9, eps=1.E-6,
L1_reg=0.,
L2_reg=0.,
max_norm=0.,
debugprint=False):
"""
TODO
"""
super(RegularizedNet, self).__init__(numpy_rng, theano_rng, n_ins,
layers_types, layers_sizes, n_outs, rho, eps, max_norm,
debugprint)
L1 = shared(0.)
for param in self.params:
L1 += T.sum(abs(param))
if L1_reg > 0.:
self.cost = self.cost + L1_reg * L1
L2 = shared(0.)
for param in self.params:
L2 += T.sum(param ** 2)
if L2_reg > 0.:
self.cost = self.cost + L2_reg * L2
class DropoutNet(NeuralNet):
""" Neural net with dropout (see Hinton's et al. paper) """
def __init__(self, numpy_rng, theano_rng=None,
n_ins=40*3,
layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
layers_sizes=[4000, 4000, 4000, 4000],
dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
n_outs=62 * 3,
rho=0.98, eps=1.E-6,
max_norm=0.,
fast_drop=True,
debugprint=False):
"""
TODO
"""
super(DropoutNet, self).__init__(numpy_rng, theano_rng, n_ins,
layers_types, layers_sizes, n_outs, rho, eps, max_norm,
debugprint)
self.dropout_rates = dropout_rates
if fast_drop:
if dropout_rates[0]:
dropout_layer_input = fast_dropout(numpy_rng, self.x)
else:
dropout_layer_input = self.x
else:
dropout_layer_input = dropout(numpy_rng, self.x, p=dropout_rates[0])
self.dropout_layers = []
for layer, layer_type, n_in, n_out, dr in zip(self.layers,
layers_types, self.layers_ins, self.layers_outs,
dropout_rates[1:] + [0]): # !!! we do not dropout anything
# from the last layer !!!
if dr:
if fast_drop:
this_layer = layer_type(rng=numpy_rng,
input=dropout_layer_input, n_in=n_in, n_out=n_out,
W=layer.W, b=layer.b, fdrop=True)
else:
this_layer = layer_type(rng=numpy_rng,
input=dropout_layer_input, n_in=n_in, n_out=n_out,
W=layer.W * 1. / (1. - dr),
b=layer.b * 1. / (1. - dr))
                # N.B. dropout with dr==1 does not drop anything!!
this_layer.output = dropout(numpy_rng, this_layer.output, dr)
else:
this_layer = layer_type(rng=numpy_rng,
input=dropout_layer_input, n_in=n_in, n_out=n_out,
W=layer.W, b=layer.b)
assert hasattr(this_layer, 'output')
self.dropout_layers.append(this_layer)
dropout_layer_input = this_layer.output
assert hasattr(self.layers[-1], 'training_cost')
assert hasattr(self.layers[-1], 'errors')
# TODO standardize cost
# these are the dropout costs
self.mean_cost = self.dropout_layers[-1].negative_log_likelihood(self.y)
self.cost = self.dropout_layers[-1].training_cost(self.y)
# these is the non-dropout errors
self.errors = self.layers[-1].errors(self.y)
def __repr__(self):
return super(DropoutNet, self).__repr__() + "\n"\
+ "dropout rates: " + str(self.dropout_rates)
def add_fit_and_score(class_to_chg):
""" Mutates a class to add the fit() and score() functions to a NeuralNet.
"""
from types import MethodType
def fit(self, x_train, y_train, x_dev=None, y_dev=None,
max_epochs=20, early_stopping=True, split_ratio=0.1, # TODO 100+ epochs
method='adadelta', verbose=False, plot=False):
"""
TODO
"""
import time, copy
        if x_dev is None or y_dev is None:
from sklearn.cross_validation import train_test_split
x_train, x_dev, y_train, y_dev = train_test_split(x_train, y_train,
test_size=split_ratio, random_state=42)
if method == 'sgd':
train_fn = self.get_SGD_trainer()
elif method == 'adagrad':
train_fn = self.get_adagrad_trainer()
elif method == 'adadelta':
train_fn = self.get_adadelta_trainer()
elif method == 'adadelta_rprop':
train_fn = self.get_adadelta_rprop_trainer()
train_set_iterator = DatasetMiniBatchIterator(x_train, y_train)
dev_set_iterator = DatasetMiniBatchIterator(x_dev, y_dev)
train_scoref = self.score_classif(train_set_iterator)
dev_scoref = self.score_classif(dev_set_iterator)
best_dev_loss = numpy.inf
epoch = 0
# TODO early stopping (not just cross val, also stop training)
if plot:
verbose = True
self._costs = []
self._train_errors = []
self._dev_errors = []
self._updates = []
while epoch < max_epochs:
if not verbose:
sys.stdout.write("\r%0.2f%%" % (epoch * 100./ max_epochs))
sys.stdout.flush()
avg_costs = []
timer = time.time()
for x, y in train_set_iterator:
if method == 'sgd' or 'adagrad' in method:
avg_cost = train_fn(x, y, lr=1.E-2)
elif 'adadelta' in method:
avg_cost = train_fn(x, y)
if type(avg_cost) == list:
avg_costs.append(avg_cost[0])
else:
avg_costs.append(avg_cost)
if verbose:
mean_costs = numpy.mean(avg_costs)
mean_train_errors = numpy.mean(train_scoref())
print(' epoch %i took %f seconds' %
(epoch, time.time() - timer))
print(' epoch %i, avg costs %f' %
(epoch, mean_costs))
print(' method %s, epoch %i, training error %f' %
(method, epoch, mean_train_errors))
if plot:
self._costs.append(mean_costs)
self._train_errors.append(mean_train_errors)
dev_errors = numpy.mean(dev_scoref())
if plot:
self._dev_errors.append(dev_errors)
if dev_errors < best_dev_loss:
best_dev_loss = dev_errors
best_params = copy.deepcopy(self.params)
if verbose:
print('!!! epoch %i, validation error of best model %f' %
(epoch, dev_errors))
epoch += 1
if not verbose:
print("")
for i, param in enumerate(best_params):
self.params[i] = param
def score(self, x, y):
""" error rates """
iterator = DatasetMiniBatchIterator(x, y)
scoref = self.score_classif(iterator)
return numpy.mean(scoref())
class_to_chg.fit = MethodType(fit, None, class_to_chg)
class_to_chg.score = MethodType(score, None, class_to_chg)
if __name__ == "__main__":
add_fit_and_score(DropoutNet)
add_fit_and_score(RegularizedNet)
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
from scipy.ndimage import convolve
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = numpy.concatenate([X] +
[numpy.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = numpy.concatenate([Y for _ in range(5)], axis=0)
return X, Y
from sklearn import datasets, svm, naive_bayes
from sklearn import cross_validation, preprocessing
SPOKEN_WORDS = True
MNIST = False
DIGITS = False
NUDGE_DIGITS = True
FACES = False
TWENTYNEWSGROUPS = False
VERBOSE = True
SCALE = True
PLOT = True
def train_models(x_train, y_train, x_test, y_test, n_features, n_outs,
use_dropout=False, n_epochs=100, numpy_rng=None, # TODO 200+ epochs
svms=False, nb=False, deepnn=True, name=''):
if svms:
print("Linear SVM")
classifier = svm.SVC(gamma=0.001)
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
print("RBF-kernel SVM")
classifier = svm.SVC(kernel='rbf', class_weight='auto')
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
if nb:
print("Multinomial Naive Bayes")
classifier = naive_bayes.MultinomialNB()
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
if deepnn:
import warnings
warnings.filterwarnings("ignore") # TODO remove
if use_dropout:
n_epochs *= 4
def new_dnn(dropout=False):
if dropout:
print("Dropout DNN")
return DropoutNet(numpy_rng=numpy_rng, n_ins=n_features,
#layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
layers_types=[SoftPlus, SoftPlus, SoftPlus, SoftPlus, LogisticRegression],
layers_sizes=[2000, 2000, 2000, 2000],
dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
n_outs=n_outs,
max_norm=4.,
fast_drop=False,
debugprint=0)
else:
print("Simple (regularized) DNN")
return RegularizedNet(numpy_rng=numpy_rng, n_ins=n_features,
#layers_types=[LogisticRegression],
#layers_sizes=[],
#layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
#layers_types=[SoftPlus, SoftPlus, SoftPlus, LogisticRegression],
#layers_sizes=[1000, 1000, 1000],
layers_types=[ReLU, LogisticRegression],
layers_sizes=[200],
n_outs=n_outs,
#L1_reg=0.001/x_train.shape[0],
#L2_reg=0.001/x_train.shape[0],
L1_reg=0.,
L2_reg=1./x_train.shape[0],
max_norm=0.,
debugprint=0)
import matplotlib.pyplot as plt
plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(222)
ax3 = plt.subplot(223)
ax4 = plt.subplot(224) # TODO updates of the weights
methods = ['adadelta']
for method in methods:
dnn = new_dnn(use_dropout)
print dnn
dnn.fit(x_train, y_train, max_epochs=n_epochs, method=method, verbose=VERBOSE, plot=PLOT)
test_error = dnn.score(x_test, y_test)
print("score: %f" % (1. - test_error))
ax1.plot(numpy.log10(dnn._costs), label=method)
#ax2.plot(numpy.log10(dnn._train_errors), label=method)
#ax3.plot(numpy.log10(dnn._dev_errors), label=method)
ax2.plot(dnn._train_errors, label=method)
ax3.plot(dnn._dev_errors, label=method)
#ax4.plot(dnn._updates, label=method) TODO
ax4.plot([test_error for _ in range(10)], label=method)
ax1.set_xlabel('epoch')
ax1.set_ylabel('cost (log10)')
ax2.set_xlabel('epoch')
ax2.set_ylabel('train error')
ax3.set_xlabel('epoch')
ax3.set_ylabel('dev error')
ax4.set_ylabel('test error')
plt.legend()
plt.savefig('training_log' + name + '.png')
if MNIST:
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
X = numpy.asarray(mnist.data, dtype='float32')
if SCALE:
#X = preprocessing.scale(X)
X /= 255.
y = numpy.asarray(mnist.target, dtype='int32')
#target_names = mnist.target_names
print("Total dataset size:")
print("n samples: %d" % X.shape[0])
print("n features: %d" % X.shape[1])
print("n classes: %d" % len(set(y)))
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, X.shape[1],
len(set(y)), numpy_rng=numpy.random.RandomState(123),
name='MNIST')
if DIGITS:
digits = datasets.load_digits()
data = numpy.asarray(digits.data, dtype='float32')
target = numpy.asarray(digits.target, dtype='int32')
x = data
y = target
if NUDGE_DIGITS:
x, y = nudge_dataset(x, y)
if SCALE:
x = preprocessing.scale(x)
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, x.shape[1],
len(set(target)), numpy_rng=numpy.random.RandomState(123),
name='digits')
if FACES:
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
lfw_people = datasets.fetch_lfw_people(min_faces_per_person=70,
resize=0.4)
X = numpy.asarray(lfw_people.data, dtype='float32')
if SCALE:
X = preprocessing.scale(X)
y = numpy.asarray(lfw_people.target, dtype='int32')
target_names = lfw_people.target_names
print("Total dataset size:")
print("n samples: %d" % X.shape[0])
print("n features: %d" % X.shape[1])
print("n classes: %d" % target_names.shape[0])
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, X.shape[1],
len(set(y)), numpy_rng=numpy.random.RandomState(123),
name='faces')
if TWENTYNEWSGROUPS:
from sklearn.feature_extraction.text import TfidfVectorizer
newsgroups_train = datasets.fetch_20newsgroups(subset='train')
vectorizer = TfidfVectorizer(encoding='latin-1', max_features=10000)
#vectorizer = HashingVectorizer(encoding='latin-1')
x_train = vectorizer.fit_transform(newsgroups_train.data)
x_train = numpy.asarray(x_train.todense(), dtype='float32')
y_train = numpy.asarray(newsgroups_train.target, dtype='int32')
newsgroups_test = datasets.fetch_20newsgroups(subset='test')
x_test = vectorizer.transform(newsgroups_test.data)
x_test = numpy.asarray(x_test.todense(), dtype='float32')
y_test = numpy.asarray(newsgroups_test.target, dtype='int32')
train_models(x_train, y_train, x_test, y_test, x_train.shape[1],
len(set(y_train)),
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=True, deepnn=True,
name='20newsgroups')
if SPOKEN_WORDS:
# words done by "say", shapes of their filterbanks
#>>> shapes
#array([[62, 40],
# [65, 40],
# [58, 40],
# ...,
# [85, 40],
# [79, 40],
# [51, 40]])
#>>> shapes.mean(axis=0)
#array([ 70.87751196, 40. ])
#>>> shapes.std(axis=0)
#array([ 12.94580736, 0. ])
#>>> shapes.min(axis=0)
#array([39, 40])
words_fbanks = numpy.load("all_words_pascal1k.npz")
n_tokens = len([k for k in words_fbanks.keys()])
lexicon = set([w.split('_')[1] for w in words_fbanks.keys()])
lexicon = [w for w in lexicon] # we need an ordered collection
n_words = len(lexicon)
all_fbanks = numpy.concatenate([v for _, v in words_fbanks.iteritems()])
print all_fbanks.shape
mean = all_fbanks.mean(axis=0)
print mean.shape
std = all_fbanks.std(axis=0)
print std.shape
# take 69 fbanks in the middle of the word and pad with 0s if needed
X = numpy.zeros((n_tokens, 40*STACKSIZE), dtype='float32')
y = numpy.zeros(n_tokens, dtype='int32')
for i, (swf, fb) in enumerate(words_fbanks.iteritems()):
spkr, word, _ = swf.split('_')
l = fb.shape[0]
m = l/2
s = max(0, m - ((STACKSIZE-1) / 2))
e = min(l-1, m + ((STACKSIZE-1) / 2))
tmp = (fb - mean) / std
tmp = tmp[s:e+1].flatten()
diff = 40*STACKSIZE - tmp.shape[0]
if not diff:
X[i] = tmp
else:
X[i][diff/2:-diff/2] = tmp
y[i] = lexicon.index(word)
# train the DNN, with the training set as test set if let in this form:
train_models(X, y, X, y, X.shape[1],
len(set(y)),
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=False, deepnn=True,
name='spoken_words')
| mit |
parthea/pydatalab | datalab/data/_csv.py | 6 | 7063 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements useful CSV utilities."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import next
from builtins import str as newstr
from builtins import range
from builtins import object
import csv
import os
import pandas as pd
import random
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
import datalab.storage
import datalab.utils
_MAX_CSV_BYTES = 10000000
class Csv(object):
"""Represents a CSV file in GCS or locally with same schema.
"""
def __init__(self, path, delimiter=b','):
"""Initializes an instance of a Csv instance.
Args:
path: path of the Csv file.
delimiter: the separator used to parse a Csv line.
"""
self._path = path
self._delimiter = delimiter
@property
def path(self):
return self._path
@staticmethod
def _read_gcs_lines(path, max_lines=None):
return datalab.storage.Item.from_url(path).read_lines(max_lines)
@staticmethod
def _read_local_lines(path, max_lines=None):
lines = []
for line in open(path):
if max_lines is not None and len(lines) >= max_lines:
break
lines.append(line)
return lines
def _is_probably_categorical(self, column):
if newstr(column.dtype) != 'object':
# only string types (represented in DataFrame as object) can potentially be categorical
return False
if len(max(column, key=lambda p: len(newstr(p)))) > 100:
return False # value too long to be a category
if len(set(column)) > 100:
return False # too many unique values to be a category
return True
def browse(self, max_lines=None, headers=None):
"""Try reading specified number of lines from the CSV object.
Args:
max_lines: max number of lines to read. If None, the whole file is read
headers: a list of strings as column names. If None, it will use "col0, col1..."
Returns:
A pandas DataFrame with the schema inferred from the data.
Raises:
Exception if the csv object cannot be read or not enough lines to read, or the
headers size does not match columns size.
"""
if self.path.startswith('gs://'):
lines = Csv._read_gcs_lines(self.path, max_lines)
else:
lines = Csv._read_local_lines(self.path, max_lines)
if len(lines) == 0:
return pd.DataFrame(columns=headers)
columns_size = len(next(csv.reader([lines[0]], delimiter=self._delimiter)))
if headers is None:
headers = ['col' + newstr(e) for e in range(columns_size)]
if len(headers) != columns_size:
raise Exception('Number of columns in CSV do not match number of headers')
buf = StringIO()
for line in lines:
buf.write(line)
buf.write('\n')
buf.seek(0)
df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)
for key, col in df.iteritems():
if self._is_probably_categorical(col):
df[key] = df[key].astype('category')
return df
def _create_federated_table(self, skip_header_rows):
import datalab.bigquery as bq
df = self.browse(1, None)
# read each column as STRING because we only want to sample rows.
schema_train = bq.Schema([{'name': name, 'type': 'STRING'} for name in df.keys()])
options = bq.CSVOptions(skip_leading_rows=(1 if skip_header_rows is True else 0))
return bq.FederatedTable.from_storage(self.path,
csv_options=options,
schema=schema_train,
max_bad_records=0)
def _get_gcs_csv_row_count(self, federated_table):
import datalab.bigquery as bq
results = bq.Query('SELECT count(*) from data',
data_sources={'data': federated_table}).results()
return results[0].values()[0]
def sample_to(self, count, skip_header_rows, strategy, target):
"""Sample rows from GCS or local file and save results to target file.
Args:
count: number of rows to sample. If strategy is "BIGQUERY", it is used as approximate number.
skip_header_rows: whether to skip first row when reading from source.
strategy: can be "LOCAL" or "BIGQUERY". If local, the sampling happens in local memory,
and number of resulting rows matches count. If BigQuery, sampling is done
with BigQuery in cloud, and the number of resulting rows will be approximated to
count.
target: The target file path, can be GCS or local path.
Raises:
Exception if strategy is "BIGQUERY" but source is not a GCS path.
"""
# TODO(qimingj) Add unit test
# Read data from source into DataFrame.
if sys.version_info.major > 2:
xrange = range # for python 3 compatibility
if strategy == 'BIGQUERY':
import datalab.bigquery as bq
if not self.path.startswith('gs://'):
raise Exception('Cannot use BIGQUERY if data is not in GCS')
federated_table = self._create_federated_table(skip_header_rows)
row_count = self._get_gcs_csv_row_count(federated_table)
query = bq.Query('SELECT * from data', data_sources={'data': federated_table})
sampling = bq.Sampling.random(count * 100 / float(row_count))
sample = query.sample(sampling=sampling)
df = sample.to_dataframe()
elif strategy == 'LOCAL':
local_file = self.path
if self.path.startswith('gs://'):
local_file = tempfile.mktemp()
datalab.utils.gcs_copy_file(self.path, local_file)
with open(local_file) as f:
row_count = sum(1 for line in f)
start_row = 1 if skip_header_rows is True else 0
skip_count = row_count - count - 1 if skip_header_rows is True else row_count - count
skip = sorted(random.sample(xrange(start_row, row_count), skip_count))
header_row = 0 if skip_header_rows is True else None
df = pd.read_csv(local_file, skiprows=skip, header=header_row, delimiter=self._delimiter)
if self.path.startswith('gs://'):
os.remove(local_file)
else:
raise Exception('strategy must be BIGQUERY or LOCAL')
# Write to target.
if target.startswith('gs://'):
with tempfile.NamedTemporaryFile() as f:
df.to_csv(f, header=False, index=False)
f.flush()
datalab.utils.gcs_copy_file(f.name, target)
else:
with open(target, 'w') as f:
df.to_csv(f, header=False, index=False, sep=str(self._delimiter))
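# Usage sketch (hypothetical paths, not part of this module):
#   csv_file = Csv('gs://my-bucket/data.csv')
#   df = csv_file.browse(max_lines=100)                          # peek at the data as a DataFrame
#   csv_file.sample_to(1000, True, 'LOCAL', '/tmp/sample.csv')   # write a ~1000-row sample locally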
| apache-2.0 |
vene/marseille | experiments/exp_rnn.py | 1 | 5162 | import os
import dill
import numpy as np
from sklearn.model_selection import KFold
from marseille.custom_logging import logging
from marseille.datasets import get_dataset_loader, load_embeds
from marseille.io import cache_fname
from marseille.argrnn import ArgumentLSTM
def argrnn_cv_score(dataset, dynet_weight_decay, mlp_dropout,
rnn_dropout, prop_layers, class_weight, constraints,
compat_features, second_order):
fn = cache_fname("argrnn_cv_score", (dataset, dynet_weight_decay,
mlp_dropout, rnn_dropout, prop_layers,
class_weight, constraints,
compat_features, second_order))
if os.path.exists(fn):
logging.info("Cached file already exists.")
with open(fn, "rb") as f:
return dill.load(f)
load, ids = get_dataset_loader(dataset, split="train")
embeds = load_embeds(dataset)
grandparent_layers = 1 if second_order and dataset == 'ukp' else 0
coparent_layers = 1 if second_order else 0
sibling_layers = 1 if second_order and dataset == 'cdcp' else 0
scores = []
all_Y_pred = []
score_at_iter = [10, 25, 50, 75, 100]
n_folds = 5 if dataset == 'ukp' else 3
for k, (tr, val) in enumerate(KFold(n_folds).split(ids)):
docs_train = list(load(ids[tr]))
docs_val = list(load(ids[val]))
Y_train = [doc.label for doc in docs_train]
Y_val = [doc.label for doc in docs_val]
rnn = ArgumentLSTM(lstm_dropout=rnn_dropout,
mlp_dropout=mlp_dropout,
compat_features=compat_features,
constraints=constraints,
prop_mlp_layers=prop_layers,
coparent_layers=coparent_layers,
grandparent_layers=grandparent_layers,
sibling_layers=sibling_layers,
class_weight=class_weight,
second_order_multilinear=True,
max_iter=100,
score_at_iter=score_at_iter,
n_mlp=128,
n_lstm=128,
lstm_layers=2,
link_mlp_layers=1,
embeds=embeds,
exact_inference=False,
link_bilinear=True)
rnn.fit(docs_train, Y_train, docs_val, Y_val)
Y_val_pred = rnn.predict(docs_val)
all_Y_pred.extend(Y_val_pred)
scores.append(rnn.scores_)
with open(fn, "wb") as f:
dill.dump((scores, score_at_iter, all_Y_pred), f)
return scores, score_at_iter, all_Y_pred
if __name__ == '__main__':
from docopt import docopt
usage = """
Usage:
exp_rnn (cdcp|ukp) [\
--dynet-seed N --dynet-weight-decay N --dynet-mem N --prop-layers=N \
--rnn-dropout=N --mlp-dropout=N --balanced --constraints --strict \
--compat-features --second-order]
Options:
--dynet-seed=N random number generator seed for dynet library
--dynet-weight-decay=N global weight decay amount for dynet library
--dynet-mem=N memory pool size for dynet
--prop-layers=N number of prop classifier layers. [default: 2]
--rnn-dropout=N dropout ratio in lstm. [default: 0.0]
--mlp-dropout=N dropout ratio in mlp. [default: 0.1]
--balanced whether to reweight class costs by freq
--constraints whether to constrain the decoding
--strict whether to use strict domain constraints
--compat-features whether to use features for compat factors
--second-order whether to use coparent / grandpa / siblings
"""
args = docopt(usage)
dataset = 'cdcp' if args['cdcp'] else 'ukp'
prop_layers = int(args['--prop-layers'])
rnn_dropout = float(args['--rnn-dropout'])
mlp_dropout = float(args['--mlp-dropout'])
cw = 'balanced' if args['--balanced'] else None
if args['--constraints']:
constraints = dataset
if args['--strict']:
constraints += '+strict'
else:
constraints = ""
scores, score_at_iter, _ = argrnn_cv_score(dataset,
args['--dynet-weight-decay'],
mlp_dropout,
rnn_dropout,
prop_layers,
cw,
constraints,
args['--compat-features'],
args['--second-order'])
for iter, score in zip(score_at_iter, np.mean(scores, axis=0)):
print("iter={} "
"Link: {:.3f}/{:.3f} "
"Node: {:.3f}/{:.3f} "
"accuracy {:.3f}".format(iter, *score),
)
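# Example invocation (hypothetical option values; see the docopt usage string above):
# python exp_rnn.py cdcp --prop-layers=2 --rnn-dropout=0.1 --balanced --constraints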
| bsd-3-clause |
soft-matter/mr | mr/tests/test_feature_saving.py | 1 | 1721 | import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal)
import os
from tempfile import NamedTemporaryFile
import pandas as pd
from pandas import DataFrame, Series
import mr
import sqlite3
path, _ = os.path.split(os.path.abspath(__file__))
class TestFeatureSaving(unittest.TestCase):
def setUp(self):
self.db_conn = sqlite3.connect(':memory:')
directory = os.path.join(path, 'video', 'image_sequence')
self.v = mr.ImageSequence(directory)
self.PARAMS = (11, 3000)
with NamedTemporaryFile() as temp:
self.expected = mr.batch(self.v[[0, 1]], *self.PARAMS,
meta=temp.name)
def test_sqlite(self):
with NamedTemporaryFile() as temp:
f = mr.batch(self.v[[0, 1]], *self.PARAMS, conn=self.db_conn,
sql_flavor='sqlite', table='features', meta=temp.name)
assert_frame_equal(f, self.expected)
def test_HDFStore(self):
STORE_NAME = 'temp_for_testing.h5'
if os.path.isfile(STORE_NAME):
os.remove(STORE_NAME)
try:
store = pd.HDFStore(STORE_NAME)
except:
nose.SkipTest('Cannot make an HDF5 file. Skipping')
else:
with NamedTemporaryFile() as temp:
f = mr.batch(self.v[[0, 1]], *self.PARAMS, store=store,
table='features', meta=temp.name)
assert_frame_equal(f.reset_index(drop=True),
self.expected.reset_index(drop=True))
os.remove(STORE_NAME)
| gpl-3.0 |
eyadsibai/rep | tests/test_pybrain.py | 3 | 3872 | # Copyright 2014-2015 Yandex LLC and contributors <https://yandex.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from rep.test.test_estimators import check_classifier, check_regression, check_params, \
generate_classification_data, check_classification_reproducibility
from rep.estimators.pybrain import PyBrainClassifier, PyBrainRegressor
from sklearn.ensemble import BaggingClassifier
from rep.estimators import SklearnClassifier
__author__ = 'Artem Zhirokhov'
classifier_params = {
'has_staged_pp': False,
'has_importances': False,
'supports_weight': False
}
regressor_params = {
'has_staged_predictions': False,
'has_importances': False,
'supports_weight': False
}
def test_pybrain_params():
check_params(PyBrainClassifier, layers=[1, 2], epochs=5, use_rprop=True, hiddenclass=['LinearLayer'])
check_params(PyBrainRegressor, layers=[1, 2], epochs=5, etaplus=1.3, hiddenclass=['LinearLayer'], learningrate=0.1)
def test_pybrain_classification():
clf = PyBrainClassifier(epochs=2)
check_classifier(clf, **classifier_params)
check_classifier(PyBrainClassifier(epochs=-1, continue_epochs=1, layers=[]), **classifier_params)
check_classifier(PyBrainClassifier(epochs=2, layers=[5, 2]), **classifier_params)
def test_pybrain_reproducibility():
try:
import numpy
X, y, _ = generate_classification_data()
clf1 = PyBrainClassifier(layers=[4], epochs=2).fit(X, y)
clf2 = PyBrainClassifier(layers=[4], epochs=2).fit(X, y)
print(clf1.predict_proba(X)-clf2.predict_proba(X))
assert numpy.allclose(clf1.predict_proba(X), clf2.predict_proba(X)), 'different predicitons'
check_classification_reproducibility(clf1, X, y)
except:
        # This test fails because PyBrain can't reproduce training.
pass
def test_pybrain_Linear_MDLSTM():
check_classifier(PyBrainClassifier(epochs=2, layers=[10, 2], hiddenclass=['LinearLayer', 'MDLSTMLayer']),
**classifier_params)
check_regression(PyBrainRegressor(epochs=3, layers=[10, 2], hiddenclass=['LinearLayer', 'MDLSTMLayer']),
**regressor_params)
def test_pybrain_SoftMax_Tanh():
check_classifier(PyBrainClassifier(epochs=2, layers=[10, 5, 2], hiddenclass=['SoftmaxLayer', 'SoftmaxLayer', 'TanhLayer'], use_rprop=True),
**classifier_params)
check_regression(PyBrainRegressor(epochs=2, layers=[10, 5, 2], hiddenclass=['SoftmaxLayer', 'TanhLayer', 'TanhLayer']),
**regressor_params)
def pybrain_test_partial_fit():
clf = PyBrainClassifier(layers=[4], epochs=2)
X, y, _ = generate_classification_data()
clf.partial_fit(X, y)
clf.partial_fit(X[:2], y[:2])
def test_pybrain_multi_classification():
check_classifier(PyBrainClassifier(), n_classes=4, **classifier_params)
def test_pybrain_regression():
check_regression(PyBrainRegressor(), **regressor_params)
def test_pybrain_multi_regression():
check_regression(PyBrainRegressor(), n_targets=4, **regressor_params)
def test_simple_stacking_pybrain():
base_pybrain = PyBrainClassifier()
base_bagging = BaggingClassifier(base_estimator=base_pybrain, n_estimators=3)
check_classifier(SklearnClassifier(clf=base_bagging), **classifier_params)
| apache-2.0 |
ybalgir/Quantop | Lec7.py | 1 | 1822 | import numpy as np
import pandas as pd
from statsmodels import regression
import statsmodels.api as sm
import matplotlib.pyplot as plt
import math
import pandas_datareader.data as web
from datetime import datetime
def Starter_Lec7():
start = datetime(2014, 1, 1)
end = datetime(2015, 1, 1)
asset = web.DataReader("TSLA","yahoo",start,end)
asset_closingPrice = asset['Close']
benchmark = web.DataReader("SPY","yahoo",start,end)
benchmark_closingPrice = benchmark['Close']
r_a = asset_closingPrice.pct_change()[1:]
r_b = benchmark_closingPrice.pct_change()[1:]
modelSummary = linreg(r_a,r_b)
print("{0} {1} \n\n".format(modelSummary,type(modelSummary)))
def linreg(X,Y):
#running linear regression
X = sm.add_constant(X)
model = regression.linear_model.OLS(Y,X).fit()
a = model.params[0]
b = model.params[1]
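    # with sm.add_constant the design matrix gains a constant column, so
    # params[0] is the fitted intercept (a) and params[1] the slope (b)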
X = pd.DataFrame(X, columns=['Close']) #Y_CMT Neat trick to extract columns from a pandas dataframe
# Return summary of the regression and plot results
X2 = np.linspace(float(X.min()), float(X.max()), 100)
Y_hat = X2 * b + a
plt.scatter(X, Y, alpha=0.3) # Plot the raw data
plt.plot(X2, Y_hat, 'r', alpha=0.9) # Add the regression line, colored in red
plt.xlabel('X Value')
plt.ylabel('Y Value')
plt.show()
return model.summary()
def TestPlotting():
N = 8
y = np.zeros(N)
x1 = np.linspace(0, 10, N, endpoint=True)
x2 = np.linspace(0, 10, N, endpoint=False)
plt.plot(x1, y, 'o')
plt.plot(x2, y + 0.5, 'o')
plt.ylim([-0.5, 1])
plt.show()
def NumpyMatrix():
array1 = np.matrix([[1,2,3],[4,5,6],[7,8,9]])
print("{0} {1} \n\n".format(array1[:,2],type(array1)))
array1 = array1[:,2]
print("{0} {1} \n\n".format(array1,type(array1)))
| gpl-3.0 |
studywolf/pydmps | pydmps/dmp_rhythmic.py | 1 | 5004 | """
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from pydmps.dmp import DMPs
import numpy as np
class DMPs_rhythmic(DMPs):
"""An implementation of discrete DMPs"""
def __init__(self, **kwargs):
"""
"""
# call super class constructor
super(DMPs_rhythmic, self).__init__(pattern="rhythmic", **kwargs)
self.gen_centers()
# set variance of Gaussian basis functions
# trial and error to find this spacing
self.h = np.ones(self.n_bfs) * self.n_bfs # 1.75
self.check_offset()
def gen_centers(self):
"""Set the centre of the Gaussian basis
functions be spaced evenly throughout run time"""
c = np.linspace(0, 2 * np.pi, self.n_bfs + 1)
c = c[0:-1]
self.c = c
def gen_front_term(self, x, dmp_num):
"""Generates the front term on the forcing term.
For rhythmic DMPs it's non-diminishing, so this
function is just a placeholder to return 1.
x float: the current value of the canonical system
dmp_num int: the index of the current dmp
"""
if isinstance(x, np.ndarray):
return np.ones(x.shape)
return 1
def gen_goal(self, y_des):
"""Generate the goal for path imitation.
For rhythmic DMPs the goal is the average of the
desired trajectory.
y_des np.array: the desired trajectory to follow
"""
goal = np.zeros(self.n_dmps)
for n in range(self.n_dmps):
num_idx = ~np.isnan(y_des[n]) # ignore nan's when calculating goal
goal[n] = 0.5 * (y_des[n, num_idx].min() + y_des[n, num_idx].max())
return goal
def gen_psi(self, x):
"""Generates the activity of the basis functions for a given
canonical system state or path.
x float, array: the canonical system state or path
"""
if isinstance(x, np.ndarray):
x = x[:, None]
return np.exp(self.h * (np.cos(x - self.c) - 1))
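    # Each basis above is a von Mises-like bump, exp(h * (cos(x - c) - 1)): periodic in
    # the phase x and equal to 1 whenever x passes one of the centres c set in gen_centers.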
def gen_weights(self, f_target):
"""Generate a set of weights over the basis functions such
that the target forcing term trajectory is matched.
f_target np.array: the desired forcing term trajectory
"""
# calculate x and psi
x_track = self.cs.rollout()
psi_track = self.gen_psi(x_track)
# efficiently calculate BF weights using weighted linear regression
for d in range(self.n_dmps):
for b in range(self.n_bfs):
self.w[d, b] = np.dot(psi_track[:, b], f_target[:, d]) / (
np.sum(psi_track[:, b]) + 1e-10
)
# ==============================
# Test code
# ==============================
if __name__ == "__main__":
import matplotlib.pyplot as plt
# test normal run
dmp = DMPs_rhythmic(n_dmps=1, n_bfs=10, w=np.zeros((1, 10)))
y_track, dy_track, ddy_track = dmp.rollout()
plt.figure(1, figsize=(6, 3))
plt.plot(np.ones(len(y_track)) * dmp.goal, "r--", lw=2)
plt.plot(y_track, lw=2)
plt.title("DMP system - no forcing term")
plt.xlabel("time (ms)")
plt.ylabel("system trajectory")
plt.legend(["goal", "system state"], loc="lower right")
plt.tight_layout()
# test imitation of path run
plt.figure(2, figsize=(6, 4))
n_bfs = [10, 30, 50, 100, 10000]
    # a sinusoidal path to imitate
path1 = np.sin(np.arange(0, 2 * np.pi, 0.01) * 5)
# a strange path to target
path2 = np.zeros(path1.shape)
path2[int(len(path2) / 2.0) :] = 0.5
for ii, bfs in enumerate(n_bfs):
dmp = DMPs_rhythmic(n_dmps=2, n_bfs=bfs)
dmp.imitate_path(y_des=np.array([path1, path2]))
y_track, dy_track, ddy_track = dmp.rollout()
plt.figure(2)
plt.subplot(211)
plt.plot(y_track[:, 0], lw=2)
plt.subplot(212)
plt.plot(y_track[:, 1], lw=2)
plt.subplot(211)
a = plt.plot(path1, "r--", lw=2)
plt.title("DMP imitate path")
plt.xlabel("time (ms)")
plt.ylabel("system trajectory")
plt.legend([a[0]], ["desired path"], loc="lower right")
plt.subplot(212)
b = plt.plot(path2, "r--", lw=2)
plt.title("DMP imitate path")
plt.xlabel("time (ms)")
plt.ylabel("system trajectory")
plt.legend(["%i BFs" % i for i in n_bfs], loc="lower right")
plt.tight_layout()
plt.show()
| gpl-3.0 |
fracturica/shardlib | shardlib/comp_analysis/SIMCompAnalysis.py | 1 | 23592 | import dataProcessing as dp
import plotFuncs as pf
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from matplotlib.path import Path
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
from compAnalysisBase import CompAnalysisBase
class SIMCompAnalysis(CompAnalysisBase):
def __init__(self, leavesQueue, criteria, sifs):
self.queue = leavesQueue
self.sifs = sifs
self.crit = criteria
def printQueueItems(self, items):
self.queue.printTitle()
for i in sorted(items):
self.queue.printQueueItem(i)
def getItemNodeDict(self, items, queue):
qdict = queue.getQueueDict()
return dict([(i, qdict[i]) for i in items])
def calcAlphaVal(self, sif, item):
vals = len(self.dataDicts[0][0][sif][item])
if vals > 1000:
return 0.1
else:
return 1
class BoxCompPlot(SIMCompAnalysis):
def createCompBoxPlot(self, items, errType, fig):
self.items = items
self.errType = errType
self.createDataDictAndEstBoxPlot()
self.createDataStrBoxPlot()
self.createFigure(fig)
def createDataStrBoxPlot(self):
dd = self.getItemNodeDict(self.items, self.queue)
optKey = self.getLeavesOptKey()
data = [dd, optKey, 'Number in Queue', '']
self.dataStr = [data]
def getLeavesOptKey(self):
return sorted(self.est.items(), key=lambda x: abs(x[1]))[0][0]
def createDataDictAndEstBoxPlot(self):
dataDict = {s: {} for s in self.sifs}
est = {i: {} for i in self.items}
dd = self.getItemNodeDict(self.items, self.queue)
for i in self.items:
node = dd[i]
errs, est[i] = self.getNodeErrsEst(node)
for s in self.sifs:
dataDict[s][i] = errs[s]
self.est = {i: est[i][self.crit[1]] for i in self.items}
self.dataDicts = [dataDict]
def getNodeErrsEst(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
est = adn.getEstimates()[self.crit[0]]
errs = adn.getErrors()[self.errType]
return errs, est
class HistCompPlot(SIMCompAnalysis):
def createCompHistPlot(self, items, errType, xlim, fig):
self.fig = fig
self.items = items
self.errType = errType
self.xlim = xlim
self.createDataStr()
self.createDataDict()
self.createFigure()
def createDataStr(self):
dd = self.getItemNodeDict(self.items.keys(), self.queue)
xlabel = 'errors "{0}"'.format(self.errType)
data = [dd, None, xlabel, 'hist']
self.dataStr = [data]
def createDataDict(self):
data = {s: {} for s in self.sifs}
for i in self.items.keys():
node = self.dataStr[0][0][i]
errs = self.getNodeErrors(node)
for s in self.sifs:
data[s][i] = errs[s]
self.dataDicts = [data]
def getNodeErrors(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
errs = adn.getErrors()[self.errType]
return errs
def setAxesXlim(self):
for ax in self.axes:
ax.set_xlim(self.xlim)
def setAxesYlim(self):
ymin, ymax = 10e16, 10e-16
for ax in self.axes:
y1, y2 = ax.get_ylim()
ymin = y1 if y1 < ymin else ymin
ymax = y2 if y2 > ymax else ymax
for ax in self.axes:
ax.set_ylim((ymin, ymax))
def setLegend(self, handles):
text = 'Node: '
labels = [text + str(i) for i in sorted(handles.keys())]
handles = [handles[i] for i in sorted(handles.keys())]
self.axes[0].legend(handles, labels, bbox_to_anchor=(1.02, 1),
loc=2, borderaxespad=0)
def createFigure(self):
self.axes = []
self.createFigureAxes()
handles = {}
for k in range(len(self.axes)):
s = self.sifs[k]
for i in self.items.keys():
n, b, p = self.axes[k].hist(
self.dataDicts[0][s][i],
self.items[i],
normed=True,
alpha=0.5)
handles[i] = p[0]
self.setAxesXlim()
self.setAxesYlim()
self.setLegend(handles)
self.setXlabels()
self.printQueueItems(self.items.keys())
class CorrCompPlot(SIMCompAnalysis):
def createCompCorrPlot(self, items, quantityType, ylim, fig):
self.fig = fig
self.items = items
self.qt = quantityType
self.ylim = ylim
self.createDataStr()
self.createDataDict()
self.createFigure()
def createDataStr(self):
dd = self.getItemNodeDict(self.items, self.queue)
data = [dd, None, 'analytical values', 'analysis vs analytical']
self.dataStr = [data]
def createDataDict(self):
dataX = {s: {} for s in self.sifs}
dataY = {s: {} for s in self.sifs}
for i in self.items:
node = self.dataStr[0][0][i]
anSol, res = self.getNodeParams(node)
for s in self.sifs:
dataX[s][i] = anSol[s]
dataY[s][i] = res[s]
self.dataDicts = [[dataX, dataY]]
def getNodeParams(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
anSol = adn.getAnSol()
res = adn.getDataByType(self.qt)
return anSol, res
def getReferenceXYVals(self):
minV = {s: 10e16 for s in self.sifs}
maxV = {s: -10e16 for s in self.sifs}
for s in self.sifs:
for i in self.items:
mn = min(self.dataDicts[0][0][s][i])
mx = max(self.dataDicts[0][0][s][i])
minV[s] = mn if mn < minV[s] else minV[s]
maxV[s] = mx if mx > maxV[s] else maxV[s]
if self.qt == 'results':
refX = {s: [minV[s], maxV[s]] for s in self.sifs}
return refX, refX
elif self.qt in ['difference', 'normedDiff']:
refX = {s: [max(0, minV[s]), maxV[s]] for s in self.sifs}
refY = {s: [0, 0] for s in self.sifs}
return refX, refY
else:
raise NotImplementedError
def getXYVals(self, sif, item):
if self.qt == 'results':
X = self.dataDicts[0][0][sif][item]
Y = self.dataDicts[0][1][sif][item]
elif self.qt in ['difference', 'normedDiff']:
X = np.abs(self.dataDicts[0][0][sif][item])
Y = self.dataDicts[0][1][sif][item]
else:
raise NotImplementedError
return X, Y
def createPlot(self):
self.handles = {}
refX, refY = self.getReferenceXYVals()
for k in range(len(self.axes)):
s = self.sifs[k]
for i in self.items:
alpha = self.calcAlphaVal(s, i)
X, Y = self.getXYVals(s, i)
p, = self.axes[k].plot(X, Y, '.', alpha=alpha)
self.handles[i] = p
r, = self.axes[k].plot(refX[s], refY[s], 'k', lw=1.5)
self.handles['reference'] = r
def setXLim(self):
refX, refY = self.getReferenceXYVals()
for k in range(len(self.axes)):
s = self.sifs[k]
self.axes[k].set_xlim(refX[s])
def setLegend(self):
text = 'Node: '
labels = [text + str(i) for i in self.items]
handles = [self.handles[i] for i in self.items]
if 'reference' in self.handles.keys():
handles.append(self.handles['reference'])
labels.append('ref line')
self.axes[0].legend(handles, labels, bbox_to_anchor=(1.02, 1),
loc=2, borderaxespad=0)
def setYLim(self):
if isinstance(self.ylim, (list, tuple)):
for ax in self.axes:
ax.set_ylim(self.ylim)
def createFigure(self):
self.axes = []
self.createFigureAxes()
self.createPlot()
self.setXLim()
self.setLegend()
self.printQueueItems(self.items)
self.setYLim()
class RangeCompPlot(SIMCompAnalysis):
def createCompRangePlot(self, items, opts, fig):
self.fig = fig
self.items = items
self.opts = opts
self.createDataStr()
self.createDataDict()
self.createFigure()
def createDataStr(self):
self.dataStr = []
qdict = self.queue.getQueueDict()
for k in sorted(self.items.keys()):
optSim = self.getOptSim(qdict[k])
data = [{k: qdict[k]}, optSim, 'angles',
self.getSubplotTitle(qdict[k])]
self.dataStr.append(data)
def getOptSim(self, node):
if self.opts['optSim']:
sims = node.getSuccessfulMembers()
optSim = pf.getSimIdsWithLowestErrorPerDH(
sims, self.crit[0], self.crit[1]).values()[0][0]
return optSim
else:
return None
def createDataDict(self):
self.dataDicts = []
for item in self.dataStr:
node = item[0].values()[0]
self.dataDicts.append(self.getNodeParams(node))
def getNodeParams(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
angles = adn.getAngles()
results = adn.getResults()
ansol = adn.getAnSol()
errors = adn.getErrors()[self.opts['errors']]
return angles, results, ansol, errors
def createSlices(self):
self.slices = []
i = 0
for k in sorted(self.items.keys()):
numInt = self.items[k]
angles = self.dataDicts[i][0]
sl = self.createSliceIndices(angles, numInt)
self.slices.append(sl)
i += 1
def createSliceIndices(self, vals, numInts):
intLen = (max(vals) - min(vals)) / float(numInts)
indices = [[] for i in range(numInts)]
for x in vals:
i = int(x / intLen)
if i < numInts - 1:
indices[i].append(x)
else:
indices[-1].append(x)
if [] in indices:
raise ValueError('Try reducing the number of intervals.')
sliceInd = [[] for i in range(numInts)]
for i in range(numInts):
minVal = indices[i][0]
maxVal = indices[i][-1]
ind0 = np.where(vals == minVal)[0][0]
ind1 = np.where(vals == maxVal)[-1][-1] + 1
sliceInd[i].append(ind0)
sliceInd[i].append(ind1)
sliceInd[-1][1] += 1
return sliceInd
def createFigure(self):
self.axes = []
self.createFigureAxes()
if self.opts['range']:
self.createSlices()
self.plotRangeArea()
if self.opts['dataPoints']:
self.createDataPointsPlot()
if self.opts['analytical']:
self.createAnSolPlot()
if self.opts['optSim']:
self.createOptSimPlot()
self.setXLim()
self.createLegend()
self.setSubplotTitles()
self.setYlimits()
def createLegend(self):
handles = []
labels = []
h, l = self.axes[0].get_legend_handles_labels()
ind = len(self.dataStr) - 1
self.axes[ind].legend(h, l, bbox_to_anchor=(1, 1.02), loc=2)
def setXLim(self):
for n in range(len(self.dataStr)):
i = self.getItemKey(n)
for sif in self.sifs:
ax = self.getAxes(i, sif)
angles = self.dataDicts[n][0]
ax.set_xlim((min(angles), max(angles)))
def createOptSimPlot(self):
for n in range(len(self.dataDicts)):
i = self.getItemKey(n)
ad = dp.AnalysisData(self.dataStr[n][1])
ad.calcAnSol()
ad.calculateStats()
angles = ad.getAngles()
for sif in self.sifs:
ax = self.getAxes(i, sif)
res = ad.getResults()[sif]
ax.plot(angles, res, 'lime', lw=1,
label='optSim')
def createDataPointsPlot(self):
for n in range(len(self.dataStr)):
i = self.getItemKey(n)
for sif in self.sifs:
angles = self.dataDicts[n][0]
ax = self.getAxes(i, sif)
for dt in self.opts['data']:
dInd, color = self.getDataIndAndColor(dt)
data = self.dataDicts[n][dInd][sif]
alpha = self.calcAlphaValRP(n)
ax.plot(angles, data,
linestyle='-', marker='.',
color=color, alpha=alpha,
label=dt)
def calcAlphaValRP(self, n):
vals = len(self.dataDicts[n][0])
if vals > 1000:
return 0.05
else:
return 0.3
def createAnSolPlot(self):
for n in range(len(self.items.keys())):
i = self.getItemKey(n)
for sif in self.sifs:
ax = self.getAxes(i, sif)
angles = self.dataDicts[n][0]
anSol = self.dataDicts[n][2][sif]
ax.plot(angles, anSol, 'k', lw=2,
label='analytical')
def getAxes(self, item, sif):
itemInd = sorted(self.items.keys()).index(item)
itemLen = len(self.items)
ax = self.axes[itemLen * self.sifs.index(sif) + itemInd]
return ax
def getItemKey(self, n):
return sorted(self.items.keys())[n]
def plotRangeArea(self):
for n in range(len(self.items)):
i = self.getItemKey(n)
for sif in self.sifs:
axes = self.getAxes(i, sif)
self.plotRangeAreaPerAxes(axes, n, sif)
def getDataIndAndColor(self, dataType):
dataInds = {'results': 1, 'errors': 3}
colors = {'results': 'b', 'errors': 'r'}
return dataInds[dataType], colors[dataType]
def createVerts(self, slices, angles, values, func):
x, y, verts = [], [], []
valsl = [values[s[0] - 1 if s[0] > 0 else 0:s[1]] for s in slices]
angsl = [angles[s[0] - 1 if s[0] > 0 else 0:s[1]] for s in slices]
for a in angsl:
x.append(a[0])
x.append(a[-1])
for v in valsl:
y.append(func(v))
y.append(func(v))
verts = [[xi, yi] for xi, yi in zip(x, y)]
return verts
def createVerts2(self, slices, angles, values, func):
x, y, verts = [], [], []
valsl = [values[s[0]:s[1]] for s in slices]
angsl = [angles[s[0]:s[1]] for s in slices]
for an, va in zip(angsl, valsl):
y.append(func(va))
ind = np.where(va == y[-1])[0][0]
x.append(an[ind])
x.append(angles[-1])
x.insert(0, angles[0])
yavg = 0.5 * (y[0] + y[-1])
y.append(yavg)
y.insert(0, yavg)
verts = [[xi, yi] for xi, yi in zip(x, y)]
return verts
def plotRangeAreaPerAxes(self, axes, itemInd, sif):
vertMethods = {1: self.createVerts, 2: self.createVerts2}
vertFunc = vertMethods[self.opts['rangeType']]
slices = self.slices[itemInd]
angles = self.dataDicts[itemInd][0]
for dt in self.opts['data']:
dInd, color = self.getDataIndAndColor(dt)
values = self.dataDicts[itemInd][dInd][sif]
verts1 = vertFunc(slices, angles, values, min)
verts2 = vertFunc(slices, angles, values, max)[::-1]
verts = verts1 + verts2 + [verts2[-1]]
codes = self.createClosedPathCodes(verts)
p = Path(verts, codes)
patch = mpl.patches.PathPatch(
p,
facecolor=color,
edgecolor='none',
alpha=0.2,
label=dt +
' range')
axes.add_patch(patch)
patch = mpl.patches.PathPatch(p, edgecolor=color,
fill=False, lw=0.75, alpha=0.6)
axes.add_patch(patch)
def createClosedPathCodes(self, verts):
codes = [Path.MOVETO]
for i in range(len(verts) - 2):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
return codes
class BoundsCompPlot(SIMCompAnalysis):
def createBoundsPlot(self, items, targets, fig, tol=0.1, iterLim=100):
self.items = items
self.targets = targets
self.fig = fig
self.iterLim = iterLim
self.tol = tol
self.createDataStr()
self.createDataDicts()
self.printStats()
self.createFigure()
def createDataStr(self):
self.dataStr = []
qdict = self.queue.getQueueDict()
for i in self.items:
dd = [{i: qdict[i]}, None, 'angles',
self.getSubplotTitle(qdict[i])]
self.dataStr.append(dd)
def createDataDicts(self):
self.dataDicts = []
for n in range(len(self.items)):
i = self.items[n]
log = {s: {t: {'sigma': [], 'pip': []}
for t in self.targets.keys()}
for s in self.sifs}
node = self.dataStr[n][0][i]
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
sigmaUp = 2 * adn.getAnSolParams()['sigma']
sigmaLow = 0
for s in self.sifs:
for t in self.targets.keys():
log[s][t] = self.findSigmaBound(
adn, sigmaUp, sigmaLow, s,
self.targets[t], log[s][t])
self.dataDicts.append([adn, log])
def printStats(self):
for n in range(len(self.dataStr)):
i = self.items[n]
print self.dataStr[n][3]
log = self.dataDicts[n][1]
for s in self.sifs:
sigmas, bounds, its = [], [], []
for t in log[s].keys():
u = log[s][t]
sigmas.append(u['sigma'][-1])
bounds.append(u['pip'][-1])
its.append(len(u['sigma']))
info = '{0}sigma=[{1:.4}, {2:.4}] | bounds=[{3:.4}%, {4:.4}%] | iterations=[{5}, {6}]'.format(
' {0} '.format(s), sigmas[0], sigmas[1], bounds[0], bounds[1], its[0], its[1])
print info
def createFigure(self):
self.axes = []
self.createFigureAxes()
self.createPlot()
self.setXLimits()
self.setYlimits()
self.setSubplotTitles()
def setXLimits(self):
for n in range(len(self.dataStr)):
i = self.items[n]
adn = self.dataDicts[n][0]
a = adn.getAngles()
lims = (min(a), max(a))
for s in self.sifs:
ax = self.getAxes(i, s)
ax.set_xlim(lims)
def getAxes(self, item, sif):
itemLen = len(self.items)
itemInd = self.items.index(item)
ax = self.axes[itemLen * self.sifs.index(sif) + itemInd]
return ax
def getAlphaVal(self, item):
n = self.items.index(item)
adn = self.dataDicts[n][0]
if len(adn.getAngles()) > 1000:
return 0.1
else:
return 1
def createPlot(self):
for n in range(len(self.dataStr)):
i = self.items[n]
adn = self.dataDicts[n][0]
logs = self.dataDicts[n][1]
alpha = self.getAlphaVal(i)
for s in self.sifs:
ax = self.getAxes(i, s)
sigmaUpper = logs[s]['upper']['sigma'][-1]
sigmaLower = logs[s]['lower']['sigma'][-1]
ins, outs = self.getInOutPoints(adn,
sigmaLower, sigmaUpper, s)
ax.plot(ins[0], ins[1], 'b.',
label='inside bounds', alpha=alpha)
ax.plot(outs[0], outs[1], 'r.',
label='outside bounds', alpha=alpha)
angles = adn.getAngles()
anSol = adn.getAnSol()[s]
ax.plot(angles, anSol, 'k', lw=1.5,
label='analytical')
lowerBound = adn.calcSIFsForSigmaAndSIF(
sigmaLower, s)
upperBound = adn.calcSIFsForSigmaAndSIF(
sigmaUpper, s)
ax.plot(angles, upperBound, 'lime', lw=1.5,
label='bounds')
ax.plot(angles, lowerBound, 'lime', lw=1.5)
def findSigmaBound(self, adn, sigmaUp, sigmaLow,
sif, target, log):
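        # Bisection on sigma: evaluate the midpoint of [sigmaLow, sigmaUp], log the
        # percentage of points inside the resulting contour ('pip'), and recurse on the
        # half-interval until pip is within +/- self.tol of target or iterLim is reached.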
sigma = 0.5 * (sigmaUp + sigmaLow)
pip = self.getPercentPointsInPoly(adn, sigma, sif)
log['pip'].append(pip)
log['sigma'].append(sigma)
if ((pip >= target - self.tol and pip <= target + self.tol) or
(len(log['sigma']) == self.iterLim)):
return log
elif pip < target - self.tol:
sigmaLow = sigma
return self.findSigmaBound(adn, sigmaUp, sigmaLow,
sif, target, log)
elif pip > target + self.tol:
sigmaUp = sigma
return self.findSigmaBound(adn, sigmaUp, sigmaLow,
sif, target, log)
else:
raise ValueError('unexpected condition reached')
def getPercentPointsInPoly(self, adn, sigma, sif):
allnum, numin, numout = self.countPointInOutOfContour(
adn, sigma, sif)
assert abs(numin + numout - allnum) < 10e-8
return float(numin) / float(allnum) * 100
def countPointInOutOfContour(self, adn, sigma, sif):
tfl = self.getInOutOfContour(adn, sigma, sif)
numin = np.sum(tfl)
allnum = len(tfl)
numout = allnum - numin
return allnum, numin, numout
def getInOutOfContour(self, adn, sigma, sif):
angles = adn.getAngles()
results = abs(adn.getResults()[sif])
points = [[xi, yi] for xi, yi in zip(angles, results)]
yVals = abs(np.array(adn.calcSIFsForSigmaAndSIF(sigma, sif)))
return self.getInOutPointsArray(angles, yVals, points)
def getInOutPointsArray(self, angles, yVals, points):
path = Path(self.createVertsForPolyPath(angles, yVals))
return path.contains_points(points, radius=0)
def getInOutPoints(self, adn, sigmaLow, sigmaUp, sif):
inoutLow = self.getInOutOfContour(adn, sigmaLow, sif)
inoutUp = self.getInOutOfContour(adn, sigmaUp, sif)
angles = adn.getAngles()
res = adn.getResults()[sif]
inAngles, inVals = [], []
outAngles, outVals = [], []
for i in range(len(inoutUp)):
if inoutLow[i] or not inoutUp[i]:
outAngles.append(angles[i])
outVals.append(res[i])
else:
inAngles.append(angles[i])
inVals.append(res[i])
return [[inAngles, inVals], [outAngles, outVals]]
def createVertsForPolyPath(self, x, y):
verts = [[xi, yi] for xi, yi in zip(x, y)]
verts.insert(0, [verts[0][0], -10e16])
verts.append([verts[-1][0], -10e16])
return verts
| mit |
slarosa/QGIS | python/plugins/sextante/algs/MeanAndStdDevPlot.py | 3 | 3304 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from PyQt4.QtCore import *
from qgis.core import *
from sextante.parameters.ParameterTable import ParameterTable
from sextante.parameters.ParameterTableField import ParameterTableField
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.outputs.OutputHTML import OutputHTML
from sextante.tools import *
from sextante.core.QGisLayers import QGisLayers
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NAME_FIELD = "NAME_FIELD"
MEAN_FIELD = "MEAN_FIELD"
STDDEV_FIELD = "STDDEV_FIELD"
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = QGisLayers.getObjectFromUri(uri)
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, namefieldname, meanfieldname, stddevfieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width,
color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'))
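        # yerr draws one-standard-deviation error bars (from the StdDev field) on each mean bar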
plt.xticks(ind, values[namefieldname], rotation = 45)
plotFilename = output +".png"
lab.savefig(plotFilename)
f = open(output, "w")
f.write("<img src=\"" + plotFilename + "\"/>")
f.close()
def defineCharacteristics(self):
self.name = "Mean and standard deviation plot"
self.group = "Graphics"
self.addParameter(ParameterTable(self.INPUT, "Input table"))
self.addParameter(ParameterTableField(self.NAME_FIELD, "Category name field", self.INPUT,ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD, "Mean field", self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD, "StdDev field", self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, "Output"))
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_topo_compare_conditions.py | 3 | 2175 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and
auditory responses is created. Both conditions
are then accessed by their respective names to
create a sensor layout plot of the related
evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
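# Because the event names above use '/' separators, epochs['left'] pools the
# 'audio/left' and 'visual/left' epochs before averaging (likewise for 'right').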
###############################################################################
# Show topography for two different conditions
colors = 'yellow', 'green'
title = 'MNE sample data - left vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title)
plt.show()
| bsd-3-clause |
rupakc/Kaggle-Compendium | Santas Stolen Sleigh/SantaUtil.py | 1 | 6924 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 23:21:29 2016
Defines a set of utility functions to be used for prediction
@author: Rupak Chakraborty
"""
import math
from trip import Trip
from gift import Gift
import random
import time
import pandas as pd
import operator
RADIUS_EARTH = 6371  # mean Earth radius in km
NORTH_POLE_LAT = 90
NORTH_POLE_LONG = 0
EMPTY_SLEIGH_WEIGHT = 10
SLEIGH_CAPACITY = 1000
random.seed(time.time())
gift_filename = "Santa's Stolen Sleigh/gifts.csv"
"""
Calculates the haversine distance between two given points
The two points are the values of the latitude and longitude
in degrees
Params:
--------
lat_first - Latitude of the first point
long_first - Longitude of the first point
lat_second - Latitude of second point
long_second - Longitude of the second point
Returns:
---------
The haversine distance between the two given points i.e. a float
"""
def haversineDistance(lat_first,long_first,lat_second,long_second):
lat_first = math.radians(lat_first)
long_first = math.radians(long_first)
lat_second = math.radians(lat_second)
long_second = math.radians(long_second)
sine_squared_lat = math.pow(math.sin((lat_first-lat_second)/2.0),2.0)
sine_squared_long = math.pow(math.sin((long_first-long_second)/2.0),2.0)
cos_lat_term = math.cos(lat_first)*math.cos(lat_second)*sine_squared_long
total_term = cos_lat_term + sine_squared_lat
distance = 2*RADIUS_EARTH*math.asin(math.sqrt(total_term))
return distance
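# Usage sketch (illustrative coordinates, not taken from the competition data):
# haversineDistance(NORTH_POLE_LAT, NORTH_POLE_LONG, 16.3, 6.3) gives the great-circle
# distance from the North Pole to (16.3 N, 6.3 E) in the units of RADIUS_EARTH.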
"""
Defines the fitness function for the trip list i.e. all deliveries
The total fitness is defined as the weighted sum of distances
Params:
--------
trip_list: A List of trips which Santa needs to take (A list containing the trip object)
Returns:
---------
Total Cost of the given trip list (i.e. Fitness)
"""
def tripFitness(trip_list):
total_cost = 0
for trip in trip_list:
total_cost = total_cost + trip.trip_cost
return total_cost
"""
Given a list of gifts calculates the cost of the trip (i.e. Weighted Distance)
Params:
--------
gift_list: A list of gifts in the order in which they have to be delivered
Returns:
---------
Cost of the trip with the given order of gifts (i.e. A Floating point number)
"""
def tripCost(gift_list):
gift_size = len(gift_list)
initial_gift_weight = tripWeightUtil(gift_list,0,gift_size-1)
weighted_distance = initial_gift_weight*haversineDistance(NORTH_POLE_LAT,NORTH_POLE_LONG,gift_list[0].latitude,gift_list[0].longitude)
for i in range(gift_size-1):
remaining_weight = tripWeightUtil(gift_list,i+1,gift_size-1)
distance = haversineDistance(gift_list[i].latitude,gift_list[i].longitude,gift_list[i+1].latitude,gift_list[i+1].longitude)
weighted_distance = weighted_distance + remaining_weight*distance
returning_distance = haversineDistance(gift_list[gift_size-1].latitude,gift_list[gift_size-1].longitude,NORTH_POLE_LAT,NORTH_POLE_LONG)
weighted_distance = weighted_distance + EMPTY_SLEIGH_WEIGHT*returning_distance
return weighted_distance
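# Sketch of the cost model implemented above (illustrative notation):
#   cost = sum_i weight_still_on_sleigh(i) * haversine(stop_{i-1}, stop_i)
#          + EMPTY_SLEIGH_WEIGHT * haversine(last stop, North Pole)
# where weight_still_on_sleigh(i) sums the weights of every gift not yet delivered.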
"""
Utility function to calculate the cumulative weight of gifts in a given range
Both ends of the range are included
Params:
--------
gift_list : List of gift objects
start_index : Starting index for gift list
end_index : Ending index of the gift list
Returns:
---------
Returns the sum of weights in a given range
"""
def tripWeightUtil(gift_list,start_index,end_index):
total_weight = 0
while start_index <= end_index:
total_weight = total_weight + gift_list[start_index].weight
start_index = start_index + 1
return total_weight
"""
Applies the mutation operator on trip list i.e. swaps two trips
Params:
-------
trip_list: List containing the trips taken by Santa
Returns:
--------
A new list containing the trip list with values swapped
"""
def mutateTripList(trip_list):
i,j = generateSwapIndices(len(trip_list))
temp = trip_list[i]
trip_list[i] = trip_list[j]
trip_list[j] = temp
return trip_list
"""
Applies the mutation operator on the gift list i.e. swaps two gifts in a list
Params:
-------
gift_list: List containing the gifts taken by Santa
Returns:
--------
A new list containing the gift list with values swapped
"""
def mutateGiftList(gift_list):
i,j = generateSwapIndices(len(gift_list))
temp = gift_list[i]
gift_list[i] = gift_list[j]
gift_list[j] = temp
return gift_list
"""
Utility function to generate two distinct random integers from zero to a given range
Params:
--------
max_size: Integer containing the maximum limit for generation of the random integers
Returns:
--------
Two distinct random integers between 0 and a given max_size
"""
def generateSwapIndices(max_size):
a = random.randint(0,max_size-1)
b = random.randint(0,max_size-1)
    while b == a:
        b = random.randint(0,max_size-1)
return a,b
"""
Returns the dataFrame containing the gift information
Params:
-------
String containing the filename from which the information is to be extracted
Returns:
--------
Pandas Dataframe object containing the gift information
"""
def getGiftList(filename):
giftFrame = pd.read_csv(filename)
gift_list = list([])
for i in range(len(giftFrame)):
gift_series = giftFrame.iloc[i]
gift = Gift(gift_series.GiftId,gift_series.Latitude,gift_series.Longitude,gift_series.Weight)
gift_list.append(gift)
return gift_list;
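# Usage sketch (assumes the gifts.csv layout referenced by gift_filename above):
#   gifts = getGiftList(gift_filename)
#   print len(gifts), gifts[0].weight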
"""
Sorts a given map by the values and returns a list containing th sorted tuples
"""
def sortMapByValues(map_to_sort):
sorted_map = sorted(map_to_sort.items(), key=operator.itemgetter(1),reverse=False)
return sorted_map
"""
Sorts the given population by its fitness value
Params:
-------
initial_population: List containing the initial population
Returns:
--------
List of tuples containing the indices of the initial population and its fitness
"""
def sortPopulationByFitness(initial_population):
i = 0;
fitness_population_map = {}
for trip_gene in initial_population:
fitness_population_map[i] = tripFitness(trip_gene)
i = i + 1
ordered_fitness_list = sortMapByValues(fitness_population_map)
return ordered_fitness_list
"""
Given all the trips in a list returns the one with the maximum cost and its index
Params:
---------
trip_list: List of trips to be taken for delivery
Returns:
--------
The trip with the maximum cost and its corresponding index
"""
def maximumTripCost(trip_list):
index = 0
max_trip = trip_list[0]
for i,trip in enumerate(trip_list):
        if trip.trip_cost > max_trip.trip_cost:
            max_trip = trip
            index = i
    return index,max_trip
| mit |
nesterione/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
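# As the docstring notes, a much sparser graph (e.g. kneighbors_graph(X, 5,
# include_self=False), not used below) pushes the behaviour towards single linkage.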
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
shuangshuangwang/spark | python/pyspark/sql/tests/test_pandas_udf.py | 1 | 10216 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StructType, StructField, LongType
from pyspark.sql.utils import ParseException, PythonException
from pyspark.rdd import PythonEvalType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid return type.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid return type'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid return type'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
PythonException,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
PythonException,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
PythonException,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
PythonException,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
PythonException,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
def test_pandas_udf_detect_unsafe_type_conversion(self):
import pandas as pd
import numpy as np
values = [1.0] * 3
pdf = pd.DataFrame({'A': values})
df = self.spark.createDataFrame(pdf).repartition(1)
@pandas_udf(returnType="int")
def udf(column):
return pd.Series(np.linspace(0, 1, len(column)))
# Since 0.11.0, PyArrow supports the feature to raise an error for unsafe cast.
with self.sql_conf({
"spark.sql.execution.pandas.convertToArrowArraySafely": True}):
with self.assertRaisesRegexp(Exception,
"Exception thrown when converting pandas.Series"):
df.select(['A']).withColumn('udf', udf('A')).collect()
# Disabling Arrow safe type check.
with self.sql_conf({
"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
df.select(['A']).withColumn('udf', udf('A')).collect()
def test_pandas_udf_arrow_overflow(self):
import pandas as pd
df = self.spark.range(0, 1)
@pandas_udf(returnType="byte")
def udf(column):
return pd.Series([128] * len(column))
# When enabling safe type check, Arrow 0.11.0+ disallows overflow cast.
with self.sql_conf({
"spark.sql.execution.pandas.convertToArrowArraySafely": True}):
with self.assertRaisesRegexp(Exception,
"Exception thrown when converting pandas.Series"):
df.withColumn('udf', udf('id')).collect()
# Disabling safe type check, let Arrow do the cast anyway.
with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
df.withColumn('udf', udf('id')).collect()
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
kkouer/PcGcs | Lib/site-packages/numpy/core/code_generators/ufunc_docstrings.py | 57 | 85797 | # Docstrings for generated ufuncs
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
y : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `x`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1`   `x2`   `arctan2(x1,x2)`
====== ====== ================
+/- 0  +0     +/- 0
+/- 0  -0     +/- pi
> 0    +/-inf +0 / +pi
< 0    +/-inf -0 / -pi
+/-inf +inf   +/- (pi/4)
+/-inf -inf   +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
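The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on
ndarrays:
>>> np.array([2, 5, 255]) & np.array([3, 14, 16])
array([ 2, 4, 16])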
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
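The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on
ndarrays:
>>> np.array([31, 3]) ^ np.array([5, 6])
array([26, 5])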
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.empty(1)
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros(rad.shape)
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
exp : Calculate the exponential of all elements in the input array.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled; use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy, however, uses the definition of
`floor` such that `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
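The ``//`` operator can be used as a shorthand for ``np.floor_divide`` on
ndarrays:
>>> np.array([1., 2., 3., 4.]) // 2.5
array([ 0., 0., 1., 1.])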
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function ``fmod``; it
differs from the Python modulo operator ``%`` (see `remainder`).
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the result is the sign of the
divisor.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
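The ``>=`` operator can be used as a shorthand for ``np.greater_equal`` on
ndarrays:
>>> np.array([4, 2, 1]) >= np.array([2, 2, 2])
array([ True, True, False], dtype=bool)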
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
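For scalar inputs, the familiar 3-4-5 right triangle gives:
>>> np.hypot(3, 4)
5.0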
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
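The ``~`` operator can be used as a shorthand for ``np.invert`` on
ndarrays:
>>> ~np.array([True, False])
array([False, True], dtype=bool)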
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity and not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity; a bare
infinity, however, is treated as positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Return a bool-type array, the same shape as `x`, True where ``x ==
+/-inf``, False everywhere else.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or bool-type ndarray
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
shape as the input and the values are True where the
corresponding element of the input is positive or negative
infinity; elsewhere the values are False. If a second argument
was supplied the result is stored there. If the type of that array
is a numeric type the result is represented as zeros and ones, if
the type is boolean then as False and True, respectively.
The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
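As noted above, shifting `x1` left by `x2` bits is the same as multiplying
`x1` by ``2**x2``:
>>> np.left_shift(5, 2) == 5 * 2**2
True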
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
Equivalent to ``np.where(x1 > x2, x1, x2)`` when neither x1 nor x2 are
nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
    array([1, 3, 2])
    >>> np.fmin(np.eye(2), [0.5, 2])
    array([[ 0.5,  0. ],
           [ 0. ,  1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
    (-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
    Equivalent to ``b = a.copy(); b.fill(1)``.
Please refer to the documentation for `zeros_like` for further details.
See Also
--------
zeros_like, ones
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x: array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output.
See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1: array_like
Values to change the sign of.
x2: array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1: array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
(A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.)
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
    >>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| gpl-3.0 |
bdestombe/flopy-1 | flopy/utils/mflistfile.py | 1 | 25207 | """
This is a set of classes for reading budget information out of MODFLOW-style
listing files. Cumulative and incremental budgets are returned as numpy
recarrays, which can then be easily plotted.
"""
import collections
import os
import re
import sys
from datetime import timedelta
import numpy as np
from ..utils.utils_def import totim_to_datetime
class ListBudget(object):
"""
MODFLOW family list file handling
Parameters
----------
file_name : str
the list file name
budgetkey : str
the text string identifying the budget table. (default is None)
timeunit : str
the time unit to return in the recarray. (default is 'days')
Notes
-----
The ListBudget class should not be instantiated directly. Access is
through derived classes: MfListBudget (MODFLOW), SwtListBudget (SEAWAT)
and SwrListBudget (MODFLOW with the SWR process)
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> incremental, cumulative = mf_list.get_budget()
    >>> df_flux, df_vol = mf_list.get_dataframes(start_datetime="10-21-2015")
"""
def __init__(self, file_name, budgetkey=None, timeunit='days'):
# Set up file reading
assert os.path.exists(file_name)
self.file_name = file_name
if sys.version_info[0] == 2:
self.f = open(file_name, 'r')
elif sys.version_info[0] == 3:
self.f = open(file_name, 'r', encoding='ascii', errors='replace')
self.tssp_lines = 0
# Assign the budgetkey, which should have been overriden
if budgetkey is None:
self.set_budget_key()
else:
self.budgetkey = budgetkey
self.totim = []
self.timeunit = timeunit
self.idx_map = []
self.entries = []
self.null_entries = []
self.time_line_idx = 20
if timeunit.upper() == 'SECONDS':
self.timeunit = 'S'
self.time_idx = 0
elif timeunit.upper() == 'MINUTES':
self.timeunit = 'M'
self.time_idx = 1
elif timeunit.upper() == 'HOURS':
self.timeunit = 'H'
self.time_idx = 2
elif timeunit.upper() == 'DAYS':
self.timeunit = 'D'
self.time_idx = 3
elif timeunit.upper() == 'YEARS':
self.timeunit = 'Y'
self.time_idx = 4
else:
raise Exception('need to reset time_idxs attribute to '
'use units other than days and check usage of '
'timedelta')
# Fill budget recarrays
self._load()
self._isvalid = False
if len(self.idx_map) > 0:
self._isvalid = True
# Close the open file
self.f.close()
# return
return
def set_budget_key(self):
raise Exception('Must be overridden...')
def isvalid(self):
"""
Get a boolean indicating if budget data are available in the file.
Returns
-------
out : boolean
Boolean indicating if budget data are available in the file.
Examples
--------
>>> mf_list = MfListBudget('my_model.list')
>>> valid = mf_list.isvalid()
"""
return self._isvalid
def get_record_names(self):
"""
Get a list of water budget record names in the file.
Returns
-------
out : list of strings
List of unique text names in the binary file.
Examples
--------
>>> mf_list = MfListBudget('my_model.list')
>>> names = mf_list.get_record_names()
"""
if not self._isvalid:
return None
return self.inc.dtype.names
def get_times(self):
"""
Get a list of unique water budget times in the list file.
Returns
-------
out : list of floats
List contains unique water budget simulation times (totim) in list file.
Examples
--------
>>> mf_list = MfListBudget('my_model.list')
>>> times = mf_list.get_times()
"""
if not self._isvalid:
return None
return self.inc['totim'].tolist()
def get_kstpkper(self):
"""
Get a list of unique stress periods and time steps in the list file
water budgets.
Returns
----------
out : list of (kstp, kper) tuples
List of unique kstp, kper combinations in list file. kstp and
kper values are zero-based.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> kstpkper = mf_list.get_kstpkper()
"""
if not self._isvalid:
return None
kstpkper = []
for kstp, kper in zip(self.inc['time_step'],
self.inc['stress_period']):
kstpkper.append((kstp, kper))
return kstpkper
def get_incremental(self, names=None):
"""
Get a recarray with the incremental water budget items in the list file.
Parameters
----------
names : str or list of strings
Selection of column names to return. If names is not None then
totim, time_step, stress_period, and selection(s) will be returned.
(default is None).
Returns
-------
out : recarray
Numpy recarray with the water budget items in list file. The
recarray also includes totim, time_step, and stress_period.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> incremental = mf_list.get_incremental()
"""
if not self._isvalid:
return None
if names is None:
return self.inc
else:
if not isinstance(names, list):
names = [names]
names.insert(0, 'stress_period')
names.insert(0, 'time_step')
names.insert(0, 'totim')
return self.inc[names].view(np.recarray)
def get_cumulative(self, names=None):
"""
Get a recarray with the cumulative water budget items in the list file.
Parameters
----------
names : str or list of strings
Selection of column names to return. If names is not None then
totim, time_step, stress_period, and selection(s) will be returned.
(default is None).
Returns
-------
out : recarray
Numpy recarray with the water budget items in list file. The
recarray also includes totim, time_step, and stress_period.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> cumulative = mf_list.get_cumulative()
"""
if not self._isvalid:
return None
if names is None:
return self.cum
else:
if not isinstance(names, list):
names = [names]
names.insert(0, 'stress_period')
names.insert(0, 'time_step')
names.insert(0, 'totim')
return self.cum[names].view(np.recarray)
def get_budget(self, names=None):
"""
Get the recarrays with the incremental and cumulative water budget items
in the list file.
Parameters
----------
names : str or list of strings
Selection of column names to return. If names is not None then
totim, time_step, stress_period, and selection(s) will be returned.
(default is None).
Returns
-------
out : recarrays
Numpy recarrays with the water budget items in list file. The
recarray also includes totim, time_step, and stress_period. A
separate recarray is returned for the incremental and cumulative
water budget entries.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> budget = mf_list.get_budget()
"""
if not self._isvalid:
return None
if names is None:
return self.inc, self.cum
else:
if not isinstance(names, list):
names = [names]
names.insert(0, 'stress_period')
names.insert(0, 'time_step')
names.insert(0, 'totim')
return self.inc[names].view(np.recarray), self.cum[names].view(
np.recarray)
def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
"""
Get water budget data from the list file for the specified conditions.
Parameters
----------
idx : int
The zero-based record number. The first record is record 0.
(default is None).
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values. (default is None).
totim : float
The simulation time. (default is None).
incremental : bool
Boolean flag used to determine if incremental or cumulative water
budget data for the specified conditions will be returned. If
incremental=True, incremental water budget data will be returned.
If incremental=False, cumulative water budget data will be
returned. (default is False).
Returns
-------
data : numpy recarray
Array has size (number of budget items, 3). Recarray names are 'index',
'value', 'name'.
See Also
--------
Notes
-----
if both kstpkper and totim are None, will return the last entry
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import flopy
>>> mf_list = flopy.utils.MfListBudget("my_model.list")
>>> data = mf_list.get_data(kstpkper=(0,0))
>>> plt.bar(data['index'], data['value'])
>>> plt.xticks(data['index'], data['name'], rotation=45, size=6)
>>> plt.show()
"""
if not self._isvalid:
return None
ipos = None
if kstpkper is not None:
try:
ipos = self.get_kstpkper().index(kstpkper)
except:
pass
elif totim is not None:
try:
ipos = self.get_times().index(totim)
except:
pass
elif idx is not None:
ipos = idx
else:
ipos = -1
if ipos is None:
print('Could not find specified condition.')
print(' kstpkper = {}'.format(kstpkper))
print(' totim = {}'.format(totim))
return None
if incremental:
t = self.inc[ipos]
else:
t = self.cum[ipos]
dtype = np.dtype(
[('index', np.int32), ('value', np.float32), ('name', '|S25')])
v = np.recarray(shape=(len(self.inc.dtype.names[3:])), dtype=dtype)
for i, name in enumerate(self.inc.dtype.names[3:]):
mult = 1.
if '_OUT' in name:
mult = -1.
v[i]['index'] = i
v[i]['value'] = mult * t[name]
v[i]['name'] = name
return v
    def get_dataframes(self, start_datetime='1-1-1970', diff=False):
"""
Get pandas dataframes with the incremental and cumulative water budget
items in the list file.
Parameters
----------
start_datetime : str
If start_datetime is passed as None, the rows are indexed on totim.
Otherwise, a DatetimeIndex is set. (default is 1-1-1970).
Returns
-------
out : panda dataframes
Pandas dataframes with the incremental and cumulative water budget
items in list file. A separate pandas dataframe is returned for the
incremental and cumulative water budget entries.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> incrementaldf, cumulativedf = mf_list.get_dataframes()
"""
try:
import pandas as pd
except Exception as e:
raise Exception(
"ListBudget.get_dataframe() error import pandas: " + \
str(e))
if not self._isvalid:
return None
totim = self.get_times()
if start_datetime is not None:
totim = totim_to_datetime(totim,
start=pd.to_datetime(start_datetime),
timeunit=self.timeunit)
df_flux = pd.DataFrame(self.inc, index=totim).loc[:, self.entries]
df_vol = pd.DataFrame(self.cum, index=totim).loc[:, self.entries]
if not diff:
return df_flux, df_vol
else:
in_names = [col for col in df_flux.columns if col.endswith("_IN")]
out_names = [col for col in df_flux.columns if col.endswith("_OUT")]
#print(in_names,out_names)
#print(df_flux.columns)
base_names = [name.replace("_IN",'') for name in in_names]
for name in base_names:
in_name = name + "_IN"
out_name = name + "_OUT"
df_flux.loc[:,name.lower()] = df_flux.loc[:,in_name] - df_flux.loc[:,out_name]
df_flux.pop(in_name)
df_flux.pop(out_name)
df_vol.loc[:,name.lower()] = df_vol.loc[:,in_name] - df_vol.loc[:,out_name]
df_vol.pop(in_name)
df_vol.pop(out_name)
cols = list(df_flux.columns)
cols.sort()
cols = [col.lower() for col in cols]
df_flux.columns = cols
df_vol.columns = cols
return df_flux, df_vol
def _build_index(self, maxentries):
self.idx_map = self._get_index(maxentries)
return
def _get_index(self, maxentries):
# --parse through the file looking for matches and parsing ts and sp
idxs = []
l_count = 1
while True:
seekpoint = self.f.tell()
line = self.f.readline()
if line == '':
break
if self.budgetkey in line:
for l in range(self.tssp_lines):
line = self.f.readline()
try:
ts, sp = self._get_ts_sp(line)
except:
print('unable to cast ts,sp on line number', l_count,
' line: ', line)
break
# print('info found for timestep stress period',ts,sp)
idxs.append([ts, sp, seekpoint])
if maxentries and len(idxs) >= maxentries:
break
return idxs
def _seek_to_string(self, s):
"""
Parameters
----------
s : str
Seek through the file to the next occurrence of s. Return the
seek location when found.
Returns
-------
seekpoint : int
Next location of the string
"""
while True:
seekpoint = self.f.tell()
line = self.f.readline()
if line == '':
break
if s in line:
break
return seekpoint
def _get_ts_sp(self, line):
"""
From the line string, extract the time step and stress period numbers.
"""
# Old method. Was not generic enough.
# ts = int(line[self.ts_idxs[0]:self.ts_idxs[1]])
# sp = int(line[self.sp_idxs[0]:self.sp_idxs[1]])
# Get rid of nasty things
line = line.replace(',', '')
searchstring = 'TIME STEP'
idx = line.index(searchstring) + len(searchstring)
ll = line[idx:].strip().split()
ts = int(ll[0])
searchstring = 'STRESS PERIOD'
idx = line.index(searchstring) + len(searchstring)
ll = line[idx:].strip().split()
sp = int(ll[0])
return ts, sp
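    # Editor's note (illustrative, not part of the original flopy source):
    # _get_ts_sp above is aimed at list-file header lines of the general form
    #   "VOLUMETRIC BUDGET FOR ENTIRE MODEL AT END OF TIME STEP    1, STRESS PERIOD    1"
    # from which it strips the commas and returns the integers (1, 1).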
def _set_entries(self):
if len(self.idx_map) < 1:
return None, None
if len(self.entries) > 0:
raise Exception('entries already set:' + str(self.entries))
if not self.idx_map:
raise Exception('must call build_index before call set_entries')
try:
incdict, cumdict = self._get_sp(self.idx_map[0][0],
self.idx_map[0][1],
self.idx_map[0][2])
except:
raise Exception('unable to read budget information from first '
'entry in list file')
self.entries = incdict.keys()
null_entries = collections.OrderedDict()
incdict = collections.OrderedDict()
cumdict = collections.OrderedDict()
for entry in self.entries:
incdict[entry] = []
cumdict[entry] = []
null_entries[entry] = np.NaN
self.null_entries = [null_entries, null_entries]
return incdict, cumdict
def _load(self, maxentries=None):
self._build_index(maxentries)
incdict, cumdict = self._set_entries()
if incdict is None and cumdict is None:
return
totim = []
for ts, sp, seekpoint in self.idx_map:
tinc, tcum = self._get_sp(ts, sp, seekpoint)
for entry in self.entries:
incdict[entry].append(tinc[entry])
cumdict[entry].append(tcum[entry])
# Get the time for this record
seekpoint = self._seek_to_string('TIME SUMMARY AT END')
tslen, sptim, tt = self._get_totim(ts, sp, seekpoint)
totim.append(tt)
# get kstp and kper
idx_array = np.array(self.idx_map)
# build dtype for recarray
dtype_tups = [('totim', np.float32), ("time_step", np.int32),
("stress_period", np.int32)]
for entry in self.entries:
dtype_tups.append((entry, np.float32))
dtype = np.dtype(dtype_tups)
# create recarray
nentries = len(incdict[entry])
self.inc = np.recarray(shape=(nentries,), dtype=dtype)
self.cum = np.recarray(shape=(nentries,), dtype=dtype)
# fill each column of the recarray
for entry in self.entries:
self.inc[entry] = incdict[entry]
self.cum[entry] = cumdict[entry]
# file the totim, time_step, and stress_period columns for the
# incremental and cumulative recarrays (zero-based kstp,kper)
self.inc['totim'] = np.array(totim)[:]
self.inc["time_step"] = idx_array[:, 0] - 1
self.inc["stress_period"] = idx_array[:, 1] - 1
self.cum['totim'] = np.array(totim)[:]
self.cum["time_step"] = idx_array[:, 0] - 1
self.cum["stress_period"] = idx_array[:, 1] - 1
return
def _get_sp(self, ts, sp, seekpoint):
self.f.seek(seekpoint)
# --read to the start of the "in" budget information
while True:
line = self.f.readline()
if line == '':
print(
'end of file found while seeking budget information for ts,sp',
ts, sp)
return self.null_entries
# --if there are two '=' in this line, then it is a budget line
if len(re.findall('=', line)) == 2:
break
tag = 'IN'
incdict = collections.OrderedDict()
cumdict = collections.OrderedDict()
while True:
if line == '':
# raise Exception('end of file found while seeking budget information')
print(
'end of file found while seeking budget information for ts,sp',
ts, sp)
return self.null_entries
if len(re.findall('=', line)) == 2:
try:
entry, flux, cumu = self._parse_budget_line(line)
                except Exception:
print('error parsing budget line in ts,sp', ts, sp)
return self.null_entries
if flux is None:
print(
'error casting in flux for', entry,
' to float in ts,sp',
ts, sp)
return self.null_entries
if cumu is None:
print(
'error casting in cumu for', entry,
' to float in ts,sp',
ts, sp)
return self.null_entries
if entry.endswith(tag.upper()):
if ' - ' in entry.upper():
key = entry.replace(' ', '')
else:
key = entry.replace(' ', '_')
elif 'PERCENT DISCREPANCY' in entry.upper():
key = entry.replace(' ', '_')
else:
key = '{}_{}'.format(entry.replace(' ', '_'), tag)
incdict[key] = flux
cumdict[key] = cumu
else:
if 'OUT:' in line.upper():
tag = 'OUT'
line = self.f.readline()
if entry.upper() == 'PERCENT DISCREPANCY':
break
return incdict, cumdict
def _parse_budget_line(self, line):
# get the budget item name
entry = line.strip().split('=')[0].strip()
# get the cumulative string
idx = line.index('=') + 1
line2 = line[idx:]
ll = line2.strip().split()
cu_str = ll[0]
idx = line2.index('=') + 1
fx_str = line2[idx:].strip()
#
# cu_str = line[self.cumu_idxs[0]:self.cumu_idxs[1]]
# fx_str = line[self.flux_idxs[0]:self.flux_idxs[1]]
flux, cumu = None, None
try:
cumu = float(cu_str)
except:
if 'NAN' in cu_str.strip().upper():
cumu = np.NaN
try:
flux = float(fx_str)
except:
if 'NAN' in fx_str.strip().upper():
flux = np.NaN
return entry, flux, cumu
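    # Editor's note (illustrative, not part of the original flopy source): a
    # budget line carries the cumulative volume after the first '=' and the
    # rate for the current time step after the second '=', roughly like
    #   "   CONSTANT HEAD =   123.4567       CONSTANT HEAD =     5.6789"
    # which _parse_budget_line returns as ('CONSTANT HEAD', 5.6789, 123.4567).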
def _get_totim(self, ts, sp, seekpoint):
self.f.seek(seekpoint)
# --read header lines
ihead = 0
while True:
line = self.f.readline()
ihead += 1
if line == '':
print(
'end of file found while seeking time information for ts,sp',
ts, sp)
                return np.NaN, np.NaN, np.NaN
elif ihead == 2 and 'SECONDS MINUTES HOURS DAYS YEARS' not in line:
break
elif '-----------------------------------------------------------' in line:
line = self.f.readline()
break
tslen = self._parse_time_line(line)
        if tslen is None:
            print('error parsing tslen for ts,sp', ts, sp)
            return np.NaN, np.NaN, np.NaN
sptim = self._parse_time_line(self.f.readline())
        if sptim is None:
            print('error parsing sptim for ts,sp', ts, sp)
            return np.NaN, np.NaN, np.NaN
totim = self._parse_time_line(self.f.readline())
        if totim is None:
            print('error parsing totim for ts,sp', ts, sp)
            return np.NaN, np.NaN, np.NaN
return tslen, sptim, totim
def _parse_time_line(self, line):
if line == '':
print('end of file found while parsing time information')
return None
try:
time_str = line[self.time_line_idx:]
raw = time_str.split()
idx = self.time_idx
# catch case where itmuni is undefined
# in this case, the table format is different
try:
v = float(raw[0])
except:
time_str = line[45:]
raw = time_str.split()
idx = 0
tval = float(raw[idx])
except:
print('error parsing tslen information', time_str)
return None
return tval
class SwtListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'MASS BUDGET FOR ENTIRE MODEL'
return
class MfListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
return
class MfusgListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
return
class SwrListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'VOLUMETRIC SURFACE WATER BUDGET FOR ENTIRE MODEL'
self.tssp_lines = 1
return
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
preprocessed-connectomes-project/quality-assessment-protocol | scripts/qap_check_output_csv.py | 1 | 1302 | #!/usr/bin/env python
def main():
import os
import argparse
from qap.script_utils import check_csv_missing_subs, csv_to_pandas_df, \
write_inputs_dict_to_yaml_file, read_yml_file
from qap.qap_utils import raise_smart_exception
parser = argparse.ArgumentParser()
parser.add_argument("output_csv", type=str,
help="the main output directory of the QAP run "
"which contains the participant directories")
parser.add_argument("data_config", type=str,
help="the main output directory of the QAP run "
"which contains the participant directories")
parser.add_argument("data_type", type=str,
help="the main output directory of the QAP run "
"which contains the participant directories")
args = parser.parse_args()
csv_df = csv_to_pandas_df(args.output_csv)
data_dict = read_yml_file(args.data_config)
new_dict = check_csv_missing_subs(csv_df, data_dict, args.data_type)
if new_dict:
out_file = os.path.join(os.getcwd(),
"missing_%s_data.yml" % args.data_type)
write_inputs_dict_to_yaml_file(new_dict, out_file)
if __name__ == "__main__":
main() | bsd-3-clause |
Ttl/scikit-rf | skrf/io/general.py | 3 | 22567 |
'''
.. module:: skrf.io.general
========================================
general (:mod:`skrf.io.general`)
========================================
General io functions for reading and writing skrf objects
.. autosummary::
:toctree: generated/
read
read_all
read_all_networks
write
write_all
save_sesh
Writing output to spreadsheet
.. autosummary::
:toctree: generated/
network_2_spreadsheet
networkset_2_spreadsheet
'''
import sys
import six.moves.cPickle as pickle
from six.moves.cPickle import UnpicklingError
import inspect
import os
import zipfile
import warnings
import numpy as npy
from ..util import get_extn, get_fid
from ..network import Network
from ..frequency import Frequency
from ..media import Media
from ..networkSet import NetworkSet
from ..calibration.calibration import Calibration
from copy import copy
dir_ = copy(dir)
# delayed import: from pandas import DataFrame, Series for ntwk_2_spreadsheet
# file extension conventions for skrf objects.
global OBJ_EXTN
OBJ_EXTN = [
[Frequency, 'freq'],
[Network, 'ntwk'],
[NetworkSet, 'ns'],
[Calibration, 'cal'],
[Media, 'med'],
[object, 'p'],
]
def read(file, *args, **kwargs):
'''
Read skrf object[s] from a pickle file
Reads a skrf object that is written with :func:`write`, which uses
the :mod:`pickle` module.
Parameters
------------
file : str or file-object
name of file, or a file-object
\*args, \*\*kwargs : arguments and keyword arguments
passed through to pickle.load
Examples
-------------
>>> n = rf.Network(f=[1,2,3],s=[1,1,1],z0=50)
>>> n.write('my_ntwk.ntwk')
>>> n_2 = rf.read('my_ntwk.ntwk')
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Notes
-------
if `file` is a file-object it is left open, if it is a filename then
a file-object is opened and closed. If file is a file-object
and reading fails, then the position is reset back to 0 using seek
if possible.
'''
fid = get_fid(file, mode='rb')
try:
obj = pickle.load(fid, *args, **kwargs)
except (UnpicklingError, UnicodeDecodeError) as e:
# if fid is seekable then reset to beginning of file
fid.seek(0)
if isinstance(file, str):
# we created the fid so close it
fid.close()
raise
if isinstance(file, str):
# we created the fid so close it
fid.close()
return obj
def write(file, obj, overwrite = True):
'''
Write skrf object[s] to a file
This uses the :mod:`pickle` module to write skrf objects to a file.
    Note that you can write any picklable python object. For example,
    you can write a list or dictionary of :class:`~skrf.network.Network`
    objects or :class:`~skrf.calibration.calibration.Calibration` objects.
    This will write out a single file. If you would like to write out a
    separate file for each object, use :func:`write_all`.
Parameters
------------
file : file or string
File or filename to which the data is saved. If file is a
file-object, then the filename is unchanged. If file is a
string, an appropriate extension will be appended to the file
name if it does not already have an extension.
obj : an object, or list/dict of objects
object or list/dict of objects to write to disk
overwrite : Boolean
if file exists, should it be overwritten?
Notes
-------
    If `file` is a str, but doesn't contain a suffix, one is chosen
automatically. Here are the extensions
==================================================== ===============
skrf object extension
==================================================== ===============
:class:`~skrf.frequency.Frequency` '.freq'
:class:`~skrf.network.Network` '.ntwk'
:class:`~skrf.networkSet.NetworkSet` '.ns'
:class:`~skrf.calibration.calibration.Calibration` '.cal'
:class:`~skrf.media.media.Media` '.med'
other '.p'
==================================================== ===============
To make the file written by this method cross-platform, the pickling
protocol 2 is used. See :mod:`pickle` for more info.
Examples
-------------
Convert a touchstone file to a pickled Network,
>>> n = rf.Network('my_ntwk.s2p')
>>> rf.write('my_ntwk',n)
>>> n_red = rf.read('my_ntwk.ntwk')
Writing a list of different objects
>>> n = rf.Network('my_ntwk.s2p')
>>> ns = rf.NetworkSet([n,n,n])
>>> rf.write('out',[n,ns])
>>> n_red = rf.read('out.p')
See Also
------------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
skrf.network.Network.write : write method of Network
skrf.calibration.calibration.Calibration.write : write method of Calibration
'''
if isinstance(file, str):
extn = get_extn(file)
if extn is None:
            # if there is no extension, add one
for obj_extn in OBJ_EXTN:
if isinstance(obj, obj_extn[0]):
extn = obj_extn[1]
break
file = file + '.' + extn
if os.path.exists(file):
if not overwrite:
warnings.warn('file exists, and overwrite option is False. Not writing.')
return
with open(file, 'wb') as fid:
pickle.dump(obj, fid, protocol=2)
else:
fid = file
pickle.dump(obj, fid, protocol=2)
fid.close()
def read_all(dir='.', contains = None, f_unit = None, obj_type=None):
'''
Read all skrf objects in a directory
Attempts to load all files in `dir`, using :func:`read`. Any file
that is not readable by skrf is skipped. Optionally, simple filtering
can be achieved through the use of `contains` argument.
Parameters
--------------
dir : str, optional
the directory to load from, default \'.\'
contains : str, optional
if not None, only files containing this substring will be loaded
f_unit : ['hz','khz','mhz','ghz','thz']
for all :class:`~skrf.network.Network` objects, set their
frequencies's :attr:`~skrf.frequency.Frequency.f_unit`
obj_type : str
Name of skrf object types to read (ie 'Network')
Returns
---------
out : dictionary
dictionary containing all loaded skrf objects. keys are the
filenames without extensions, and the values are the objects
Examples
----------
>>> rf.read_all('skrf/data/')
{'delay_short': 1-Port Network: 'delay_short', 75-110 GHz, 201 pts, z0=[ 50.+0.j],
'line': 2-Port Network: 'line', 75-110 GHz, 201 pts, z0=[ 50.+0.j 50.+0.j],
'ntwk1': 2-Port Network: 'ntwk1', 1-10 GHz, 91 pts, z0=[ 50.+0.j 50.+0.j],
'one_port': one port Calibration: 'one_port', 500-750 GHz, 201 pts, 4-ideals/4-measured,
...
>>> rf.read_all('skrf/data/', obj_type = 'Network')
{'delay_short': 1-Port Network: 'delay_short', 75-110 GHz, 201 pts, z0=[ 50.+0.j],
'line': 2-Port Network: 'line', 75-110 GHz, 201 pts, z0=[ 50.+0.j 50.+0.j],
'ntwk1': 2-Port Network: 'ntwk1', 1-10 GHz, 91 pts, z0=[ 50.+0.j 50.+0.j],
...
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
'''
out={}
for filename in os.listdir(dir):
if contains is not None and contains not in filename:
continue
fullname = os.path.join(dir,filename)
keyname = os.path.splitext(filename)[0]
try:
out[keyname] = read(fullname)
continue
except:
pass
try:
out[keyname] = Network(fullname)
continue
except:
pass
if f_unit is not None:
for keyname in out:
try:
out[keyname].frequency.unit = f_unit
except:
pass
if obj_type is not None:
out = dict([(k, out[k]) for k in out if
isinstance(out[k],sys.modules[__name__].__dict__[obj_type])])
return out
def read_all_networks(*args, **kwargs):
'''
Read all networks in a directory.
This is a convenience function. It just calls::
read_all(*args,obj_type='Network', **kwargs)
See Also
----------
read_all
'''
if 'f_unit' not in kwargs:
kwargs.update({'f_unit':'ghz'})
return read_all(*args,obj_type='Network', **kwargs)
ran = read_all_networks
def write_all(dict_objs, dir='.', *args, **kwargs):
'''
Write a dictionary of skrf objects individual files in `dir`.
Each object is written to its own file. The filename used for each
object is taken from its key in the dictionary. If no extension
exists in the key, then one is added. See :func:`write` for a list
of extensions. If you would like to write the dictionary to a single
output file use :func:`write`.
Notes
-------
    Any object in dict_objs that is picklable will be written.
Parameters
------------
dict_objs : dict
dictionary of skrf objects
dir : str
directory to save skrf objects into
\*args, \*\*kwargs :
passed through to :func:`~skrf.io.general.write`. `overwrite`
option may be of use.
See Also
-----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Examples
----------
    Writing a dictionary of different skrf objects
    >>> from skrf.data import line, short
    >>> d = {'line': line, 'short': short}
>>> rf.write_all(d)
'''
    if not os.path.exists(dir):
        raise OSError('No such directory: %s'%dir)
for k in dict_objs:
filename = k
obj = dict_objs[k]
extn = get_extn(filename)
if extn is None:
            # if there is no extension, add one
for obj_extn in OBJ_EXTN:
if isinstance(obj, obj_extn[0]):
extn = obj_extn[1]
break
filename = filename + '.' + extn
try:
with open(os.path.join(dir+'/', filename), 'wb') as fid:
write(fid, obj,*args, **kwargs)
except Exception as inst:
print(inst)
            warnings.warn("couldn't write %s: %s"%(k,str(inst)))
pass
def save_sesh(dict_objs, file='skrfSesh.p', module='skrf', exclude_prefix='_'):
'''
Save all `skrf` objects in the local namespace.
This is used to save current workspace in a hurry, by passing it the
output of :func:`locals` (see Examples). Note this can be
used for other modules as well by passing a different `module` name.
Parameters
------------
dict_objs : dict
dictionary containing `skrf` objects. See the Example.
file : str or file-object, optional
the file to save all objects to
module : str, optional
the module name to grep for.
exclude_prefix: str, optional
        don't save objects whose names begin with this prefix.
See Also
----------
read : read a skrf object
write : write skrf object[s]
read_all : read all skrf objects in a directory
write_all : write dictionary of skrf objects to a directory
Examples
---------
Write out all skrf objects in current namespace.
    >>> rf.save_sesh(locals(), 'mysesh.p')
'''
objects = {}
print('pickling: ')
for k in dict_objs:
try:
if module in inspect.getmodule(dict_objs[k]).__name__:
try:
pickle.dumps(dict_objs[k])
                    if not k.startswith(exclude_prefix):
objects[k] = dict_objs[k]
print(k+', ')
finally:
pass
except(AttributeError, TypeError):
pass
    if len(objects) == 0:
print('nothing')
write(file, objects)
def load_all_touchstones(dir = '.', contains=None, f_unit=None):
'''
Loads all touchtone files in a given dir into a dictionary.
Notes
-------
Alternatively you can use the :func:`read_all` function.
Parameters
-----------
dir : string
the path
contains : string
a string the filenames must contain to be loaded.
f_unit : ['hz','mhz','ghz']
the frequency unit to assign all loaded networks. see
:attr:`frequency.Frequency.unit`.
Returns
---------
    ntwkDict : a dictionary with keys equal to the file name (without
a suffix), and values equal to the corresponding ntwk types
Examples
----------
>>> ntwk_dict = rf.load_all_touchstones('.', contains ='20v')
See Also
-----------
read_all
'''
ntwkDict = {}
for f in os.listdir (dir):
if contains is not None and contains not in f:
continue
fullname = os.path.join(dir,f)
keyname,extn = os.path.splitext(f)
extn = extn.lower()
try:
if extn[1]== 's' and extn[-1]=='p':
                ntwkDict[keyname] = Network(fullname)
if f_unit is not None: ntwkDict[keyname].frequency.unit=f_unit
except:
pass
return ntwkDict
def write_dict_of_networks(ntwkDict, dir='.'):
'''
Saves a dictionary of networks touchstone files in a given directory
The filenames assigned to the touchstone files are taken from
the keys of the dictionary.
Parameters
-----------
ntwkDict : dictionary
dictionary of :class:`Network` objects
dir : string
directory to write touchstone file to
'''
warnings.warn('Deprecated. use write_all.', DeprecationWarning)
for ntwkKey in ntwkDict:
ntwkDict[ntwkKey].write_touchstone(filename = dir+'/'+ntwkKey)
def read_csv(filename):
'''
    Read 2-port s-parameter data from a csv file.
    Specifically, this reads a two-port csv file saved from a Rohde & Schwarz
    ZVA-40, and possibly other network analyzers, and returns the data as a
    :class:`Network` object.
Parameters
------------
filename : str
name of file
Returns
--------
ntwk : :class:`Network` object
the network representing data in the csv file
'''
ntwk = Network(name=filename[:-4])
try:
data = npy.loadtxt(filename, skiprows=3,delimiter=',',\
usecols=range(9))
s11 = data[:,1] +1j*data[:,2]
s21 = data[:,3] +1j*data[:,4]
s12 = data[:,5] +1j*data[:,6]
s22 = data[:,7] +1j*data[:,8]
ntwk.s = npy.array([[s11, s21],[s12,s22]]).transpose().reshape(-1,2,2)
except(IndexError):
data = npy.loadtxt(filename, skiprows=3,delimiter=',',\
usecols=range(3))
ntwk.s = data[:,1] +1j*data[:,2]
ntwk.frequency.f = data[:,0]
ntwk.frequency.unit='ghz'
return ntwk
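# Hedged usage sketch for read_csv (not part of the original module); the csv
# file name below is a made-up placeholder for a ZVA-style export.
def _example_read_csv():
    ntwk = read_csv('zva_sweep.csv')
    print(ntwk.name)   # the '.csv' suffix is stripped, so this prints 'zva_sweep'
    return ntwk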
## file conversion
def statistical_2_touchstone(file_name, new_file_name=None,\
header_string='# GHz S RI R 50.0'):
'''
Converts Statistical file to a touchstone file.
Converts the file format used by Statistical and other Dylan Williams
software to standard touchstone format.
Parameters
------------
file_name : string
name of file to convert
new_file_name : string
name of new file to write out (including extension)
header_string : string
        touchstone header written to the beginning of the file
'''
    remove_tmp_file = False
    if new_file_name is None:
        new_file_name = 'tmp-'+file_name
        remove_tmp_file = True
    # Note: two context managers in one `with` breaks compatibility with python 2.6 and older
    with open(file_name, 'r') as old_file, open(new_file_name, 'w') as new_file:
new_file.write('%s\n'%header_string)
for line in old_file:
new_file.write(line)
if remove_tmp_file is True:
os.rename(new_file_name,file_name)
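# Hedged usage sketch (file names are hypothetical): convert a StatistiCAL-style
# file in place, or keep the original and write the converted copy separately.
def _example_statistical_2_touchstone():
    statistical_2_touchstone('dut_raw.s2p')                       # rewrites dut_raw.s2p in place
    statistical_2_touchstone('dut_raw.s2p', 'dut_touchstone.s2p') # writes a new file instead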
def network_2_spreadsheet(ntwk, file_name =None, file_type= 'excel', form='db',
*args, **kwargs):
'''
Write a Network object to a spreadsheet, for your boss
Write the s-parameters of a network to a spreadsheet, in a variety
    of forms. This function makes use of the pandas module, which in
    turn makes use of the xlrd module. These are imported during this
    function call. For more details about the file-writing functions,
    see the pandas.DataFrame.to_??? functions.
Notes
------
    The frequency unit used in the spreadsheet is taken from
`ntwk.frequency.unit`
Parameters
-----------
ntwk : :class:`~skrf.network.Network` object
the network to write
file_name : str, None
the file_name to write. if None, ntwk.name is used.
file_type : ['csv','excel','html']
the type of file to write. See pandas.DataFrame.to_??? functions.
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
\*args, \*\*kwargs :
passed to pandas.DataFrame.to_??? functions.
See Also
---------
networkset_2_spreadsheet : writes a spreadsheet for many networks
'''
    from pandas import DataFrame, Series  # delayed because it's not a requirement
file_extns = {'csv':'csv','excel':'xls','html':'html'}
form = form.lower()
if form not in ['db','ri','ma']:
raise ValueError('`form` must be either `db`,`ma`,`ri`')
file_type = file_type.lower()
if file_type not in file_extns.keys():
raise ValueError('file_type must be `csv`,`html`,`excel` ')
if ntwk.name is None and file_name is None:
raise ValueError('Either ntwk must have name or give a file_name')
if file_name is None and 'excel_writer' not in kwargs.keys():
file_name = ntwk.name + '.'+file_extns[file_type]
d = {}
index =ntwk.frequency.f_scaled
if form =='db':
for m,n in ntwk.port_tuples:
d['S%i%i Log Mag(dB)'%(m+1,n+1)] = \
Series(ntwk.s_db[:,m,n], index = index)
d[u'S%i%i Phase(deg)'%(m+1,n+1)] = \
Series(ntwk.s_deg[:,m,n], index = index)
elif form =='ma':
for m,n in ntwk.port_tuples:
d['S%i%i Mag(lin)'%(m+1,n+1)] = \
Series(ntwk.s_mag[:,m,n], index = index)
d[u'S%i%i Phase(deg)'%(m+1,n+1)] = \
Series(ntwk.s_deg[:,m,n], index = index)
elif form =='ri':
for m,n in ntwk.port_tuples:
d['S%i%i Real'%(m+1,n+1)] = \
Series(ntwk.s_re[:,m,n], index = index)
d[u'S%i%i Imag'%(m+1,n+1)] = \
Series(ntwk.s_im[:,m,n], index = index)
df = DataFrame(d)
df.__getattribute__('to_%s'%file_type)(file_name,
index_label='Freq(%s)'%ntwk.frequency.unit, *args, **kwargs)
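# Hedged usage sketch: dump a (hypothetical) two-port touchstone file to an
# excel sheet in dB/deg form; requires pandas plus an excel writer backend.
def _example_network_2_spreadsheet():
    ntwk = Network('dut.s2p')
    network_2_spreadsheet(ntwk, file_name='dut.xls', file_type='excel', form='db')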
def network_2_dataframe(ntwk, attrs=['s_db'], ports = None):
'''
Convert one or more attributes of a network to a pandas DataFrame
Parameters
--------------
ntwk : :class:`~skrf.network.Network` object
the network to write
attrs : list Network attributes
like ['s_db','s_deg']
ports : list of tuples
list of port pairs to write. defaults to ntwk.port_tuples
(like [[0,0]])
Returns
----------
df : pandas DataFrame Object
'''
    from pandas import DataFrame, Series  # delayed because it's not a requirement
d = {}
index =ntwk.frequency.f_scaled
if ports is None:
ports = ntwk.port_tuples
for attr in attrs:
for m,n in ports:
d['%s %i%i'%(attr, m+1,n+1)] = \
Series(ntwk.__getattribute__(attr)[:,m,n], index = index)
return DataFrame(d)
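# Hedged usage sketch: pull dB magnitude and phase of S21 only into a DataFrame.
# Port indices are zero-based, so (1, 0) yields columns 's_db 21' and 's_deg 21';
# the touchstone file name is a placeholder.
def _example_network_2_dataframe():
    ntwk = Network('dut.s2p')
    df = network_2_dataframe(ntwk, attrs=['s_db', 's_deg'], ports=[(1, 0)])
    return df.head()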
def networkset_2_spreadsheet(ntwkset, file_name=None, file_type= 'excel',
*args, **kwargs):
'''
Write a NetworkSet object to a spreadsheet, for your boss
    Write the s-parameters of each network in the networkset to a
    spreadsheet. If the `excel` file_type is used, then each network
    is written to its own sheet, with the sheetname taken from the
    network `name` attribute.
    This function makes use of the pandas module, which in turn makes
    use of the xlrd module. These are imported during this function call.
Notes
------
    The frequency unit used in the spreadsheet is taken from
`ntwk.frequency.unit`
Parameters
-----------
ntwkset : :class:`~skrf.networkSet.NetworkSet` object
the network to write
file_name : str, None
the file_name to write. if None, ntwk.name is used.
file_type : ['csv','excel','html']
the type of file to write. See pandas.DataFrame.to_??? functions.
form : 'db','ma','ri'
format to write data,
* db = db, deg
* ma = mag, deg
* ri = real, imag
\*args, \*\*kwargs :
passed to pandas.DataFrame.to_??? functions.
See Also
---------
    network_2_spreadsheet : writes a spreadsheet for a single network
'''
    from pandas import DataFrame, Series, ExcelWriter  # delayed because it's not a requirement
if ntwkset.name is None and file_name is None:
raise(ValueError('Either ntwkset must have name or give a file_name'))
if file_type == 'excel':
writer = ExcelWriter(file_name)
[network_2_spreadsheet(k, writer, sheet_name =k.name, *args, **kwargs) for k in ntwkset]
writer.save()
else:
[network_2_spreadsheet(k,*args, **kwargs) for k in ntwkset]
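# Hedged usage sketch: `ntwk_set` stands in for a NetworkSet-like collection of
# named Networks; with the excel file_type each member lands on its own sheet.
def _example_networkset_2_spreadsheet(ntwk_set):
    networkset_2_spreadsheet(ntwk_set, file_name='sweep.xls', file_type='excel')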
# Provide a StringBuffer that lets me work with Python2 strings and Python3 unicode strings without thinking
if sys.version_info < (3, 0):
import StringIO
class StringBuffer(StringIO.StringIO):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
else:
import io
StringBuffer = io.StringIO
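# Hedged usage sketch: the shim above gives the same with-statement interface
# on python 2 and python 3 for building up text in memory.
def _example_string_buffer():
    with StringBuffer() as buf:
        buf.write(u'# GHz S RI R 50.0\n')
        return buf.getvalue()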
| bsd-3-clause |
a113n/bcbio-nextgen | bcbio/rnaseq/sailfish.py | 4 | 8177 | import os
from collections import namedtuple
import pandas as pd
from bcbio import utils
import bcbio.pipeline.datadict as dd
import bcbio.rnaseq.gtf as gtf
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.utils import (file_exists, safe_makedir, is_gzipped,
R_package_path, Rscript_cmd)
from bcbio.pipeline import config_utils, disambiguate
from bcbio.bam import fastq
from bcbio import bam
def run_sailfish(data):
samplename = dd.get_sample_name(data)
files = dd.get_input_sequence_files(data)
work_dir = dd.get_work_dir(data)
if len(files) == 2:
fq1, fq2 = files
else:
fq1, fq2 = files[0], None
if not fastq.is_fastq(fq1):
return [[data]]
sailfish_dir = os.path.join(work_dir, "sailfish", samplename)
gtf_file = dd.get_gtf_file(data)
assert file_exists(gtf_file), "%s was not found, exiting." % gtf_file
fasta_file = dd.get_ref_file(data)
assert file_exists(fasta_file), "%s was not found, exiting." % fasta_file
stranded = dd.get_strandedness(data).lower()
out_file = sailfish(fq1, fq2, sailfish_dir, gtf_file, fasta_file, stranded, data)
data = dd.set_sailfish(data, out_file)
data = dd.set_sailfish_dir(data, sailfish_dir)
return [[data]]
def sailfish(fq1, fq2, sailfish_dir, gtf_file, ref_file, strandedness, data):
safe_makedir(sailfish_dir)
samplename = dd.get_sample_name(data)
quant_dir = os.path.join(sailfish_dir, "quant")
out_file = os.path.join(quant_dir, "quant.sf")
if file_exists(out_file):
return out_file
build_string = get_build_string(data)
sailfish_idx = sailfish_index(ref_file, gtf_file, data, build_string)
num_cores = dd.get_num_cores(data)
sailfish = config_utils.get_program("sailfish", data["config"])
cmd = "{sailfish} quant -i {sailfish_idx} -p {num_cores} "
cmd += _libtype_string(fq1, fq2, strandedness)
fq1_cmd = "{fq1}" if not is_gzipped(fq1) else "<(gzip -cd {fq1})"
fq1_cmd = fq1_cmd.format(fq1=fq1)
if not fq2:
cmd += " -r {fq1_cmd} "
else:
fq2_cmd = "{fq2}" if not is_gzipped(fq2) else "<(gzip -cd {fq2})"
fq2_cmd = fq2_cmd.format(fq2=fq2)
cmd += " -1 {fq1_cmd} -2 {fq2_cmd} "
cmd += "--useVBOpt --numBootstraps 30 "
cmd += "-o {tx_out_dir}"
message = "Quantifying transcripts in {fq1} and {fq2}."
with file_transaction(data, quant_dir) as tx_out_dir:
do.run(cmd.format(**locals()), message.format(**locals()), None)
sleuthify_sailfish(tx_out_dir)
return out_file
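# Hedged illustration of the command line assembled above for gzipped,
# paired-end, first-strand input (paths and core count are made up):
#   sailfish quant -i <index_dir> -p 16 -l ISR \
#       -1 <(gzip -cd sample_R1.fq.gz) -2 <(gzip -cd sample_R2.fq.gz) \
#       --useVBOpt --numBootstraps 30 -o <quant_dir>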
def sleuthify_sailfish(sailfish_dir):
"""
if installed, use wasabi to create abundance.h5 output for use with
sleuth
"""
if not R_package_path("wasabi"):
return None
else:
rscript = Rscript_cmd()
cmd = """{rscript} --vanilla -e 'library("wasabi"); prepare_fish_for_sleuth(c("{sailfish_dir}"))'"""
do.run(cmd.format(**locals()), "Converting Sailfish to Sleuth format.")
return os.path.join(sailfish_dir, "abundance.h5")
def create_combined_fasta(data):
"""
if there are genomes to be disambiguated, create a FASTA file of
all of the transcripts for all genomes
"""
out_dir = os.path.join(dd.get_work_dir(data), "inputs", "transcriptome")
items = disambiguate.split([data])
fasta_files = []
for i in items:
odata = i[0]
gtf_file = dd.get_gtf_file(odata)
ref_file = dd.get_ref_file(odata)
out_file = os.path.join(out_dir, dd.get_genome_build(odata) + ".fa")
if file_exists(out_file):
fasta_files.append(out_file)
else:
out_file = gtf.gtf_to_fasta(gtf_file, ref_file, out_file=out_file)
fasta_files.append(out_file)
out_stem = os.path.join(out_dir, dd.get_genome_build(data))
if dd.get_disambiguate(data):
out_stem = "-".join([out_stem] + (dd.get_disambiguate(data) or []))
combined_file = out_stem + ".fa"
if file_exists(combined_file):
return combined_file
fasta_file_string = " ".join(fasta_files)
cmd = "cat {fasta_file_string} > {tx_out_file}"
with file_transaction(data, combined_file) as tx_out_file:
do.run(cmd.format(**locals()), "Combining transcriptome FASTA files.")
return combined_file
def create_combined_tx2gene(data):
out_dir = os.path.join(dd.get_work_dir(data), "inputs", "transcriptome")
items = disambiguate.split([data])
tx2gene_files = []
for i in items:
odata = i[0]
gtf_file = dd.get_transcriptome_gtf(odata)
if not gtf_file:
gtf_file = dd.get_gtf_file(odata)
out_file = os.path.join(out_dir, dd.get_genome_build(odata) + "-tx2gene.csv")
if file_exists(out_file):
tx2gene_files.append(out_file)
else:
out_file = gtf.tx2genefile(gtf_file, out_file, tsv=False)
tx2gene_files.append(out_file)
combined_file = os.path.join(out_dir, "tx2gene.csv")
if file_exists(combined_file):
return combined_file
tx2gene_file_string = " ".join(tx2gene_files)
cmd = "cat {tx2gene_file_string} > {tx_out_file}"
with file_transaction(data, combined_file) as tx_out_file:
do.run(cmd.format(**locals()), "Combining tx2gene CSV files.")
return combined_file
def get_build_string(data):
build_string = dd.get_genome_build(data)
if dd.get_disambiguate(data):
build_string = "-".join([build_string] + (dd.get_disambiguate(data) or []))
return build_string
def run_sailfish_index(*samples):
samples = [utils.to_single_data(x) for x in samples]
Build = namedtuple('Build', ['build', 'ref', 'gtf'])
builds = {Build(get_build_string(x), dd.get_ref_file(x), dd.get_gtf_file(x))
for x in samples}
data = samples[0]
indexdirs = {}
for build in builds:
indexdirs[build.build] = sailfish_index(build.ref, build.gtf, data,
build.build)
return [[x] for x in samples]
def sailfish_index(gtf_file, ref_file, data, build):
work_dir = dd.get_work_dir(data)
out_dir = os.path.join(work_dir, "sailfish", "index", build)
sailfish = config_utils.get_program("sailfish", data["config"])
num_cores = dd.get_num_cores(data)
gtf_fa = create_combined_fasta(data)
if file_exists(os.path.join(out_dir, "versionInfo.json")):
return out_dir
with file_transaction(data, out_dir) as tx_out_dir:
fq1, _ = dd.get_input_sequence_files(data)
kmersize = pick_kmersize(fq1)
cmd = ("{sailfish} index -p {num_cores} -t {gtf_fa} -o {tx_out_dir} "
"-k {kmersize}")
message = "Creating sailfish index for {gtf_fa} with {kmersize} bp kmers."
do.run(cmd.format(**locals()), message.format(**locals()), None)
return out_dir
def _libtype_string(fq1, fq2, strandedness):
"""
    supports just the Tophat unstranded/firststrand/secondstrand
"""
libtype = "-l I" if fq2 else "-l "
strand = _sailfish_strand_string(strandedness)
return libtype + strand
def _sailfish_strand_string(strandedness):
return {'unstranded': "U",
'firststrand': "SR",
'secondstrand': "SF"}.get(strandedness, "U")
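# Hedged sanity-check sketch for the two helpers above (file names are dummies):
# paired-end first-strand data maps to "ISR", single-end unstranded to "U".
def _example_libtype_strings():
    assert _libtype_string("r1.fq", "r2.fq", "firststrand") == "-l ISR"
    assert _libtype_string("r1.fq", None, "unstranded") == "-l U"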
def _sailfish_expression_parser(sailfish_file, samplename):
col_names = ["name", "length", "effectiveLength", "tpm", "numreads"]
df = pd.read_csv(sailfish_file, comment="#", header=None, skiprows=1, index_col=0,
names=col_names, sep="\t")
df["sample"] = samplename
return df
def pick_kmersize(fq):
"""
    Pick an appropriate kmer size, based on https://www.biostars.org/p/201474/
    tl;dr version: pick 31 unless the reads are very short, in which case
    roughly readlength / 2 is about right.
"""
if bam.is_bam(fq):
readlength = bam.estimate_read_length(fq)
else:
readlength = fastq.estimate_read_length(fq)
halfread = int(round(readlength / 2))
if halfread >= 31:
kmersize = 31
else:
kmersize = halfread
if kmersize % 2 == 0:
kmersize += 1
return kmersize
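# Hedged worked example of the heuristic above, with the arithmetic factored
# out so it can be checked without a real fastq/bam file (read lengths are made up):
def _example_kmersize_arithmetic():
    def kmer_for(readlength):
        halfread = int(round(readlength / 2))
        kmersize = 31 if halfread >= 31 else halfread
        return kmersize + 1 if kmersize % 2 == 0 else kmersize
    assert kmer_for(36) == 19    # short reads: about half the read length, forced odd
    assert kmer_for(150) == 31   # typical reads: capped at 31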
| mit |
daleloogn/all-in-one | evaluation.py | 2 | 2290 | import argparse
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
_ALL_ = ["random", "decision_trees", "linear_svm", "gaussian_naive_bayes"]
def precision_recall_f1score(GT, Y_pred):
precisions, recalls, f1scores = [], [], []
for i in range(GT.shape[0]):
precisions.append(precision_score(GT[i][:], Y_pred[i][:]))
recalls.append(recall_score(GT[i][:], Y_pred[i][:]))
f1scores.append(f1_score(GT[i][:], Y_pred[i][:]))
precisions, recalls, f1scores = np.array(precisions), np.array(recalls), np.array(f1scores)
precision = (np.mean(precisions), np.median(precisions), np.std(precisions))
recall = (np.mean(recalls), np.median(recalls), np.std(recalls))
f1score = (np.mean(f1scores), np.median(f1scores), np.std(f1scores))
return precision, recall, f1score
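# Hedged usage sketch with a tiny made-up ground truth / prediction pair; each
# row is one item (e.g. a track) and each column one tag, so the scores are
# computed per row and then aggregated as (mean, median, std).
def _example_precision_recall_f1score():
    GT = np.array([[1, 0, 1], [0, 1, 1]])
    Y_pred = np.array([[1, 0, 0], [0, 1, 1]])
    precision, recall, f1score = precision_recall_f1score(GT, Y_pred)
    print "precision (mean, median, std): %s" % str(precision)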
def annotation_evaluation(collection, algorithm, model="track_based"):
print "\n\n####Evaluating %s annotation predictions by algorithm '%s'####" % (model, algorithm)
evaluation = {}
filename = "classification/%s/confusion_matrix__%s.out" % (collection, algorithm)
Y_pred = np.loadtxt(open(filename))
GT = np.loadtxt(open("classification/%s/ground_truth.out" % collection))
if model == "tag_based":
GT = GT.transpose()
Y_pred = Y_pred.transpose()
precision, recall, f1score = precision_recall_f1score(GT, Y_pred)
print "-----------------------------------------------------------"
print "Precision: %s" % str(precision)
print "Recall: %s" % str(recall)
print "F1score: %s" % str(f1score)
print "-----------------------------------------------------------"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate annotation predictions using track_based and tag_based measures')
parser.add_argument('collection', help='Collection name (e.g.: majorminer)')
parser.add_argument('-a', '--algorithm', nargs='?', default="all", help='algorithm to evaluate (default="all")')
args = parser.parse_args()
if args.algorithm == "all":
for algorithm in _ALL_:
annotation_evaluation(args.collection, algorithm, "track_based")
annotation_evaluation(args.collection, algorithm, "tag_based")
else:
annotation_evaluation(args.collection, args.algorithm, "track_based")
annotation_evaluation(args.collection, args.algorithm, "tag_based")
| gpl-2.0 |
arahuja/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
      "200 datapoints: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
vshtanko/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
xzh86/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
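Schematically, for a fixed input `x` the decomposition reads
error(x) = bias^2(x) + variance(x) + noise(x),
where the expectation defining error(x) is taken over training sets LS and over
the noise on `y`; this is the pointwise quantity estimated and plotted in the
lower figures.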
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
google-research/google-research | learn_to_infer/run_ring.py | 1 | 10211 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for transformer experiments.
"""
import os
from . import metrics
from . import plotting
from . import ring_dist
from . import ring_models
from . import train
from absl import app
from absl import flags
import jax
from jax.config import config
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as onp
flags.DEFINE_integer("num_encoders", 6,
"Number of encoder modules in the transformer.")
flags.DEFINE_integer("num_decoders", 6,
"Number of decoder modules in the transformer.")
flags.DEFINE_integer("num_heads", 8,
"Number of attention heads in the transformer.")
flags.DEFINE_integer("key_dim", 32,
"The dimension of the keys in the transformer.")
flags.DEFINE_integer("value_dim_per_head", 32,
"The dimension of the values in the transformer for each head.")
flags.DEFINE_integer("k", 2,
"The number of modes in the data.")
flags.DEFINE_integer("data_points_per_mode", 25,
"Number of data points to include per mode in the data.")
flags.DEFINE_boolean("parallel", True,
"If possible, train in parallel across devices.")
flags.DEFINE_integer("batch_size", 64,
"The batch size.")
flags.DEFINE_integer("eval_batch_size", 256,
"The batch size for evaluation.")
flags.DEFINE_integer("num_steps", int(1e6),
"The number of steps to train for.")
flags.DEFINE_float("lr", 1e-3,
"The learning rate for ADAM.")
flags.DEFINE_integer("summarize_every", 100,
"Number of steps between summaries.")
flags.DEFINE_integer("checkpoint_every", 5000,
"Number of steps between checkpoints.")
flags.DEFINE_boolean("clobber_checkpoint", False,
"If true, remove any existing summaries and checkpoints in logdir.")
flags.DEFINE_string("logdir", "/tmp/transformer",
"The directory to put summaries and checkpoints.")
flags.DEFINE_boolean("debug_nans", False,
"If true, run in debug mode and fail on nans.")
FLAGS = flags.FLAGS
def make_model(key,
num_encoders=4,
num_decoders=4,
num_heads=8,
value_dim=128,
data_points_per_mode=25,
k=10):
model = ring_models.RingInferenceMachine(
max_k=k,
max_num_data_points=k*data_points_per_mode, num_heads=num_heads,
num_encoders=num_encoders, num_decoders=num_decoders, qkv_dim=value_dim)
params = model.init_params(key)
return model, params
def sample_batch(key, batch_size, k, data_points_per_mode):
keys = jax.random.split(key, num=batch_size)
xs, cs, params = jax.vmap(
ring_dist.sample_params_and_points,
in_axes=(0, None, None, None, None, None, None, None, None,
None))(keys, k * data_points_per_mode, k, 1., 0.5, 2, .02,
jnp.zeros([2]), jnp.eye(2), 0.1)
return xs, cs, params
def make_loss(model,
k=2,
data_points_per_mode=25,
batch_size=128):
def sample_train_batch(key):
xs, _, params = sample_batch(key, batch_size, k, data_points_per_mode)
return xs, params
def loss(params, key):
key, subkey = jax.random.split(key)
xs, ring_params = sample_train_batch(key)
ks = jnp.full([batch_size], k)
losses = model.loss(
params, xs, ks*data_points_per_mode, ring_params, ks, subkey)
return jnp.mean(losses)
return jax.jit(loss)
def make_summarize(
model,
k=2,
data_points_per_mode=25,
eval_batch_size=256):
def sample_eval_batch(key):
return sample_batch(key, eval_batch_size, k, data_points_per_mode)
sample_eval_batch = jax.jit(sample_eval_batch)
def sample_single(key):
xs, cs, params = sample_batch(key, 1, k, data_points_per_mode)
return xs[0], cs[0], (params[0][0], params[1][0], params[2][0],
params[3][0])
def model_classify(params, inputs, batch_size):
return model.classify(params, inputs,
jnp.full([batch_size], k*data_points_per_mode),
jnp.full([batch_size], k))
def sample_and_classify_eval_batch(key, params):
xs, cs, true_ring_params = sample_eval_batch(key)
tfmr_cs, tfmr_ring_params = model_classify(params, xs, eval_batch_size)
return xs, cs, true_ring_params, tfmr_cs, tfmr_ring_params
def sample_and_classify_single_mm(key, params):
xs, cs, ring_params = sample_single(key)
tfmr_cs, tfmr_ring_params = model_classify(params, xs[jnp.newaxis], 1)
return xs, cs, ring_params, tfmr_cs, tfmr_ring_params
sample_and_classify_eval_batch = jax.jit(sample_and_classify_eval_batch)
  sample_and_classify_single_mm = jax.jit(sample_and_classify_single_mm)
def summarize_baselines(writer, step, key):
key, subkey = jax.random.split(key)
xs, cs, _ = sample_eval_batch(subkey)
ks = onp.full([eval_batch_size], k)
baseline_metrics = metrics.compute_masked_baseline_metrics(
xs, cs, ks, ks*data_points_per_mode)
for method_name, method_metrics in baseline_metrics.items():
for metric_name, metric_val in method_metrics.items():
writer.scalar("%s/%s" % (method_name, metric_name),
metric_val, step=step)
print("%s %s: %0.3f" % (method_name, metric_name, metric_val))
def plot_params(num_data_points, writer, step, params, key):
outs = sample_and_classify_single_mm(key, params)
xs, true_cs, true_params, pred_cs, pred_params = outs
pred_cs = pred_cs[0]
pred_params = (pred_params[0][0], pred_params[1][0],
pred_params[2][0], pred_params[3][0])
fig = plotting.plot_rings(
xs, k, true_cs, true_params, pred_cs, pred_params)
plot_image = plotting.plot_to_numpy_image(plt)
writer.image(
"%d_modes_%d_points" % (k, num_data_points), plot_image, step=step)
plt.close(fig)
def comparison_inference(params):
rings_inputs, true_cs = plotting.make_comparison_rings()
rings_inputs = rings_inputs[jnp.newaxis, Ellipsis]
new_model = ring_models.RingInferenceMachine(
max_k=2, max_num_data_points=1500, num_heads=FLAGS.num_heads,
num_encoders=FLAGS.num_encoders, num_decoders=FLAGS.num_decoders,
qkv_dim=FLAGS.value_dim_per_head*FLAGS.num_heads)
pred_cs, pred_params = new_model.classify(
params, rings_inputs, jnp.array([1500]), jnp.array([2]))
pred_cs = pred_cs[0]
pred_params = (pred_params[0][0], pred_params[1][0],
pred_params[2][0], pred_params[3][0])
return rings_inputs[0], true_cs, pred_cs, pred_params
comparison_inference = jax.jit(comparison_inference)
def plot_sklearn_comparison(writer, step, params):
ring_xs, true_cs, pred_cs, pred_params = comparison_inference(params)
fig = plotting.plot_comparison_rings(ring_xs, true_cs, pred_cs, pred_params)
writer.image(
"sklearn_comparison", plotting.plot_to_numpy_image(plt), step=step)
plt.close(fig)
def summarize(writer, step, params, key):
k1, k2, k3 = jax.random.split(key, num=3)
_, cs, _, tfmr_cs, _ = sample_and_classify_eval_batch(k1, params)
ks = onp.full([eval_batch_size], k)
tfmr_metrics = metrics.compute_masked_metrics(
cs, tfmr_cs, ks, ks*data_points_per_mode,
metrics=["pairwise_accuracy", "pairwise_f1",
"pairwise_macro_f1", "pairwise_micro_f1"])
for metric_name, metric_val in tfmr_metrics.items():
writer.scalar("transformer/%s" % metric_name,
metric_val, step=step)
print("Transformer %s: %0.3f" % (metric_name, metric_val))
plot_params(k*data_points_per_mode, writer, step, params, k2)
plot_sklearn_comparison(writer, step, params)
if step == 0:
summarize_baselines(writer, step, k3)
return summarize
def make_logdir(config):
basedir = config.logdir
exp_dir = (
"ring_nheads_%d_nencoders_%d_ndecoders_%d_num_modes_%d"
% (config.num_heads, config.num_encoders, config.num_decoders, config.k))
return os.path.join(basedir, exp_dir)
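# Hedged illustration of the naming scheme above (not from the original code):
# with the default flags (8 heads, 6 encoders, 6 decoders, k=2) a run is
# written under <logdir>/ring_nheads_8_nencoders_6_ndecoders_6_num_modes_2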
def main(unused_argv):
if FLAGS.debug_nans:
config.update("jax_debug_nans", True)
if FLAGS.parallel and train.can_train_parallel():
assert FLAGS.batch_size % jax.local_device_count(
) == 0, "Device count must evenly divide batch_size"
FLAGS.batch_size = int(FLAGS.batch_size / jax.local_device_count())
key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
model, init_params = make_model(
key,
num_encoders=FLAGS.num_encoders,
num_decoders=FLAGS.num_decoders,
num_heads=FLAGS.num_heads,
value_dim=FLAGS.value_dim_per_head*FLAGS.num_heads,
data_points_per_mode=FLAGS.data_points_per_mode,
k=FLAGS.k)
loss_fn = make_loss(
model,
k=FLAGS.k,
data_points_per_mode=FLAGS.data_points_per_mode,
batch_size=FLAGS.batch_size)
summarize_fn = make_summarize(
model,
k=FLAGS.k,
data_points_per_mode=FLAGS.data_points_per_mode,
eval_batch_size=FLAGS.eval_batch_size)
train.train_loop(
subkey,
init_params,
loss_fn,
parallel=FLAGS.parallel,
lr=FLAGS.lr,
num_steps=FLAGS.num_steps,
summarize_fn=summarize_fn,
summarize_every=FLAGS.summarize_every,
checkpoint_every=FLAGS.checkpoint_every,
clobber_checkpoint=FLAGS.clobber_checkpoint,
logdir=make_logdir(FLAGS))
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
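# Hedged illustration of the helper above (not part of the public API): the
# smallest signed integer type that covers the requested index range is chosen.
def _example_min_int():
    assert _min_int(0, 100) is int8
    assert _min_int(0, 40000) is int32
    assert _min_int(-1, 127) is int8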
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
    >>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
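# A minimal sketch (illustrative helper, not part of the public API) checking
# the ``normed`` semantics documented above: the sum of
# ``bin_value * bin_area`` over all bins is 1.  Newer NumPy releases spell
# this keyword ``density``.
def _histogram2d_density_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    x, y = rng.rand(1000), rng.rand(1000)
    H, xedges, yedges = np.histogram2d(x, y, bins=5, normed=True)
    bin_areas = np.outer(np.diff(xedges), np.diff(yedges))
    return (H * bin_areas).sum()          # expected to be close to 1.0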
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| artistic-2.0 |
edonyM/emthesis | code/3point2plane.py | 1 | 3545 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2015-11-30 16:04
#
# Filename: 3point2plane.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
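# A minimal usage sketch for PyColor (illustrative only; the plotting code
# below does not depend on it).  The custom escape sequence is an arbitrary
# example following the table in ``self_doc``.
def _pycolor_demo():
    col = PyColor()
    print(col.warningcolor + "something went wrong" + col.endcolor)
    print(col.tipcolor + "all good" + col.endcolor)
    col.new = '\033[0;34m'                # default display, blue foreground
    print(col.new + "custom colour" + col.endcolor)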
fig = plt.figure('3 point into plane')
ax = fig.gca(projection='3d')
X = np.arange(0, 10, 0.1)
Y = np.arange(0, 10, 0.1)
X, Y = np.meshgrid(X, Y)
Z = 5 - 0.3*X + 0.48*Y
p1 = [5.3, 0.1, 5-0.3*5.3+0.48*0.1]
p2 = [2.3, 0.7, 5-0.3*2.3+0.48*0.7]
p3 = [8.3, 3.1, 5-0.3*8.3+0.48*3.1]
ax.plot_surface(X, Y, Z, rstride=100, cstride=100, alpha=0.3)
ax.scatter(p1[0], p1[1], p1[2])
ax.scatter(p2[0], p2[1], p2[2])
ax.scatter(p3[0], p3[1], p3[2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| mit |
lgeiger/ide-python | lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydev_ipython/matplotlibtools.py | 8 | 5428 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backend2gui mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
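# A minimal sketch (illustrative only) of how the two mappings above relate:
# a gui name selects a matplotlib backend, and a backend name maps back to
# the gui hook; backends without a gui hook simply return None.
def _backend_lookup_sketch():
    assert backends['qt5'] == 'Qt5Agg'
    assert backend2gui.get('Qt5Agg') == 'qt5'
    return backend2gui.get('Agg')         # None: no event loop integration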
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
matplotlib.real_use = matplotlib.use
matplotlib.use = patched_use
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
matplotlib.real_is_interactive = matplotlib.is_interactive
matplotlib.is_interactive = patched_is_interactive
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
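# A minimal sketch (illustrative only) of how ``flag_calls`` is used: wrap a
# function, then inspect ``wrapper.called`` to see whether it actually ran.
def _flag_calls_sketch():
    def draw():
        return "drawn"
    wrapped = flag_calls(draw)
    assert wrapped.called is False        # nothing has been called yet
    result = wrapped()
    assert wrapped.called is True         # the call went through
    return result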
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
| mit |
zorojean/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formulation from ESLII
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 18 | 26105 | """
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
        objects which define the transform and its inverse. The callables
        need to take two arrays of source coordinates and should return
        two arrays of target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
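# A minimal sketch (illustrative only) of the tuple-of-callables form of
# ``aux_trans`` described in the GridHelperCurveLinear docstring: a forward
# transform and its exact inverse, each mapping arrays of source coordinates
# to target coordinates.  The shear transform here is an arbitrary choice.
def _callable_aux_trans_sketch():
    def tr(x, y):
        x, y = np.asarray(x), np.asarray(y)
        return x, y - x
    def inv_tr(x, y):
        x, y = np.asarray(x), np.asarray(y)
        return x, y + x
    return GridHelperCurveLinear((tr, inv_tr))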
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import (host_subplot_class_factory,
                                                       ParasiteAxesAuxTrans)
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
    tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the acceptable
    # Locator and Formatter classes are a bit different from mpl's, and you
    # cannot directly use mpl's Locator and Formatter here (but that may be
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
    tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the acceptable
    # Locator and Formatter classes are a bit different from mpl's, and you
    # cannot directly use mpl's Locator and Formatter here (but that may be
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| gpl-2.0 |
michaelhuang/QuantSoftwareToolkit | Examples/Basic/tutorial3.py | 4 | 3612 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def main():
''' Main Function'''
# Reading the portfolio
na_portfolio = np.loadtxt('tutorial3portfolio.csv', dtype='S5,f4',
delimiter=',', comments="#", skiprows=1)
print na_portfolio
# Sorting the portfolio by symbol name
na_portfolio = sorted(na_portfolio, key=lambda x: x[0])
print na_portfolio
# Create two list for symbol names and allocation
ls_port_syms = []
lf_port_alloc = []
for port in na_portfolio:
ls_port_syms.append(port[0])
lf_port_alloc.append(port[1])
# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')
ls_all_syms = c_dataobj.get_all_symbols()
# Bad symbols are symbols present in portfolio but not in all syms
ls_bad_syms = list(set(ls_port_syms) - set(ls_all_syms))
if len(ls_bad_syms) != 0:
print "Portfolio contains bad symbols : ", ls_bad_syms
for s_sym in ls_bad_syms:
i_index = ls_port_syms.index(s_sym)
ls_port_syms.pop(i_index)
lf_port_alloc.pop(i_index)
# Reading the historical data.
dt_end = dt.datetime(2011, 1, 1)
dt_start = dt_end - dt.timedelta(days=1095) # Three years
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Keys to be read from the data, it is good to read everything in one go.
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# Reading the data, now d_data is a dictionary with the keys above.
# Timestamps and symbols are the ones that were specified before.
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_port_syms, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Copying close price into separate dataframe to find rets
df_rets = d_data['close'].copy()
# Filling the data.
df_rets = df_rets.fillna(method='ffill')
df_rets = df_rets.fillna(method='bfill')
df_rets = df_rets.fillna(1.0)
# Numpy matrix of filled data values
na_rets = df_rets.values
# returnize0 works on ndarray and not dataframes.
tsu.returnize0(na_rets)
# Estimate portfolio returns
na_portrets = np.sum(na_rets * lf_port_alloc, axis=1)
na_port_total = np.cumprod(na_portrets + 1)
na_component_total = np.cumprod(na_rets + 1, axis=0)
# Plotting the results
plt.clf()
fig = plt.figure()
fig.add_subplot(111)
plt.plot(ldt_timestamps, na_component_total, alpha=0.4)
plt.plot(ldt_timestamps, na_port_total)
ls_names = ls_port_syms
ls_names.append('Portfolio')
plt.legend(ls_names)
plt.ylabel('Cumulative Returns')
plt.xlabel('Date')
fig.autofmt_xdate(rotation=45)
plt.savefig('tutorial3.pdf', format='pdf')
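# A minimal standalone sketch (synthetic numbers, no QSTK data access) of the
# return arithmetic used in main(): daily returns are weighted by the
# allocation, summed across symbols and compounded with cumprod.
def _toy_portfolio_sketch():
    na_rets = np.array([[0.01, -0.02],
                        [0.00, 0.03]])    # 2 days x 2 symbols
    lf_alloc = [0.6, 0.4]
    na_portrets = np.sum(na_rets * lf_alloc, axis=1)
    return np.cumprod(na_portrets + 1.0)  # cumulative portfolio value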
if __name__ == '__main__':
main()
| bsd-3-clause |
maxlikely/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 6 | 8149 | import warnings
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding import SpectralEmbedding
from sklearn.manifold.spectral_embedding import _graph_is_connected
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
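# A minimal sketch (illustrative only) of the helper above: a column and its
# negation compare as equal, which is what spectral embeddings need since
# eigenvectors are defined only up to sign.
def _sign_flip_sketch():
    A = np.arange(6.).reshape(3, 2)
    B = A.copy()
    B[:, 1] *= -1                          # flip the sign of one column
    return _check_with_col_sign_flipping(A, B, tol=1e-12)   # -> True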
def test_spectral_embedding_two_components(seed=36):
"""Test spectral embedding with two components"""
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
# test that we can still import spectral embedding
from sklearn.cluster import spectral_embedding as se_deprecated
with warnings.catch_warnings(record=True) as warning_list:
embedded_depr = se_deprecated(affinity, n_components=1,
random_state=np.random.RandomState(seed))
assert_equal(len(warning_list), 1)
assert_true(_check_with_col_sign_flipping(embedded_coordinate,
embedded_depr, 0.05))
def test_spectral_embedding_precomputed_affinity(seed=36):
"""Test spectral embedding with precomputed kernel"""
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
"""Test spectral embedding with callable affinity"""
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
"""Test spectral embedding with amg solver"""
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipline_spectral_clustering(seed=36):
"""Test using pipline to do spectral clustering"""
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
"""Test that SpectralClustering fails with an unknown eigensolver"""
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
"""Test that SpectralClustering fails with an unknown affinity type"""
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
"""Test that graph connectivity test works as expected"""
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
| bsd-3-clause |
LeSam/avoplot | src/avoplot/gui/analysis_tools.py | 3 | 4491 | #Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
"""
This module is still under construction! Eventually, it will contain a set of
data analysis tools.
"""
#The DataFollower class is still under construction - come back soon!
#class DataFollower:
# def __init__(self):
# self.line = None
#
# def connect(self, axes):
# self.axes = axes
#
# self.cid = self.axes.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
#
# def on_motion(self, event):
# if event.inaxes != self.axes: return
# if self.line is None:
# self.line, = self.axes.plot([event.xdata] * 2, self.axes.get_ylim(), 'k-')
# self.line.set_animated(True)
#
#
# trans = self.line.get_transform()
# inv_trans = trans.inverted()
#
# x0, y0 = inv_trans.transform_point([event.xdata - 10, self.axes.get_ylim()[1]])
# x1, y1 = inv_trans.transform_point([event.xdata + 10, self.axes.get_ylim()[0]])
# print "untransformed 0 = ", x0, y0
# print "untransformed 0 = ", x1, y1
# #add the line width to it
# #x0,y0 = trans.transform_point([x0-(self.line.get_linewidth()/2.0),y0])
# #x1,y1 = trans.transform_point([x1+(self.line.get_linewidth()/2.0),y1])
#
# #print "transformed 0 = ",x0,y0
# #print "transformed 0 = ",x1,y1
# bbox = matplotlib.transforms.Bbox([[x0, y0], [x1, y1]])
# #bbox.update_from_data_xy([[x0,y0],[x1,y1]])
# self.background = self.axes.figure.canvas.copy_from_bbox(self.line.axes.bbox)
# print self.background
# self.region_to_restore = bbox
# print bbox
#
# #print self.line.axes.bbox
# #print self.line.axes.bbox.bbox.update_from_data(numpy.array([[event.xdata-10, event.xdata+10],[event.xdata+10, self.axes.get_ylim()[1]]]))
# #print self.line.axes.bbox
# else:
# self.line.set_xdata([event.xdata] * 2,)
# self.line.set_ydata(self.line.axes.get_ylim())
#
## x0, xpress, ypress = self.press
## dx = event.xdata - xpress
## dy = event.ydata - ypress
##
## self.line.set_xdata([x0[0] + dx]*2)
## self.line.set_ydata(self.line.axes.get_ylim())
## self.line.set_linestyle('--')
##
# canvas = self.line.figure.canvas
# axes = self.line.axes
## # restore the background region
# self.axes.figure.canvas.restore_region(self.background, bbox=self.region_to_restore)
##
## # redraw just the current rectangle
# axes.draw_artist(self.line)
##
## # blit just the redrawn area
# canvas.blit(self.axes.bbox)
#
# trans = self.line.get_transform()
# inv_trans = trans.inverted()
#
# x0, y0 = self.axes.transData.transform([event.xdata - 10, self.axes.get_ylim()[1]])
# x1, y1 = self.axes.transData.transform([event.xdata + 10, self.axes.get_ylim()[0]])
# print "untransformed 0 = ", x0, y0
# print "untransformed 0 = ", x1, y1
# #add the line width to it
# #x0,y0 = trans.transform_point([x0-(self.line.get_linewidth()/2.0),y0])
# #x1,y1 = trans.transform_point([x1+(self.line.get_linewidth()/2.0),y1])
#
# #print "transformed 0 = ",x0,y0
# #print "transformed 0 = ",x1,y1
# bbox = matplotlib.transforms.Bbox([[x0, y0], [x1, y1]])
# #bbox.update_from_data_xy([[x0,y0],[x1,y1]])
# self.region_to_restore = bbox
#
#
# def disconnect(self):
# self.axes.figure.canvas.mpl_disconnect(self.cid)
# self.axes.figure.canvas.restore_region(self.background)
# self.axes.figure.canvas.blit(self.axes.bbox)
# pass | gpl-3.0 |
stylianos-kampakis/scikit-learn | sklearn/mixture/gmm.py | 68 | 31091 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
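# A minimal sketch (illustrative only) of calling the dispatcher above with
# the 'diag' covariance type: one sample scored against two components with
# two features each; the values are arbitrary.
def _log_density_sketch():
    X = np.array([[0.0, 0.0]])
    means = np.array([[0.0, 0.0], [1.0, 1.0]])
    covars = np.array([[1.0, 1.0], [1.0, 1.0]])
    # returns shape (1, 2): log p(x | component k) for each Gaussian
    return log_multivariate_normal_density(X, means, covars, 'diag')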
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
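# A hedged check sketch (not part of the original module): with many draws the
# empirical covariance of sample_gaussian output should approach `covar`. The
# helper name and the numbers are assumptions for illustration only.
def _check_sample_gaussian_example():
    cov = np.array([[2.0, 0.6], [0.6, 1.0]])
    draws = sample_gaussian(np.zeros(2), cov, 'full', n_samples=20000,
                            random_state=0)
    # draws has shape (n_features, n_samples), which is what np.cov expects
    return np.allclose(np.cov(draws), cov, atol=0.1)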
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
        Warning: Due to the final maximization step in the EM algorithm,
        the prediction may not be 100% accurate when the number of EM
        iterations is low.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
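# A hedged usage sketch (not part of the original module): selecting the
# number of mixture components by BIC on synthetic two-cluster data. The
# helper name and the data are assumptions; the function is defined but not
# executed on import.
def _select_n_components_by_bic_example():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(200, 2), 5 + rng.randn(200, 2)])
    bics = []
    for k in range(1, 6):
        g = GMM(n_components=k, covariance_type='full', random_state=0).fit(X)
        bics.append(g.bic(X))
    return int(np.argmin(bics)) + 1  # number of components with lowest BIC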
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
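# A hedged usage sketch (not part of the original module): because get_params
# exposes nested names such as 'lr__C', the ensemble can be tuned with grid
# search. The parameter grid is an assumption, and the grid_search import
# path matches the scikit-learn vintage this file ships with.
def _grid_search_voting_example():
    from sklearn.grid_search import GridSearchCV
    from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import GaussianNB
    eclf = VotingClassifier(estimators=[('lr', LogisticRegression()),
                                        ('gnb', GaussianNB())],
                            voting='soft')
    params = {'lr__C': [0.1, 1.0, 10.0], 'voting': ['hard', 'soft']}
    return GridSearchCV(estimator=eclf, param_grid=params, cv=3)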
| bsd-3-clause |
OTAkeys/RIOT | tests/pkg_utensor/generate_digit.py | 19 | 1149 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixels of the sample are stored as float32; images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = tf.keras.datasets.mnist.load_data()
data = mnist_test[args.index]
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data.astype('float32'), output_path)
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
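# A hedged companion sketch (not part of the original script): read the
# exported binary back into a 28x28 float32 array, e.g. to verify what the
# target firmware will consume. The helper name is an assumption; 'digit' is
# the script's default output filename.
def load_digit(path=os.path.join(SCRIPT_DIR, 'digit')):
    data = np.fromfile(path, dtype='float32')
    return data.reshape(28, 28)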
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
| lgpl-2.1 |
abyssxsy/gnuradio | gr-utils/python/utils/plot_fft_base.py | 53 | 10449 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_fft_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = getattr(scipy, datatype)
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % (self.position))
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.iq_fft = self.dofft(self.iq)
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.freq = self.calc_freq(self.time, self.sample_rate)
def dofft(self, iq):
N = len(iq)
iq_fft = scipy.fftpack.fftshift(scipy.fft(iq)) # fft and shift axis
iq_fft = 20*scipy.log10(abs((iq_fft+1e-15)/N)) # convert to decibels, adjust power
# adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
return iq_fft
def calc_freq(self, time, sample_rate):
N = len(time)
Fs = 1.0 / (time.max() - time.min())
Fn = 0.5 * sample_rate
freq = scipy.array([-Fn + i*Fs for i in xrange(N)])
return freq
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for FFT plot
self.sp_fft = self.fig.add_subplot(2,2,2, position=[0.575, 0.2, 0.4, 0.6])
self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp_fft.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_fft.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time() # draw the plot
self.plot_fft = self.sp_fft.plot([], 'bo-') # make plot for FFT
self.draw_fft() # draw the plot
draw()
def draw_time(self):
reals = self.iq.real
imags = self.iq.imag
self.plot_iq[0].set_data([self.time, reals])
self.plot_iq[1].set_data([self.time, imags])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_fft(self):
self.plot_fft[0].set_data([self.freq, self.iq_fft])
self.sp_fft.set_xlim(self.freq.min(), self.freq.max())
self.sp_fft.set_ylim([self.iq_fft.min()-10, self.iq_fft.max()+10])
def update_plots(self):
self.draw_time()
self.draw_fft()
self.xlim = self.sp_iq.get_xlim()
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
self.xlim = newxlim
#xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))
iq = self.iq[xmin : xmax]
time = self.time[xmin : xmax]
iq_fft = self.dofft(iq)
freq = self.calc_freq(time, self.sample_rate)
self.plot_fft[0].set_data(freq, iq_fft)
self.sp_fft.axis([freq.min(), freq.max(),
iq_fft.min()-10, iq_fft.max()+10])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time as well as the frequency domain (FFT) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. This value defaults to 1000. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
return parser
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
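# A hedged standalone sketch (not part of the original tool): the same
# fftshift + dB normalization performed in plot_fft_base.dofft, applied to a
# synthetic complex tone. The helper name and tone parameters are assumptions.
def example_dofft_tone(n=1024, sample_rate=1.0, freq=0.1):
    t = scipy.arange(n) / sample_rate
    iq = scipy.exp(2j * scipy.pi * freq * t)
    iq_fft = scipy.fftpack.fftshift(scipy.fftpack.fft(iq)) # fft and shift axis
    return 20*scipy.log10(abs((iq_fft+1e-15)/n)) # dB, with 1/N power adjustment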
def main():
parser = plot_fft_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_fft_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
rfriesen/DR1_analysis | property_histograms.py | 2 | 7527 | from astropy.io import fits
import aplpy
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import astropy.units as u
import astropy.constants as c
import warnings
import numpy as np
from astropy.visualization import hist
from config import plottingDictionary
"""
Make histogram plots of NH3-derived properties for DR1 regions
"""
def mask_hist(par_data,epar_data,epar_lim,par_max,par_min=0):
mask1 = np.isfinite(epar_data)
mask2 = epar_data < epar_lim
mask3 = epar_data > 0
mask4 = par_data < par_max
mask5 = par_data > par_min
return par_data * mask1 * mask2 * mask3 * mask4 * mask5
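# A hedged illustration (not part of the original script): mask_hist keeps
# pixels whose uncertainties are finite, positive and below epar_lim, and
# whose values lie inside (par_min, par_max); everything else is zeroed.
# The toy arrays below are assumptions.
def _mask_hist_example():
    par = np.array([1.0, 5.0, 20.0, 8.0])
    epar = np.array([0.2, np.nan, 0.3, 5.0])
    # only the first pixel passes every cut -> array([1., 0., 0., 0.])
    return mask_hist(par, epar, epar_lim=1.0, par_max=15.0, par_min=0.5)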
region_list = ['B18','NGC1333','L1688','OrionA']
par_list = ['Vlsr','Sigma','Tkin','Tex','N_NH3']
epar_ext = [10,9,6,7,8]
epar_limits = [0.05,0.1,1,2,0.5]
extension = 'DR1_rebase3'
label_list=['$v_{LSR}$ (km s$^{-1}$)','$\sigma_v$ (km s$^{-1}$)','$T_K$ (K)','$T_{ex}$ (K)','log N(para-NH$_3$) (cm$^{-2}$)']
label_short = ['$v_{LSR}$','$\sigma_v$','$T_K$','$T_{ex}$','log N(para-NH$_3$)']
plot_colours = ['black','blue','green','orange']
#plot_colours = ['black','darkblue','blue','cornflowerblue']
#plot_colours = ['#a6cee3', '#fdbf6f', '#33a02c', '#fb9a99']
hist_minx_list = [2,0,5,2.7,13]
hist_maxx_list = [13,1.5,37,12,15.7]
hist_maxy_list = [2.1,8,0.65,1.6,2]
ytick_int_maj = [0.4,2,0.2,0.4,0.5]
ytick_int_min = [0.1,0.5,0.025,0.1,0.25]
dataDir = ''
hist_kwds1 = dict(histtype='stepfilled',alpha=0.2,normed=True)
# Separate regions in plots to better show distributions
for par_i in range(len(par_list)):
fig,axes = plt.subplots(len(region_list),1,figsize=(4,5))
par = par_list[par_i]
label = label_list[par_i]
ylabel = label_short[par_i]
for i, ax in enumerate(fig.axes):
region_i = i
region = region_list[region_i]
plot_param=plottingDictionary[region]
par_file = dataDir + '{0}/parameterMaps/{0}_{1}_{2}_flag.fits'.format(region,par,extension)
epar_file = dataDir + '{0}/{0}_parameter_maps_{1}_trim.fits'.format(region,extension)
epar_hdu = fits.open(epar_file)
epar_data = epar_hdu[0].data[epar_ext[par_i],:,:]
epar_hdu.close()
par_hdu = fits.open(par_file)
par_data = par_hdu[0].data
par_hdu.close()
pmin_list = plot_param['pmin_list']
pmax_list = plot_param['pmax_list']
pmin = np.max([pmin_list[par_i],np.nanmin(par_data)])
pmax = np.min([pmax_list[par_i],np.nanmax(par_data)])
par_masked = mask_hist(par_data,epar_data,epar_limits[par_i],pmax,par_min=pmin)
par_masked = par_masked[np.isfinite(par_masked)]
if par == 'Vlsr':
bin_width = 0.3
            nbins = np.int((np.max(par_masked) - np.min(par_masked[par_masked != 0]))/bin_width)
hist(par_masked[par_masked !=0],bins=nbins,ax=ax,histtype='stepfilled',alpha=0.3,
color=plot_colours[region_i],label=region,normed=True)
else:
hist(par_masked[par_masked !=0],bins='knuth',ax=ax,histtype='stepfilled',alpha=0.3,
color=plot_colours[region_i],label=region,normed=True)
if (i+1) != len(region_list):
ax.set_xticklabels([])
ax.set_xlim(hist_minx_list[par_i],hist_maxx_list[par_i])
#ax.set_ylim(0,hist_maxy_list[par_i])
if par == 'Tkin':
if region == 'OrionA' or region == 'L1688' or region == 'NGC1333':
ax.set_ylim(0,0.25)
ax.yaxis.set_major_locator(ticker.MultipleLocator(ytick_int_maj[par_i]))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(ytick_int_min[par_i]))
ax.annotate('{0}'.format(region),xy=(0.97,0.7),xycoords='axes fraction',horizontalalignment='right')
#ax.legend(frameon=False)
ax.set_xlabel(label)
fig.text(0.001,0.5,'P ({0})'.format(ylabel),va='center',rotation='vertical')
#fig.tight_layout()
fig.savefig('figures/{0}_histogram_separated.pdf'.format(par))
plt.close('all')
# Same plot for X(NH3)
# Need to apply Tex-based mask to get rid of some noisy data
fig,axes = plt.subplots(len(region_list),1,figsize=(4,5))
for i, ax in enumerate(fig.axes):
region_i = i
region = region_list[region_i]
plot_param=plottingDictionary[region]
par_file = dataDir + '{0}/parameterMaps/{0}_XNH3_{1}.fits'.format(region,extension)
epar_file = dataDir + '{0}/parameterMaps/{0}_eTex_{1}_flag.fits'.format(region,extension)
par_hdu = fits.open(par_file)
par_data = par_hdu[0].data
par_hdu.close()
epar_hdu = fits.open(epar_file)
epar_data = epar_hdu[0].data
epar_hdu.close()
pmin = -9.5
pmax = -6.5
#par_data[par_data == 0] = np.nan
par_masked = mask_hist(par_data,epar_data,epar_limits[3],pmax,par_min=pmin)
par_masked = par_masked[np.isfinite(par_masked)]
hist(par_masked[par_masked !=0],bins='knuth',ax=ax,histtype='stepfilled',alpha=0.3,
color=plot_colours[region_i],label=region,normed=True)
if (i+1) != len(region_list):
ax.set_xticklabels([])
ax.set_xlim(pmin,pmax)
#ax.set_ylim(0,hist_maxy_list[par_i])
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax.annotate('{0}'.format(region),xy=(0.97,0.7),xycoords='axes fraction',horizontalalignment='right')
#ax.legend(frameon=False)
ax.set_xlabel('log $X$(NH$_3$)')
fig.text(0.01,0.5,'P($X$(NH$_3$))',va='center',rotation='vertical')
#fig.tight_layout()
fig.savefig('figures/XNH3_histogram_separated.pdf',bbox_inches='tight')
plt.close('all')
'''
# All together
fig = plt.figure(figsize=(6.5,8))
for par_i in range(len(par_list)):
par = par_list[par_i]
label = label_list[par_i]
#fig = plt.figure()
#ax = plt.gca()
ax = plt.subplot(3,2,par_i+1)
for region_i in range(len(region_list)):
region = region_list[region_i]
plot_param=plottingDictionary[region]
par_file = dataDir + '{0}/parameterMaps/{0}_{1}_{2}_flag.fits'.format(region,par,extension)
epar_file = dataDir + '{0}/{0}_parameter_maps_{1}_trim.fits'.format(region,extension)
epar_hdu = fits.open(epar_file)
epar_data = epar_hdu[0].data[epar_ext[par_i],:,:]
epar_hdu.close()
par_hdu = fits.open(par_file)
par_data = par_hdu[0].data
par_hdu.close()
pmin_list = plot_param['pmin_list']
pmax_list = plot_param['pmax_list']
pmin = np.max([pmin_list[par_i],np.nanmin(par_data)])
pmax = np.min([pmax_list[par_i],np.nanmax(par_data)])
par_masked = mask_hist(par_data,epar_data,epar_limits[par_i],pmax,par_min=pmin)
par_masked = par_masked[np.isfinite(par_masked)]
if par == 'Vlsr':
bin_width = 0.3
            nbins = np.int((np.max(par_masked) - np.min(par_masked[par_masked != 0]))/bin_width)
hist(par_masked[par_masked !=0],bins=nbins,ax=ax,histtype='stepfilled',alpha=0.3,
normed=True,color=plot_colours[region_i],label=region)
else:
hist(par_masked[par_masked !=0],bins='knuth',ax=ax,histtype='stepfilled',alpha=0.3,
normed=True,color=plot_colours[region_i],label=region)
ax.set_xlabel(label)
ax.set_ylabel('P(t)')
#ax.set_ylabel('N')
ax.set_xlim(hist_minx_list[par_i],hist_maxx_list[par_i])
ax.set_ylim(0,hist_maxy_list[par_i])
#ax.legend(frameon=False)
#fig.savefig('figures/{0}_histogram_number.pdf'.format(par))
ax.legend(frameon=False,bbox_to_anchor=(2.0,1.0))
fig.tight_layout()
fig.savefig('figures/all_histograms.pdf')
plt.close('all')
'''
| mit |
bsipocz/statsmodels | statsmodels/graphics/plot_grids.py | 33 | 5711 | '''create scatterplot with confidence ellipsis
Author: Josef Perktold
License: BSD-3
TODO: update script to use sharex, sharey, and visible=False
see http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label
for sharex I need to have the ax of the last_row when editing the earlier
rows. Or use axes_grid1, imagegrid
http://matplotlib.sourceforge.net/mpl_toolkits/axes_grid/users/overview.html
'''
from statsmodels.compat.python import range
import numpy as np
from scipy import stats
from . import utils
__all__ = ['scatter_ellipse']
def _make_ellipse(mean, cov, ax, level=0.95, color=None):
"""Support function for scatter_ellipse."""
from matplotlib.patches import Ellipse
v, w = np.linalg.eigh(cov)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan(u[1]/u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2 * np.sqrt(v * stats.chi2.ppf(level, 2)) #get size corresponding to level
ell = Ellipse(mean[:2], v[0], v[1], 180 + angle, facecolor='none',
edgecolor=color,
#ls='dashed', #for debugging
lw=1.5)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
def scatter_ellipse(data, level=0.9, varnames=None, ell_kwds=None,
plot_kwds=None, add_titles=False, keep_ticks=False,
fig=None):
"""Create a grid of scatter plots with confidence ellipses.
    ell_kwds, plot_kwds not used yet
looks ok with 5 or 6 variables, too crowded with 8, too empty with 1
Parameters
----------
data : array_like
Input data.
level : scalar, optional
Default is 0.9.
varnames : list of str, optional
Variable names. Used for y-axis labels, and if `add_titles` is True
also for titles. If not given, integers 1..data.shape[1] are used.
ell_kwds : dict, optional
UNUSED
plot_kwds : dict, optional
UNUSED
add_titles : bool, optional
Whether or not to add titles to each subplot. Default is False.
Titles are constructed from `varnames`.
keep_ticks : bool, optional
If False (default), remove all axis ticks.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
"""
fig = utils.create_mpl_fig(fig)
import matplotlib.ticker as mticker
data = np.asanyarray(data) #needs mean and cov
nvars = data.shape[1]
if varnames is None:
#assuming single digit, nvars<=10 else use 'var%2d'
varnames = ['var%d' % i for i in range(nvars)]
plot_kwds_ = dict(ls='none', marker='.', color='k', alpha=0.5)
if plot_kwds:
plot_kwds_.update(plot_kwds)
ell_kwds_= dict(color='k')
if ell_kwds:
ell_kwds_.update(ell_kwds)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
for i in range(1, nvars):
#print '---'
ax_last=None
for j in range(i):
#print i,j, i*(nvars-1)+j+1
ax = fig.add_subplot(nvars-1, nvars-1, (i-1)*(nvars-1)+j+1)
## #sharey=ax_last) #sharey doesn't allow empty ticks?
## if j == 0:
## print 'new ax_last', j
## ax_last = ax
## ax.set_ylabel(varnames[i])
#TODO: make sure we have same xlim and ylim
formatter = mticker.FormatStrFormatter('% 3.1f')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatter)
idx = np.array([j,i])
ax.plot(*data[:,idx].T, **plot_kwds_)
if np.isscalar(level):
level = [level]
for alpha in level:
_make_ellipse(dmean[idx], dcov[idx[:,None], idx], ax, level=alpha,
**ell_kwds_)
if add_titles:
ax.set_title('%s-%s' % (varnames[i], varnames[j]))
if not ax.is_first_col():
if not keep_ticks:
ax.set_yticks([])
else:
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
else:
ax.set_ylabel(varnames[i])
if ax.is_last_row():
ax.set_xlabel(varnames[j])
else:
if not keep_ticks:
ax.set_xticks([])
else:
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
dcorr = np.corrcoef(data, rowvar=0)
dc = dcorr[idx[:,None], idx]
xlim = ax.get_xlim()
ylim = ax.get_ylim()
## xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## if dc[1,0] < 0 :
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## else:
## yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
yrangeq = ylim[0] + 0.4 * (ylim[1] - ylim[0])
if dc[1,0] < -0.25 or (dc[1,0] < 0.25 and dmean[idx][1] > yrangeq):
yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
else:
yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
ax.text(xt, yt, '$\\rho=%0.2f$'% dc[1,0])
for ax in fig.axes:
if ax.is_last_row(): # or ax.is_first_col():
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
if ax.is_first_col():
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
return fig
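# A hedged usage sketch (not part of the original module): draw the grid for a
# correlated three-variable sample. The sample, level values and variable
# names are assumptions for illustration.
def _scatter_ellipse_example():
    rs = np.random.RandomState(0)
    cov = np.array([[1.0, 0.8, 0.3], [0.8, 1.0, 0.1], [0.3, 0.1, 1.0]])
    data = rs.multivariate_normal(np.zeros(3), cov, size=500)
    return scatter_ellipse(data, level=[0.5, 0.9], varnames=['a', 'b', 'c'])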
| bsd-3-clause |
jereze/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
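# A hedged cross-check sketch (not part of the original benchmark): for a
# fitted logistic model, get_loss should closely match sklearn's log_loss
# plus the explicit L2 term (small differences can come from the probability
# clipping inside log_loss). The helper name is an assumption and the
# function is never called by the benchmark itself.
def check_loss_with_log_loss(clf, myX, myy, C):
    from sklearn.metrics import log_loss
    n_samples = myX.shape[0]
    data_term = log_loss(myy, clf.predict_proba(myX))
    w = clf.coef_.ravel()
    return data_term + w.dot(w) / 2. / C / n_samples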
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
nhmc/LAE | cloudy/find_par.py | 1 | 13374 | from __future__ import division
from math import log, sqrt, pi
from barak.utilities import adict
from barak.absorb import split_trans_name
from barak.io import parse_config, loadobj
from barak.interp import AkimaSpline, MapCoord_Interpolator
from cloudy.utils import read_observed
import numpy as np
import os
from glob import glob
from barak.plot import get_nrows_ncols, puttext
from matplotlib.ticker import AutoMinorLocator
import astropy.constants as c
import astropy.units as u
from astropy.table import Table
import pylab as plt
import sys
# dex 1 sigma error in UVB (and so nH)
Unorm_sig = 0.3
USE_HEXBIN = True
def make_cmap_red():
from matplotlib.colors import LinearSegmentedColormap
x = np.linspace(0,1,9)
cm = plt.cm.Reds(x)
r,g,b = cm[:,0], cm[:,1], cm[:,2]
g[0] = 1
b[0] = 1
cdict = dict(red=zip(x, r, r), green=zip(x, g, g), blue=zip(x, b, b))
return LinearSegmentedColormap('red_nhmc', cdict)
def make_cmap_blue():
from matplotlib.colors import LinearSegmentedColormap
x = np.linspace(0,1,15)
cm = plt.cm.Blues(x)
r,g,b = cm[:,0], cm[:,1], cm[:,2]
g[0] = 1
b[0] = 1
r[1:10] = r[4:13]
g[1:10] = g[4:13]
b[1:10] = b[4:13]
cdict = dict(red=zip(x, r, r), green=zip(x, g, g), blue=zip(x, b, b))
return LinearSegmentedColormap('blue_nhmc', cdict)
def find_min_interval(x, alpha):
""" Determine the minimum interval containing a given probability.
x is an array of parameter values (such as from an MCMC trace).
alpha (0 -> 1) is the desired probability encompassed by the
interval.
Inspired by the pymc function of the same name.
"""
assert len(x) > 1
x = np.sort(x)
# Initialize interval
min_int = None, None
# Number of elements in trace
n = len(x)
# Start at far left
end0 = int(n*alpha)
start, end = 0, end0
# Initialize minimum width to large value
min_width = np.inf
for i in xrange(n - end0):
hi, lo = x[end+i], x[start+i]
width = hi - lo
if width < min_width:
min_width = width
min_int = lo, hi
return min_int
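# Usage sketch (illustrative, not part of the original analysis): find_min_interval
# returns the narrowest interval containing a fraction `alpha` of the samples,
# e.g. a 68% minimum credible interval of an MCMC trace. The synthetic trace is
# an assumption for demonstration only; the "if 0:" guard keeps it from running
# with the rest of this script.
if 0:
    demo_trace = np.random.normal(0.0, 1.0, 10000)
    lo68, hi68 = find_min_interval(demo_trace, 0.6827)
    # for a standard normal trace this is roughly (-1, 1)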
def make_interpolators_uvbtilt(trans, simnames):
""" Make interpolators including different UV slopes, given by the
simulation names.
simname naming scheme should be (uvb_k00, uvb_k01, uvb_k02, ...),
uvb k values must be sorted in ascending order!
"""
Models = []
aUV = []
for simname in simnames:
# need to define prefix, SIMNAME
gridname = os.path.join(simname, 'grid.cfg')
#print 'Reading', gridname
cfg = parse_config(gridname)
aUV.append(cfg.uvb_tilt)
name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
#print 'Reading', name
M = loadobj(name)
M = adict(M)
Uconst = (M.U + M.nH)[0]
#print 'Uconst', Uconst, cfg.uvb_tilt
assert np.allclose(Uconst, M.U + M.nH)
Models.append(M)
##########################################################################
# Interpolate cloudy grids onto a finer scale for plotting and
# likelihood calculation
##########################################################################
roman_map = {'I':0, 'II':1, 'III':2, 'IV':3, 'V':4, 'VI':5,
'VII':6, 'VIII':7, 'IX':8, 'X':9, '2':2}
Ncloudy = {}
Ncloudy_raw = {}
#print 'Interpolating...'
for tr in trans:
shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
Nvals = np.zeros(shape)
if tr == 'Tgas':
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M['Tgas'][:,:,:,0]
elif tr == 'NH':
for i,M in enumerate(Models):
logNHI = M.N['H'][:,:,:,0]
logNHII = M.N['H'][:,:,:,1]
logNHtot = np.log10(10**logNHI + 10**logNHII)
Nvals[:,:,:,i] = logNHtot
elif tr in ['CII*']:
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M.Nex[tr][:,:,:]
else:
atom, stage = split_trans_name(tr)
ind = roman_map[stage]
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M.N[atom][:,:,:,ind]
# use ndimage.map_coordinates (which is spline interpolation)
coord = M.NHI, M.nH, M.Z, aUV
try:
Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
except:
import pdb; pdb.set_trace()
Ncloudy_raw[tr] = Nvals
#print 'done'
return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
def triplot(names, vals, sigvals, fig, indirect={}, labels=None, fontsize=14):
from barak.plot import hist_yedge, hist_xedge, puttext
npar = len(names)
bins = {}
for n in names:
x0, x1 = vals[n].min(), vals[n].max()
dx = x1 - x0
lo = x0 - 0.1*dx
hi = x1 + 0.1*dx
bins[n] = np.linspace(lo, hi, 20)
axes = {}
for i0,n0 in enumerate(names):
for i1,n1 in enumerate(names):
if i0 == i1:# or i1 < i0: # uncomment to keep just one triangle.
continue
ax = fig.add_subplot(npar,npar, i0 * npar + i1 + 1)
ax.locator_params(tight=True, nbins=8)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
axes[(n0 + ' ' + n1)] = ax
y,x = vals[n0], vals[n1]
if USE_HEXBIN:
ax.hexbin(x,y,cmap=CM, gridsize=40,linewidths=0.1)
else:
ax.plot(x,y,'r.', ms=0.5, mew=0)#, alpha=0.5)
color = 'k' if n0 not in indirect else 'g'
text = labels[n0] if labels is not None else n0
puttext(0.05, 0.95, text, ax, color=color ,fontsize=fontsize, va='top')
color = 'k' if n1 not in indirect else 'g'
text = labels[n1] if labels is not None else n1
puttext(0.95, 0.08, text, ax, color=color ,fontsize=fontsize, ha='right')
# set limits
y0, y1 = np.percentile(vals[n0], [5, 95])
dy = y1 - y0
ax.set_ylim(y0 - dy, y1 + dy)
x0, x1 = np.percentile(vals[n1], [5, 95])
dx = x1 - x0
ax.set_xlim(x0 - dx, x1 + dx)
c = 'k'
if i0 == 0:
ax.xaxis.set_tick_params(labeltop='on')
ax.xaxis.set_tick_params(labelbottom='off')
for t in ax.get_xticklabels():
t.set_rotation(60)
elif i0 == npar-1 or (i0 == npar-2 and i1 == npar-1):
hist_xedge(vals[n1], ax, color='forestgreen',
fill=dict(color='forestgreen',alpha=0.3),
bins=bins[n1], loc='bottom')
ax.axvline(sigvals[n1][0], ymax=0.2, color=c, lw=0.5)
ax.axvline(sigvals[n1][1], ymax=0.2, color=c, lw=0.5)
cen = sum(sigvals[n1]) / 2.
ax.axvline(cen, ymax=0.2, color=c, lw=1.5)
for t in ax.get_xticklabels():
t.set_rotation(60)
else:
ax.set_xticklabels('')
if not (i1 == 0 or (i0 == 0 and i1 == 1) or i1 == npar-1):
ax.set_yticklabels('')
if (i0 == 0 and i1 == 1) or i1 == 0:
hist_yedge(vals[n0], ax, color='forestgreen',
fill=dict(color='forestgreen',alpha=0.3),
bins=bins[n0], loc='left')
ax.axhline(sigvals[n0][0], xmax=0.2, color=c, lw=0.5)
ax.axhline(sigvals[n0][1], xmax=0.2, color=c, lw=0.5)
cen = sum(sigvals[n0]) / 2.
ax.axhline(cen, xmax=0.2, color=c, lw=1.5)
if i1 == npar - 1:
ax.yaxis.set_tick_params(labelright='on')
ax.yaxis.set_tick_params(labelleft='off')
#ax.minorticks_on()
return axes
if 1:
print_header = False
if len(sys.argv[1:]) > 0 and sys.argv[1] == '--header':
print_header = True
if 1:
##################################################
# Read configuration file, set global variables
##################################################
testing = 0
cfgname = 'model.cfg'
# we only need the cfg file for the prefix of the cloudy runs and
# the name of the file with the observed column densities.
opt = parse_config(cfgname)
simnames = sorted(glob(opt['simname']))
#print opt['simname']
#print simnames
#CM = make_cmap_blue() # plt.cm.binary
#CM = make_cmap_red() # plt.cm.binary
CM = plt.cm.gist_heat_r # plt.cm.binary
#CM = plt.cm.afmhot_r # plt.cm.binary
#CM = plt.cm.bone_r # plt.cm.binary
#CM = plt.cm.terrain_r # plt.cm.binary
#CM = plt.cm.ocean_r # plt.cm.binary
trans = 'Tgas', 'NH'
if 1:
################################################################
# Read the cloudy grids and make the interpolators
################################################################
Ncloudy, Ncloudy_raw, Models, aUV = make_interpolators_uvbtilt(
trans, simnames)
M = Models[0]
#import pdb; pdb.set_trace()
Uconst_vals = []
for model in Models:
Uconst_vals.append((model['U'] + model['nH'])[0])
# note it's a function of aUV!
Uconst = AkimaSpline(aUV, Uconst_vals)
# Now find the parameter chains
samples = loadobj('samples_mcmc.sav.gz')
nwalkers, nsamples, npar = samples['chain'].shape
parvals = samples['chain'].reshape(-1, npar)
PAR = samples['par']
assert PAR['names'][-1] == 'aUV'
assert PAR['names'][-2] == 'Z'
assert PAR['names'][-3] == 'nH'
assert PAR['names'][-4] == 'NHI'
aUV = parvals[:,-1]
logZ = parvals[:,-2]
lognH = parvals[:,-3]
logNHI = parvals[:,-4]
logU = Uconst(aUV) - lognH
#import pdb; pdb.set_trace()
# call the interpolators with these parameter values.
logT = Ncloudy['Tgas'](parvals[:,-4:].T)
logNtot = Ncloudy['NH'](parvals[:,-4:].T)
# note this is log of D in kpc
logD = logNtot - lognH - np.log10(c.kpc.to(u.cm).value)
logP = logT + lognH
#import pdb; pdb.set_trace()
H_massfrac = 0.76 # (1 / mu)
# Joe's mass calculation
mass = 4./3. * pi * (3./4. * 10**logD * u.kpc)**3 * 10**lognH * \
u.cm**-3 * u.M_p / H_massfrac
# D = NH / nH
logM = np.log10(mass.to(u.M_sun).value)
if 1:
# print out the results and uncertainties
vals = dict(U=logU, T=logT, N=logNtot, D=logD, P=logP, M=logM,
nH=lognH, aUV=aUV, NHI=logNHI, Z=logZ)
levels = 0.6827, 0.9545
sigvals = {}
for key in vals:
sigvals[key] = find_min_interval(vals[key], levels[0])
if print_header:
print r'$\log(Z/Z_\odot)$&$\alpha_{UV}$ & $\log \nH$ & $\log U$& $\log \NHI$ & $\log \NH$& $\log T$ & $\log (P/k)$& $\log D$ & $\log M$ \\'
print r' & & (\cmmm) & & (\cmm) & (\cmm) & (K) & (\cmmm K) & (kpc) & (\msun) \\'
s = ''
ALLPAR = 'Z aUV nH U NHI N T P D M'.split()
for key in ALLPAR:
sig = 0.5 * (sigvals[key][1] - sigvals[key][0])
val = 0.5 * (sigvals[key][1] + sigvals[key][0])
if key in {'nH', 'D', 'P'}:
sig1 = np.hypot(sig, Unorm_sig)
s += '$%.2f\\pm%.2f(%.2f)$ &' % (val, sig1, sig)
elif key == 'M':
sig1 = np.hypot(sig, 2*Unorm_sig)
s += '$%.2f\\pm%.2f(%.2f)$ &' % (val, sig1, sig)
else:
s += '$%.2f\\pm%.2f$ &' % (val, sig)
print s[:-1] + r'\\'
if 1:
labels = dict(U='$U$', Z='$Z$', NHI='$N_\mathrm{HI}$', aUV=r'$\alpha_\mathrm{UV}$',
T='$T$', P='$P$', N='$N_\mathrm{H}$', D='$D$', M='$Mass$')
if 0:
fig = plt.figure(figsize=(12,12))
fig.subplots_adjust(left=0.05, bottom=0.05, top=0.94,right=0.94, wspace=1e-4,hspace=1e-4)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
names = 'U Z NHI aUV T P N D M'.split()
#direct = 'U Z NHI aUV'.split()
axes = triplot(names, vals, sigvals, fig, labels=labels)
plt.savefig('par.png', dpi=200)
if 1:
fig = plt.figure(figsize=(8,8))
fig.subplots_adjust(left=0.095, bottom=0.105, top=0.94,right=0.94, wspace=1e-4,hspace=1e-4)
plt.rc('xtick', labelsize=9.5)
plt.rc('ytick', labelsize=9.5)
names = 'U Z N aUV'.split()
axes = triplot(names, vals, sigvals, fig, labels=labels, fontsize=16)
axes['U Z'].set_ylabel('$\log_{10}U$')
axes['Z U'].set_ylabel('$\log_{10}[Z/Z_\odot]$')
axes['N U'].set_ylabel('$\log_{10}N_\mathrm{H}$')
axes['aUV U'].set_ylabel(r'$\log_{10}\alpha_\mathrm{UV}$')
axes['aUV U'].set_xlabel('$\log_{10}U$')
axes['aUV Z'].set_xlabel('$\log_{10}[Z/Z_\odot]$')
axes['aUV N'].set_xlabel('$\log_{10}N_\mathrm{H}$')
axes['N aUV'].set_xlabel(r'$\log_{10}\alpha_\mathrm{UV}$')
# special case:
if os.path.abspath('.') == '/Users/ncrighton/Projects/MPIA_QSO_LBG/Cloudy/J0004_NHI_2/comp1/final':
for k in ('N U', 'N Z', 'N aUV'):
axes[k].set_ylim(17.3, 19.2)
for k in ('U N', 'Z N', 'aUV N'):
axes[k].set_xlim(17.3, 19.2)
#plt.savefig('par2.pdf')
plt.savefig('par2.png',dpi=250)
| mit |
cloudera/ibis | ibis/backends/pandas/tests/conftest.py | 1 | 1158 | from pathlib import Path
import pandas as pd
import ibis
import ibis.expr.operations as ops
from ibis.backends.tests.base import BackendTest, RoundHalfToEven
class TestConf(BackendTest, RoundHalfToEven):
check_names = False
additional_skipped_operations = frozenset({ops.StringSQLLike})
supported_to_timestamp_units = BackendTest.supported_to_timestamp_units | {
'ns'
}
supports_divide_by_zero = True
returned_timestamp_unit = 'ns'
@staticmethod
def connect(data_directory: Path) -> ibis.client.Client:
return ibis.pandas.connect(
{
'functional_alltypes': pd.read_csv(
str(data_directory / 'functional_alltypes.csv'),
index_col=None,
dtype={'bool_col': bool, 'string_col': str},
parse_dates=['timestamp_col'],
encoding='utf-8',
),
'batting': pd.read_csv(str(data_directory / 'batting.csv')),
'awards_players': pd.read_csv(
str(data_directory / 'awards_players.csv')
),
}
)
| apache-2.0 |
seckcoder/lang-learn | python/sklearn/examples/covariance/plot_lw_vs_oas.py | 4 | 2864 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing an MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print __doc__
import numpy as np
import pylab as pl
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
pl.subplot(2, 1, 1)
pl.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
pl.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
pl.ylabel("Squared error")
pl.legend(loc="upper right")
pl.title("Comparison of covariance estimators")
pl.xlim(5, 31)
# plot shrinkage coefficient
pl.subplot(2, 1, 2)
pl.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
pl.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
pl.xlabel("n_samples")
pl.ylabel("Shrinkage")
pl.legend(loc="lower right")
pl.ylim(pl.ylim()[0], 1. + (pl.ylim()[1] - pl.ylim()[0]) / 10.)
pl.xlim(5, 31)
pl.show()
| unlicense |
jswanljung/iris | docs/iris/example_code/General/inset_plot.py | 7 | 2357 | """
Test Data Showing Inset Plots
=============================
This example demonstrates the use of a single 3D data cube with time, latitude
and longitude dimensions to plot a temperature series for a single latitude
coordinate, with an inset plot of the data region.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import cartopy.crs as ccrs
import iris.quickplot as qplt
import iris.plot as iplt
def main():
# Load the data
with iris.FUTURE.context(netcdf_promote=True):
cube1 = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
# Slice into cube to retrieve data for the inset map showing the
# data region
region = cube1[-1, :, :]
# Average over latitude to reduce cube to 1 dimension
plot_line = region.collapsed('latitude', iris.analysis.MEAN)
# Open a window for plotting
fig = plt.figure()
# Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
ax_main = fig.add_subplot(1, 1, 1)
# Produce a quick plot of the 1D cube
qplt.plot(plot_line)
# Set x limits to match the data
ax_main.set_xlim(0, plot_line.coord('longitude').points.max())
# Adjust the y limits so that the inset map won't clash with main plot
ax_main.set_ylim(294, 310)
ax_main.set_title('Meridional Mean Temperature')
# Add grid lines
ax_main.grid()
# Add a second set of axes specifying the fractional coordinates within
# the figure with bottom left corner at x=0.55, y=0.58 with width
# 0.3 and height 0.25.
# Also specify the projection
ax_sub = fig.add_axes([0.55, 0.58, 0.3, 0.25],
projection=ccrs.Mollweide(central_longitude=180))
# Use iris.plot (iplt) here so colour bar properties can be specified
# Also use a sequential colour scheme to reduce confusion for those with
# colour-blindness
iplt.pcolormesh(region, cmap='Blues')
# Manually set the orientation and tick marks on your colour bar
ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
plt.colorbar(orientation='horizontal', ticks=ticklist)
ax_sub.set_title('Data Region')
# Add coastlines
ax_sub.coastlines()
# request to show entire map, using the colour mesh on the data region only
ax_sub.set_global()
qplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
gviejo/ThalamusPhysio | python/main_make_MAPinfo.py | 1 | 14284 | #!/usr/bin/env python
'''
File name: main_make_MAPinfo.py
Author: Guillaume Viejo
Date created: 09/10/2017
Python Version: 3.5.2
To make shank mapping
'''
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import sys
sys.exit()
###############################################################################################################
# LOADING DATA
###############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
spind_mod, spind_ses = loadSpindMod('/mnt/DataGuillaume/MergedData/SPINDLE_mod.pickle', datasets, return_index=True)
spike_spindle_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_SPINDLE_PHASE.pickle', 'rb'))
spike_theta_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_THETA_PHASE.pickle', 'rb'))
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
# filtering swr_mod
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (10,)).transpose())
# Cut swr_mod from -500 to 500
swr = swr.loc[-500:500]
# CHECK FOR NAN
tmp1 = swr.columns[swr.isnull().any()].values
tmp2 = theta.index[theta.isnull().any(1)].values
# CHECK P-VALUE
tmp3 = theta.index[(theta['pvalue'] > 1).values].values
tmp = np.unique(np.concatenate([tmp1,tmp2,tmp3]))
# copy and delete
if len(tmp):
swr_modth = swr.drop(tmp, axis = 1)
theta_modth = theta.drop(tmp, axis = 0)
swr_modth_copy = swr_modth.copy()
neuron_index = swr_modth.columns
times = swr_modth.loc[-500:500].index.values
###############################################################################################################
# MOVIE + jPCA for each animal
###############################################################################################################
mouses = ['Mouse12', 'Mouse17', 'Mouse20', 'Mouse32']
# times = np.arange(0, 1005, 5) - 500 # BAD
interval_to_cut = { 'Mouse12':[89,128],
'Mouse17':[84,123],
'Mouse20':[92,131],
'Mouse32':[80,125]}
movies = dict.fromkeys(mouses)
rXX = dict.fromkeys(mouses)
maps = dict.fromkeys(mouses)
headdir = dict.fromkeys(mouses)
adnloc = dict.fromkeys(mouses)
xpos = dict.fromkeys(mouses)
ypos = dict.fromkeys(mouses)
xpos_shank = dict.fromkeys(mouses)
ypos_shank = dict.fromkeys(mouses)
xpos_phase = dict.fromkeys(mouses)
ypos_phase = dict.fromkeys(mouses)
theta_dens = dict.fromkeys(mouses)
hd_neurons_index = []
for m in mouses:
print(m)
depth = pd.DataFrame(index = np.genfromtxt(data_directory+m+"/"+m+".depth", dtype = 'str', usecols = 0),
data = np.genfromtxt(data_directory+m+"/"+m+".depth", usecols = 1),
columns = ['depth'])
neurons = np.array([n for n in neuron_index if m in n])
sessions = np.unique([n.split("_")[0] for n in neuron_index if m in n])
nb_bins = 201
swr_shank = np.zeros((len(sessions),8,nb_bins))
# nb_bins = interval_to_cut[m][1] - interval_to_cut[m][0]
theta_shank = np.zeros((len(sessions),8,30)) # that's radian bins here
spindle_shank = np.zeros((len(sessions),8,30)) # that's radian bins here
bins_phase = np.linspace(0.0, 2*np.pi+0.00001, 31)
count_total = np.zeros((len(sessions),8))
hd_neurons = np.zeros((len(sessions),8))
amplitute = np.zeros((len(sessions),8))
mod_theta = np.zeros((len(sessions),8))
###########################################################################################################
# JPCA
###########################################################################################################
rX,phi_swr,dynamical_system = jPCA(swr_modth[neurons].values.transpose(), times)
phi_swr = pd.DataFrame(index = neurons, data = phi_swr)
###########################################################################################################
# VARIOUS
###########################################################################################################
for s in sessions:
generalinfo = scipy.io.loadmat(data_directory+m+"/"+s+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
spikes,shank = loadSpikeData(data_directory+m+"/"+s+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
hd_info = scipy.io.loadmat(data_directory+m+'/'+s+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
shankIndex = np.array([shank[n] for n in spikes.keys()]).flatten()
if np.max(shankIndex) > 8: sys.exit("Invalid shank index for thalamus " + s)
shank_to_neurons = {k:np.array(list(spikes.keys()))[shankIndex == k] for k in np.unique(shankIndex)}
for k in shank_to_neurons.keys():
count_total[np.where(sessions== s)[0][0],k] = len(shank_to_neurons[k])
hd_neurons[np.where(sessions== s)[0][0],k] = np.sum(hd_info_neuron[shankIndex == k])
mod_theta[np.where(sessions== s)[0][0],k] = (theta.loc[[s+'_'+str(i) for i in shank_to_neurons[k]]]['pvalue'] < 0.05).sum()
# amplitute[np.where(sessions==s)[0][0],k] = (swr.loc[shank_to_neurons[k]].var(1)).mean()
###########################################################################################################
# SWR MOD
###########################################################################################################
neurons_mod_in_s = np.array([n for n in neurons if s in n])
shank_to_neurons = {k:np.array([n for n in neurons_mod_in_s if shankIndex[int(n.split("_")[1])] == k]) for k in np.unique(shankIndex)}
for k in shank_to_neurons.keys():
# if np.sum(hd_info_neuron[[int(n.split("_")[1]) for n in shank_to_neurons[k]]]):
# print(s, k, len(shank_to_neurons[k]))
# if s == 'Mouse17-130204': sys.exit()
if len(shank_to_neurons[k]):
swr_shank[np.where(sessions== s)[0][0],k] = swr_modth[shank_to_neurons[k]].mean(1).values
###########################################################################################################
# THETA MOD
###########################################################################################################
for k in shank_to_neurons.keys():
if len(shank_to_neurons[k]):
for n in shank_to_neurons[k]:
phi = spike_theta_phase['rem'][n]
phi[phi<0.0] += 2*np.pi
index = np.digitize(phi, bins_phase)-1
for t in index:
theta_shank[np.where(sessions == s)[0][0],k,t] += 1.0
###########################################################################################################
# SPIND HPC MOD
###########################################################################################################
for k in shank_to_neurons.keys():
if len(shank_to_neurons[k]):
for n in shank_to_neurons[k]:
if n in list(spike_spindle_phase.keys()):
phi = spike_spindle_phase['hpc'][n]
phi[phi<0.0] += 2*np.pi
index = np.digitize(phi, bins_phase)-1
for t in index:
spindle_shank[np.where(sessions == s)[0][0],k,t] += 1.0
for t in range(len(times)):
swr_shank[:,:,t] = np.flip(swr_shank[:,:,t], 1)
for t in range(theta_shank.shape[-1]):
theta_shank[:,:,t] = np.flip(theta_shank[:,:,t], 1)
spindle_shank[:,:,t] = np.flip(spindle_shank[:,:,t], 1)
# saving
movies[m] = { 'swr' : swr_shank ,
'theta' : theta_shank ,
'spindle': spindle_shank }
hd_neurons = hd_neurons/(count_total+1.0)
mod_theta = mod_theta/(count_total+1.0)
rXX[m] = rX
maps[m] = { 'total': np.flip(count_total,1),
'x' : np.arange(0.0, 8*0.2, 0.2),
'y' : depth.loc[sessions].values.flatten()
}
headdir[m] = np.flip(hd_neurons, 1)
theta_dens[m] = np.flip(mod_theta, 1)
for m in movies.keys():
datatosave = { 'movies':movies[m],
'total':maps[m]['total'],
'x':maps[m]['x'],
'y':maps[m]['y'],
'headdir':headdir[m],
'jpc':rXX[m],
'theta_dens':theta_dens[m]
}
cPickle.dump(datatosave, open("../data/maps/"+m+".pickle", 'wb'))
sys.exit()
m = 'Mouse12'
space = 0.01
thl_lines = np.load("../figures/thalamus_lines.mat.npy").sum(2)
xlines, ylines, thl_lines = interpolate(thl_lines, np.linspace(maps[m]['x'].min(), maps[m]['x'].max(), thl_lines.shape[1]),
np.linspace(maps[m]['y'].min(), maps[m]['y'].max(), thl_lines.shape[0]), 0.001)
thl_lines -= thl_lines.min()
thl_lines /= thl_lines.max()
thl_lines[thl_lines>0.6] = 1.0
thl_lines[thl_lines<=0.6] = 0.0
xnew, ynew, total = interpolate(maps[m]['total'].copy(), maps[m]['x'], maps[m]['y'], space)
# total -= total.min()
# total /= total.max()
total = softmax(total, 20.0, 0.2)
for k in movies[m].keys():
movies[m][k] = filter_(movies[m][k], (2,2,5))
filmov = dict.fromkeys(movies[m].keys())
for k in filmov:
tmp = []
for t in range(movies[m][k].shape[-1]):
# frame = movies[m][k][:,:,t] / (maps[m]['total']+1.0)
frame = movies[m][k][:,:,t]
xnew, ynew, frame = interpolate(frame, maps[m]['x'], maps[m]['y'], space)
tmp.append(frame)
tmp = np.array(tmp)
filmov[k] = filter_(tmp, 5)
filmov[k] = filmov[k] - np.min(filmov[k])
filmov[k] = filmov[k] / np.max(filmov[k] + 1e-8)
filmov[k] = softmax(filmov[k], 10, 0.5)
xnew, ynew, head = interpolate(headdir[m].copy(), maps[m]['x'], maps[m]['y'], space)
head[head < np.percentile(head, 90)] = 0.0
# sys.exit()
# figure()
# index = np.arange(0,20,1)+90
# for i in range(len(index)):
# subplot(4,5,i+1)
# # imshow(get_rgb(filmov['swr'][index[i]].copy(), total.copy(), np.ones_like(total), 0.83),
# imshow(filmov['swr'][index[i]].copy(),
# aspect = 'auto',
# origin = 'upper',
# cmap = 'jet', vmin = 0.0, vmax = 1.0)
# # extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
# title("t = "+str(times[index[i]])+" ms")
# # contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
# # contour(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]), colors = 'white')
# # show(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]))
# show()
from matplotlib import animation, rc
from IPython.display import HTML, Image
rc('animation', html='html5')
fig, axes = plt.subplots(1,1)
images = [axes.imshow(get_rgb(filmov['swr'][0].copy(), np.ones_like(total), total, 0.65), vmin = 0.0, vmax = 1.0, aspect = 'equal', origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))]
# images = [axes.imshow(filmov['swr'][0], aspect = 'equal', origin = 'upper', cmap = 'jet', vmin = 0.0, vmax = 1.0, extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))]
axes.contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]), cmap = 'gist_gray')
axes.contour(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]), colors = 'white')
def init():
images[0].set_data(get_rgb(filmov['swr'][0].copy(), np.ones_like(total), total, 0.65))
# images[0].set_data(filmov['swr'][0])
return images
def animate(t):
images[0].set_data(get_rgb(filmov['swr'][t].copy(), np.ones_like(total), total, 0.65))
# images[0].set_data(filmov['swr'][t])
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=range(len(times)), interval=0, blit=False, repeat_delay = 5000)
anim.save('../figures/swr_mod_'+m+'.gif', writer='imagemagick', fps=60)
show()
sys.exit()
sys.exit()
from matplotlib import animation, rc
from IPython.display import HTML, Image
rc('animation', html='html5')
fig, axes = plt.subplots(1,3)
images = []
for k, i in zip(['swr', 'theta', 'spindle'], range(3)):
images.append(axes[i].imshow(filmov[k][0], aspect = 'auto', cmap = 'jet', origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0])))
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
def init():
for i in range(3): images[i].set_data(filmov[k][0])
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
return images
def animate(t):
for i in range(3): images[i].set_data(filmov[k][t])
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=range(len(times)), interval=0, blit=True, repeat_delay = 0)
sys.exit()
m = 'Mouse12'
images = []
# for i in range(len(mouses)):
# lines1.append(axes[0,i].plot([],[],'o-')[0])
# lines2.append(axes[0,i].plot([],[],'o-')[0])
# axes[0,i].set_xlim(-500, 500)
# axes[0,i].set_ylim(rXX[mouses[i]].min(), rXX[mouses[i]].max())
images.append(axes.imshow(movies[m]['spindle'][:,:,0], aspect = 'auto', cmap = 'jet'))
def init():
# for i, m in zip(range(len(mouses)), mouses):
# images[i].set_data(movies[m][0])
# lines1[i].set_data(times[0], rXX[m][0,0])
# lines2[i].set_data(times[0], rXX[m][0,1])
# return images+lines1+lines2
images[0].set_data(movies[m]['spindle'][:,:,0])
return images
def animate(t):
# for i, m in zip(range(len(mouses)), mouses):
# images[i].set_data(movies[m][t])
# lines1[i].set_data(times[0:t], rXX[m][0:t,0])
# lines2[i].set_data(times[0:t], rXX[m][0:t,1])
images[0].set_data(movies[m]['spindle'][:,:,t])
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=movies[m]['spindle'].shape[-1], interval=0, blit=True, repeat_delay = 1)
show()
# anim.save('../figures/animation_swr_mod_jpca.gif', writer='imagemagick', fps=60)
| gpl-3.0 |
piyush0609/scipy | scipy/spatial/tests/test__plotutils.py | 71 | 1463 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except ImportError:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/tests/test_base.py | 4 | 3825 |
# Author: Gael Varoquaux
# License: BSD
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_equal
from nose.tools import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
#############################################################################
# The tests
def test_clone():
"""Tests that clone creates a correct deep copy.
We create an estimator, make a copy of its original state
(which, in this case, is the current state of the estimator),
and check that the obtained copy is a correct deep copy.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
"""Tests that clone doesn't copy everything.
We first create an estimator, give it an own attribute, and
make a copy of its original state. Then we check that the copy doesn't
have the specific attribute we manually added to the initial estimator.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
"""Check that clone raises an error on buggy estimators."""
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
def test_clone_empty_array():
"""Regression test for cloning estimators with empty arrays"""
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_repr():
"""Smoke test the repr of the base estimator."""
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
def test_str():
"""Smoke test the str of the base estimator"""
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/tests/test_metaestimators.py | 57 | 4958 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
AlexanderFabisch/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
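# Illustrative variant (not part of the original example): the ``strategy``
# parameter of ``Imputer`` selects the replacement statistic; a median-based
# imputer, often more robust for long-tailed features, is a one-line change.
imputer_median = Imputer(missing_values=0, strategy="median", axis=0)  # shown for illustration, not used below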
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
russel1237/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csr.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csr.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
yongfuyang/vnpy | vn.how/tick2trade/vn.trader_t2t/ctaAlgo/tools/multiTimeFrame/strategyBreakOut.py | 22 | 11811 | # encoding: UTF-8
"""
This file tweaks ctaTemplate Module to suit multi-TimeFrame strategies.
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import numpy as np
import talib.abstract  # provides talib.abstract.ATR, which is called in onBar below
########################################################################
class BreakOut(CtaTemplate):
"""
"infoArray" 字典是用来储存辅助品种信息的, 可以是同品种的不同分钟k线, 也可以是不同品种的价格。
调用的方法:
价格序列:
self.infoArray["数据库名 + 空格 + collection名"]["close"]
self.infoArray["数据库名 + 空格 + collection名"]["high"]
self.infoArray["数据库名 + 空格 + collection名"]["low"]
单个价格:
self.infoBar["数据库名 + 空格 + collection名"]
返回的值为一个ctaBarData 或 None
"""
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""日内突破交易策略, 出场方式非常多, 本文件使用指标出场"""
className = 'BreakOut'
author = 'Joe'
super(BreakOut, self).__init__(ctaEngine, setting)
# Dictionaries holding auxiliary instrument data
self.infoArray = {}
self.initInfobar = {}
self.infoBar = {}
# Number of bars to cache
self.bufferSize = 100
self.bufferCount = 0
self.initDays = 10
# Strategy parameters
self.pOBO_Mult = 0.5 # multiplier used to compute the breakout levels
# self.pProtMult = 2 # stop loss as a multiple of ATR
# self.pProfitMult = 2 # take profit as a multiple of the stop loss
# self.SlTp_On = False # enable stop-loss / take-profit handling
# self.EODTime = 15 # intraday end-of-day flat (close positions) time
self.vOBO_stretch = EMPTY_FLOAT
self.vOBO_initialpoint = EMPTY_FLOAT
self.vOBO_level_L = EMPTY_FLOAT
self.vOBO_level_S = EMPTY_FLOAT
self.orderList = []
# List of parameter names
paramList = ['name',
'className',
'author',
'pOBO_Mult',
'pProtMult',
'pProfitMult',
'SlTp_On',
'EODTime']
# List of variable names
varList = ['vOBO_stretch',
'vOBO_initialpoint',
'vOBO_level_L',
'vOBO_level_S']
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' % self.name)
# Load historical data and initialize strategy values by replaying it bar by bar
initData = self.loadBar(self.initDays)
for bar in initData:
# Push the new bar and check whether its time stamp matches any information bar
ibar = self.checkInfoBar(bar)
self.onBar(bar, infobar=ibar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
# ----------------------------------------------------------------------
def checkInfoBar(self, bar):
"""在初始化时, 检查辅助品种数据的推送(初始化结束后, 回测时不会调用)"""
initInfoCursorDict = self.ctaEngine.initInfoCursor
# If the "initInfobar" dictionary is empty, initialize it with the first data record of each information symbol
if self.initInfobar == {}:
for info_symbol in initInfoCursorDict:
try:
self.initInfobar[info_symbol] = next(initInfoCursorDict[info_symbol])
except StopIteration:
print "Data of information symbols is empty! Input is a list, not str."
raise
# If a symbol's time stamp matches the execution symbol's time stamp, return the data
# held in "initInfobar" for that symbol, then update it with the next record.
temp = {}
for info_symbol in self.initInfobar:
data = self.initInfobar[info_symbol]
# Update data only when Time Stamp is matched
if (data is not None) and (data['datetime'] <= bar.datetime):
try:
temp[info_symbol] = CtaBarData()
temp[info_symbol].__dict__ = data
self.initInfobar[info_symbol] = next(initInfoCursorDict[info_symbol])
except StopIteration:
self.initInfobar[info_symbol] = None
self.ctaEngine.output("No more data for initializing %s." % (info_symbol,))
else:
temp[info_symbol] = None
return temp
# ----------------------------------------------------------------------
def updateInfoArray(self, infobar):
"""收到Infomation Data, 更新辅助品种缓存字典"""
for name in infobar:
data = infobar[name]
# Construct empty array
if len(self.infoArray) < len(infobar) :
self.infoArray[name] = {
"close": np.zeros(self.bufferSize),
"high": np.zeros(self.bufferSize),
"low": np.zeros(self.bufferSize),
"open": np.zeros(self.bufferSize)
}
if data is None:
pass
else:
self.infoArray[name]["close"][0:self.bufferSize - 1] = \
self.infoArray[name]["close"][1:self.bufferSize]
self.infoArray[name]["high"][0:self.bufferSize - 1] = \
self.infoArray[name]["high"][1:self.bufferSize]
self.infoArray[name]["low"][0:self.bufferSize - 1] = \
self.infoArray[name]["low"][1:self.bufferSize]
self.infoArray[name]["open"][0:self.bufferSize - 1] = \
self.infoArray[name]["open"][1:self.bufferSize]
self.infoArray[name]["close"][-1] = data.close
self.infoArray[name]["high"][-1] = data.high
self.infoArray[name]["low"][-1] = data.low
self.infoArray[name]["open"][-1] = data.open
# ----------------------------------------------------------------------
def onBar(self, bar, **kwargs):
"""收到Bar推送(必须由用户继承实现)"""
# Update infomation data
# "infobar"是由不同时间或不同品种的品种数据组成的字典, 如果和执行品种的 TimeStamp 不匹配,
# 则传入的是"None", 当time stamp和执行品种匹配时, 传入的是"Bar"
if "infobar" in kwargs:
self.infoBar = kwargs["infobar"]
self.updateInfoArray(kwargs["infobar"])
        # Do not trade until enough data has been buffered
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute indicator values
a = np.sum(self.infoArray["TestData @GC_1D"]["close"])
if a == 0.0:
return
        # Only update the indicators when an information bar changes,
        # i.e. after a new 30-minute or daily bar has arrived
TradeOn = False
        if any(data is not None for data in self.infoBar.values()):
TradeOn = True
self.vRange = self.infoArray["TestData @GC_1D"]["high"][-1] -\
self.infoArray["TestData @GC_1D"]["low"][-1]
self.vOBO_stretch = self.vRange * self.pOBO_Mult
self.vOBO_initialpoint = self.infoArray["TestData @GC_1D"]["close"][-1]
self.vOBO_level_L = self.vOBO_initialpoint + self.vOBO_stretch
self.vOBO_level_S = self.vOBO_initialpoint - self.vOBO_stretch
self.atrValue30M = talib.abstract.ATR(self.infoArray["TestData @GC_30M"])[-1]
        # Decide whether to trade
        # Currently flat (no position)
if (self.pos == 0 and TradeOn == True):
            # Cancel previously issued, still-unfilled orders (both limit and stop orders)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
            # If the high of the previous 30-minute bar is above OBO_level_L
            # and the current price is above OBO_level_L, go long
if self.infoArray["TestData @GC_30M"]["high"][-1] > self.vOBO_level_L:
if bar.close > self.vOBO_level_L:
self.buy(bar.close + 0.5, 1)
                    # After placing the order, do not trade again before the next 30-minute bar
TradeOn = False
            # If the low of the previous 30-minute bar is below OBO_level_S
            # and the current price is below OBO_level_S, go short
elif self.infoArray["TestData @GC_30M"]["low"][-1] < self.vOBO_level_S:
if bar.close < self.vOBO_level_S:
self.short(bar.close - 0.5, 1)
                    # After placing the order, do not trade again before the next 30-minute bar
TradeOn = False
        # Holding a long position
elif self.pos > 0:
            # Exit when the price drops below the initialpoint level
if bar.close < self.vOBO_initialpoint:
self.sell(bar.close - 0.5 , 1)
        # Holding a short position
elif self.pos < 0:
            # Exit when the price rises above the initialpoint level
if bar.close > self.vOBO_initialpoint:
self.cover(bar.close + 0.5, 1)
        # Emit a status-update event
self.putEvent()
# ----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
# ----------------------------------------------------------------------
def onTrade(self, trade):
        """Handle trade (fill) updates (must be implemented by the subclass)."""
        pass
if __name__ == '__main__':
    # Allow running a backtest directly by double-clicking this file
    # PyQt4 is imported so that matplotlib uses PyQt4 rather than PySide, which avoids
    # initialisation errors
from ctaBacktestMultiTF import *
from PyQt4 import QtCore, QtGui
import time
'''
    Create the backtesting engine
    Set the backtest mode to bar ("K-line") data
    Set the start date of the data range
    Load historical data into the engine
    Create the strategy instance in the engine
'''
engine = BacktestEngineMultiTF()
engine.setBacktestingMode(engine.BAR_MODE)
engine.setStartDate('20120101')
engine.setEndDate('20150101')
engine.setDatabase("TestData", "@GC_1M", info_symbol=[("TestData","@GC_30M"),
("TestData","@GC_1D")])
# Set parameters for strategy
engine.initStrategy(BreakOut, {})
    # Set product-related parameters
    engine.setSlippage(0.2)             # one tick of the index future
    engine.setCommission(0.3 / 10000)   # commission of 0.3 per 10,000 of turnover
    engine.setSize(1)                   # contract multiplier of the index future
    # Run the backtest
start = time.time()
engine.runBacktesting()
    # Show the backtest results
engine.showBacktestingResult()
print 'Time consumed:%s' % (time.time() - start) | mit |
hchim/stockanalyzer | simulator/TradeSimulator.py | 1 | 4978 | import pandas as pd
import numpy as np
from utils.webdata import get_close_of_symbols
class TradeSimulator(object):
def __init__(self, start_val=1000000, leverage=2.0, allow_short=True):
"""
Parameters
----------
start_val: float
start value of the portfolio
leverage: float
max leverage
allow_short: boolean
allows to sell short
"""
self.start_val = start_val
self.leverage = leverage
self.allow_short = allow_short
def compute_leverage(self, prices, shares, cash, order_type, order_share, order_symbol):
"""
Compute the leverage of the shares
Parameters
----------
prices: Series
shares: dict
contains the current shares for each symbol
cash: float
current cash
order_type: [BUY or SELL]
the type of the order
order_share: int
the number of shares of the order
order_symbol: string
the symbol of the order
Returns
----------
leverage: float
"""
if order_type == 'BUY':
shares[order_symbol] += order_share
cash -= prices[order_symbol] * order_share
else:
shares[order_symbol] -= order_share
cash += prices[order_symbol] * order_share
longs = shorts = 0
for symbol in shares.keys():
if shares[symbol] >= 0:
longs += shares[symbol] * prices[symbol]
else:
shorts -= shares[symbol] * prices[symbol]
leverage = (longs + shorts) / (longs - shorts + cash)
return leverage
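    # Worked example for compute_leverage() above, using made-up numbers: with
    # shares = {'AAPL': 100}, prices = {'AAPL': 50}, cash = 5000 and a BUY of 20 shares,
    # the dict becomes {'AAPL': 120} and cash 4000, so longs = 6000, shorts = 0 and
    # leverage = (6000 + 0) / (6000 - 0 + 4000) = 0.6.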
def simulate(self, start_date=None, end_date=None, prices=None, orders=None, orders_file=None):
"""Simulate the trades with the given orders and the prices.
Parameters
----------
start_date: string
end_date: string
prices: DataFrame
orders: DataFrame
orders_file: string
Returns
----------
portvals: DataFrame
the daily portfolio values in the simulation
"""
if orders is None:
orders = pd.read_csv(orders_file, parse_dates=True)
symbols = list(set(orders['Symbol']))
if prices is None:
prices = get_close_of_symbols(symbols, start_date, end_date, add_spy=True) # add SPY so as to remove no-trade days
if prices is None:
return None
prices.drop('SPY', axis=1, inplace=True) # remove SPY
dates = prices.index # update dates
# init daily shares
shares = prices.copy() # record the shares every day
shares.loc[:, :] = np.nan
last_share = dict.fromkeys(shares.columns, 0) # record the total shares of each symbol
# init daily cashes
cashes = pd.Series({'Cash':np.nan}, index=dates) # record the daily cashes
last_cash = self.start_val # record total cash
# iterate orders and simulate the trades
for i in range(len(orders)):
symbol = orders.loc[i, 'Symbol']
share = orders.loc[i, 'Shares']
date = orders.loc[i, 'Date']
operate = orders.loc[i, 'Order']
price = prices.loc[date, symbol]
# check leverage
tmp_leverage = self.compute_leverage(prices.loc[date, :], last_share.copy(), last_cash,
operate, share, symbol)
if tmp_leverage > self.leverage:
continue
if operate == 'BUY':
last_share[symbol] += share
shares.loc[date, symbol] = last_share[symbol]
val = last_cash - price * share
cashes[date] = last_cash = val
else:
temp_share = last_share[symbol] - share
# short check
if not self.allow_short and temp_share < 0:
continue
shares.loc[date, symbol] = last_share[symbol] = temp_share
last_cash += price * share
cashes[date] = last_cash
# init the nan values of the first row of shares before invoking fillna
for symbol in shares.columns:
if pd.isnull(shares.loc[dates[0], symbol]):
shares.loc[dates[0], symbol] = 0
shares.fillna(method="ffill", inplace=True)
# init the nan value of the first row of cashes before invoking fillna
if pd.isnull(cashes.ix[0]):
cashes.ix[0] = self.start_val
cashes.fillna(method='ffill', inplace=True)
values = (prices * shares).sum(axis=1)
portvals = (values + cashes).to_frame()
portvals.rename(columns={portvals.columns[0]: "Portfolio"}, inplace=True)
return portvals
| mit |
aflaxman/scikit-learn | sklearn/tests/test_learning_curve.py | 33 | 12840 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorFailing(BaseEstimator):
"""Dummy classifier to test error_score in learning curve"""
def fit(self, X_subset, y_subset):
raise ValueError()
def score(self, X=None, y=None):
return None
class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
"""Dummy classifier that disallows repeated calls of fit method"""
def fit(self, X_subset, y_subset):
assert_false(
hasattr(self, 'fit_called_'),
'fit is called the second time'
)
self.fit_called_ = True
return super(type(self), self).fit(X_subset, y_subset)
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_error_score():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
_, _, test_scores = learning_curve(estimator, X, y, cv=3, error_score=0)
all_zeros = not np.any(test_scores)
assert(all_zeros)
def test_learning_curve_error_score_default_raise():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(max_iter=1, tol=None,
shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_clone_estimator():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(1, 0, 10)
_, _ = validation_curve(
MockEstimatorWithSingleFitCallAllowed(), X, y,
param_name="param", param_range=param_range, cv=2
)
| bsd-3-clause |
WillBrennan/DigitClassifier | DeepConv.py | 1 | 8479 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Will Brennan'
# Built-in Module
import os
import time
import logging
import warnings
import cPickle as pickle
from datetime import datetime
# Standard Modules
import numpy
import sklearn
import theano
import theano.tensor as T
# Custom Modules
import Scripts
import Layers
logger = logging.getLogger('main')
warnings.simplefilter("ignore", DeprecationWarning)
class DeepConv(object):
def __init__(self, debug=False, load=False, save=False):
self.args_debug = debug
self.args_load = load
self.args_save = save
if self.args_debug:
            theano.config.exception_verbosity = 'high'
if self.args_load:
self.load()
else:
self.layers = None
self.test_model = None
self.validate_model = None
self.train_model = None
self.pred_model = None
self.index, self.x, self.y = T.lscalar(), T.matrix('x'), T.ivector('y')
def fit(self, data, labels, test_data, test_labels, learning_rate=0.1, n_epochs=250, nkerns=[20, 50], batch_size=500):
logger.info('Initialising the classifier')
rng = numpy.random.RandomState()
data, labels = Scripts.shared_dataset(data_x=data, data_y=labels)
test_data, test_labels = Scripts.shared_dataset(data_x=test_data, data_y=test_labels)
if batch_size < 1:
batch_size = data.get_value(borrow=True).shape[0]
n_train_batches = data.get_value(borrow=True).shape[0]/batch_size
n_test_batches = test_data.get_value(borrow=True).shape[0]/batch_size
logger.info('Constructing the classifier')
self.layers = []
self.layers.append(Layers.PoolingLayer(
rng,
input=self.x.reshape((batch_size, 1, 28, 28)),
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
))
self.layers.append(Layers.PoolingLayer(
rng,
input=self.layers[-1].output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
))
self.layers.append(Layers.HiddenLayer(
rng,
input=self.layers[-1].output.flatten(2),
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
))
self.layers.append(Layers.LogisticRegression(
input=self.layers[-1].output,
n_in=500,
n_out=10
))
test_givens = {self.x: test_data[self.index * batch_size: (self.index + 1) * batch_size], self.y: test_labels[self.index * batch_size: (self.index + 1) * batch_size]}
self.test_model = theano.function([self.index], self.layers[-1].errors(self.y), givens=test_givens)
params = self.layers[0].params + self.layers[1].params + self.layers[2].params + self.layers[3].params
cost = self.layers[-1].negative_log_likelihood(self.y)
grads = T.grad(cost, params)
updates = [(param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads)]
train_givens = {self.x: data[self.index * batch_size: (self.index + 1) * batch_size], self.y: labels[self.index * batch_size: (self.index + 1) * batch_size]}
self.train_model = theano.function([self.index], cost, updates=updates, givens=train_givens)
patience, patience_increase = 10000, 2
validation_frequency = min(n_train_batches, patience / 2)
epoch, count = 0, 0
start_time = time.time()
n_iters = n_epochs*n_train_batches
logger.info("Fitting Classifier")
logger.debug("{0} epochs, {1} batches, {2} iterations".format(n_epochs, n_train_batches, n_iters))
while epoch < n_epochs and patience > count:
epoch += 1
for minibatch_index in xrange(n_train_batches):
count = (epoch - 1) * n_train_batches + minibatch_index
if count % 50 == 0:
percentage = round(100.0*count/n_iters, 2)
if percentage == 0:
time_stamp = "Null"
else:
time_stamp = datetime.utcfromtimestamp((time.time()-start_time)*(100.0/percentage)+start_time)
logger.info("training is {0}% complete (Completion at {1})".format(round(percentage, 2), time_stamp))
train_cost = self.train_model(minibatch_index)
if (count + 1) % validation_frequency == 0:
testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
test_score = numpy.mean(testlosses)
logger.info('Test error of {0}% achieved on Epoch {1} Iteration {2}'.format(test_score*100.0, epoch, count+1))
logger.debug("Iteration number {0}".format(count))
logger.debug('Optimization complete.')
logger.debug('Conducting final model testing')
testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
test_score = numpy.mean(testlosses)
t_taken = int((time.time()-start_time)/60.0)
logger.info('Training Complete')
logger.info('Test score of {0}%, training time {1}m'.format(test_score*100.0, t_taken))
if self.args_save:
self.save()
def predict(self, x_data, batch_size=500):
assert isinstance(x_data, numpy.ndarray), "input features must be a numpy array"
assert len(x_data.shape) == 2, "it must be an array of feature vectors"
logger.info('classifier prediction called')
logger.debug('x_data shape: {0}'.format(x_data.shape))
logger.debug('forming prediction function')
x_data = Scripts.shared_dataset(data_x=x_data)
givens = {self.x: x_data[self.index * batch_size: (self.index + 1) * batch_size]}
pred_model = theano.function(inputs=[self.index], outputs=self.layers[-1].y_pred, givens=givens, on_unused_input='warn', allow_input_downcast=True)
logger.debug('input shape: {0}'.format(x_data.get_value(borrow=True).shape))
logger.info('beginning prediction on x_data')
n_batches = x_data.get_value(borrow=True).shape[0]/batch_size
result = []
for batch_index in range(n_batches):
logger.debug('processing batch {0}'.format(batch_index))
batch_result = pred_model(batch_index)
logger.debug('result generated')
result = numpy.hstack((result, batch_result))
logger.debug('output shape: {0}'.format(len(result)))
# batch size, rows, columns, channels.
return result
def score(self, test_data, test_labels, batch_size=500):
logger.info('Generating Classification Score')
logger.debug('creating shared datasets')
test_data, test_labels = Scripts.shared_dataset(data_x=test_data, data_y=test_labels)
logger.debug('producing batch information')
n_test_batches = test_data.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
logger.debug('generating theano functions')
test_givens = {self.x: test_data[self.index * batch_size: (self.index + 1) * batch_size], self.y: test_labels[self.index * batch_size: (self.index + 1) * batch_size]}
test_model = theano.function(inputs=[self.index], outputs=self.layers[-1].errors(self.y), givens=test_givens, on_unused_input='warn')
logger.debug('producing test results')
losses = [test_model(i) for i in range(n_test_batches)]
return 1.0-numpy.mean(losses)
def score_report(self, y_test, y_pred):
scores = sklearn.metrics.classification_report(y_test, y_pred)
logger.info("\n"+scores)
def save(self, path="DeepConvolution.pkl"):
path = os.path.join(os.path.split(__file__)[0], path)
logger.info("Saving layers to {0}".format(path))
with open(path, 'wb') as output:
pickle.dump(self.layers, output, pickle.HIGHEST_PROTOCOL)
logger.debug("Successfully saved")
def load(self, path="DeepConvolution.pkl"):
path = os.path.join(os.path.split(__file__)[0], path)
logger.info("Loading layers from {0}".format(path))
assert os.path.exists(path), "Specified Path is not valid"
with open(path, "rb") as input_file:
self.layers = pickle.load(input_file)
logger.debug("Successfully loaded")
| bsd-2-clause |
breeezzz/local-bitcoins-api | LocalBitcoins/market_depth.py | 1 | 6253 | '''
Created on 7 Jun 2013
@author: Jamie
'''
import urllib2
import math
import re
import itertools
import argparse
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
markets = {'UK': {'url': 'gb/united%20kingdom/', 'curr': 'GBP'},
'USA': {'url': 'us/united%20states/', 'curr': 'USD'},
'GERMANY': {'url': 'de/germany/', 'curr': 'EUR'},
'ITALY': {'url': 'it/italy/', 'curr': 'EUR'},
'SPAIN': {'url': 'es/spain/', 'curr': 'EUR'},
'AUSTRALIA': {'url': 'au/australia/', 'curr': 'AUD'},
'ARGENTINA': {'url': 'ar/argentina/', 'curr': 'ARS'},
'NETHERLANDS': {'url': 'nl/netherlands/', 'curr': 'EUR'},
'BRAZIL': {'url': 'br/brazil/', 'curr': 'BRL'},
'FRANCE': {'url': 'fr/france/', 'curr': 'EUR'},
'GBP': {'url': 'gbp/', 'curr': 'GBP'},
'USD': {'url': 'usd/', 'curr': 'USD'},
'EUR': {'url': 'eur/', 'curr': 'EUR'},
}
methods = {'NATIONAL_BANK_TRANSFER': 'national-bank-transfer/'}
method = ''
buy_url = 'https://localbitcoins.com/buy-bitcoins-online/'
sell_url = 'https://localbitcoins.com/sell-bitcoins-online/'
def get_ads_dict(soup, buy_sell):
prices = get_prices(soup)
users = get_users(soup)
amounts = get_amounts(soup)
amounts = [a/p for a,p in zip(amounts, prices)] # To give amount in BTC
currency = get_currency(soup)
methods = get_methods(soup)
lists = set(zip(prices, users, amounts, currency))
if buy_sell == 'buy':
sorted_ads = sorted(lists)
elif buy_sell == 'sell':
sorted_ads = sorted(lists)[::-1]
prices = [item[0] for item in sorted_ads]
users = [item[1] for item in sorted_ads]
amounts = [item[2] for item in sorted_ads]
currency = [item[3] for item in sorted_ads]
depth = get_depth(amounts)
ads_dict = {'users': users, 'prices': prices, 'amounts': amounts,
'depth': depth, 'currency': currency, 'methods': methods}
return ads_dict
def get_prices(soup):
''' Returns a list of prices '''
prices = soup.find_all('td', attrs={'class':"column-price"})
prices = [float(re.findall("\d+.\d+", price.get_text())[0]) for price in prices]
return prices
def get_currency(soup):
''' Returns a list of currencies '''
prices = soup.find_all('td', attrs={'class':"column-price"})
currencies = [price.get_text().split()[-1] for price in prices]
return currencies
def get_methods(soup):
''' Returns a list of payment methods '''
methods = soup.find_all('tr', attrs={'class':"clickable"})
methods = [method.get_text().split('\n')[-7].strip() for method in methods]
return methods
def get_users(soup):
''' Returns a list of users '''
users = soup.find_all('td', attrs={'class':"column-user"})
users = [user.get_text().split()[0] for user in users]
return users
def get_amounts(soup):
''' Returns a list of amounts '''
raw_amounts = soup.find_all('td', attrs={'class':"column-limit"})
amounts = []
for amount in raw_amounts:
try:
amounts += [float(amount.get_text().split()[2])]
except:
amounts += [0.0]
return amounts
def get_depth(amounts):
''' Generates the cumulative amount for each point on the curve '''
cum_amounts = []
cum_amount = 0
for amount in amounts:
cum_amount += amount
cum_amounts += [cum_amount]
return cum_amounts
def get_buy_curve(market):
response = urllib2.urlopen(buy_url + market['url'] + method)
soup = BeautifulSoup(response)
buy_ads = get_ads_dict(soup, 'buy')
buy_prices = [i for i,j in zip(buy_ads['prices'], buy_ads['currency']) if j == market['curr']]
buy_depth = [i for i,j in zip(buy_ads['depth'], buy_ads['currency']) if j == market['curr']]
buy_prices = double_list(buy_prices)[1:]
buy_depth = double_list(buy_depth)[:-1]
return buy_prices[:-2], buy_depth[:-2]
def get_sell_curve(market):
response = urllib2.urlopen(sell_url + market['url'] + method)
soup = BeautifulSoup(response)
sell_ads = get_ads_dict(soup, 'sell')
sell_prices = [i for i,j in zip(sell_ads['prices'], sell_ads['currency']) if j == market['curr']][::-1]
sell_depth = [i for i,j in zip(sell_ads['depth'], sell_ads['currency']) if j == market['curr']][::-1]
sell_prices = double_list(sell_prices)[1:]
sell_depth = double_list(sell_depth)[:-1]
return sell_prices, sell_depth
def plot_chart(ax, buy, sell):
ax.plot(buy[0], buy[1], color='r')
ax.plot(sell[0], sell[1], color='g')
def double_list(list_in):
iters = [iter(list_in), iter(list_in)]
return list(it.next() for it in itertools.cycle(iters))
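# Illustration (made-up values): double_list([1, 2, 3]) returns [1, 1, 2, 2, 3, 3].
# The buy/sell curves above use the doubled price and depth lists, offset by one
# element, to draw the market depth as horizontal steps rather than sloped lines.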
def get_bid(country):
market = markets[country]
response = urllib2.urlopen(buy_url + market['url'] + method)
soup = BeautifulSoup(response)
buy_ads = get_ads_dict(soup, 'buy')
bid = buy_ads['prices'][0]
return bid
def get_ask(country):
market = markets[country]
response = urllib2.urlopen(sell_url + market['url'] + method)
soup = BeautifulSoup(response)
sell_ads = get_ads_dict(soup, 'sell')
ask = sell_ads['prices'][0]
return ask
def make_charts(*args):
if len(args[0].countries) == 0:
selection = ['UK','USA','SPAIN','FRANCE','GERMANY','BRAZIL']
else:
selection = args[0].countries
fig = plt.figure()
dim = math.ceil(len(selection)**0.5)
for x, s in enumerate(selection):
market = markets[s]
# method = methods['NATIONAL_BANK_TRANSFER']
ax = fig.add_subplot(dim, dim, x+1)
ax.set_xlabel(market['curr'])
ax.set_ylabel('BTC')
ax.set_title('Local Bitcoins online: %s' % s)
buy_curve = get_buy_curve(market)
sell_curve = get_sell_curve(market)
plot_chart(ax, buy_curve, sell_curve)
plt.tight_layout()
plt.show()
def main():
parser = argparse.ArgumentParser(description='Display charts of the Local Bitcoin market depth.')
parser.add_argument('countries', type=str, nargs='*',
help='optionally specify any number of country names')
args = parser.parse_args()
make_charts(args)
if __name__ == '__main__':
main()
| mit |
planetarymike/IDL-Colorbars | IDL_py_test/027_Eos_B.py | 1 | 5942 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 1., 1.],
[1., 1., 1.],
[0.498039, 0.498039, 0.498039],
[0., 0., 0.513725],
[0., 0., 0.533333],
[0., 0., 0.54902],
[0., 0., 0.564706],
[0., 0., 0.580392],
[0., 0., 0.6],
[0., 0., 0.615686],
[0., 0., 0.568627],
[0., 0., 0.584314],
[0., 0., 0.666667],
[0., 0., 0.682353],
[0., 0., 0.698039],
[0., 0., 0.713725],
[0., 0., 0.733333],
[0., 0., 0.74902],
[0., 0., 0.764706],
[0., 0., 0.780392],
[0., 0., 0.717647],
[0., 0., 0.733333],
[0., 0., 0.831373],
[0., 0., 0.847059],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 0.898039],
[0., 0., 0.913725],
[0., 0., 0.933333],
[0., 0., 0.94902],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 1.],
[0., 0.027451, 0.968627],
[0., 0.0588235, 0.937255],
[0., 0.0901961, 0.905882],
[0., 0.121569, 0.87451],
[0., 0.152941, 0.843137],
[0., 0.184314, 0.811765],
[0., 0.215686, 0.780392],
[0., 0.223529, 0.67451],
[0., 0.25098, 0.643137],
[0., 0.309804, 0.686275],
[0., 0.341176, 0.654902],
[0., 0.372549, 0.623529],
[0., 0.403922, 0.592157],
[0., 0.435294, 0.560784],
[0., 0.466667, 0.529412],
[0., 0.498039, 0.498039],
[0., 0.529412, 0.466667],
[0., 0.505882, 0.392157],
[0., 0.533333, 0.364706],
[0., 0.623529, 0.372549],
[0., 0.654902, 0.341176],
[0., 0.686275, 0.309804],
[0., 0.717647, 0.278431],
[0., 0.74902, 0.247059],
[0., 0.780392, 0.215686],
[0., 0.811765, 0.184314],
[0., 0.843137, 0.152941],
[0., 0.784314, 0.109804],
[0., 0.811765, 0.0823529],
[0., 0.937255, 0.0588235],
[0., 0.968627, 0.027451],
[0., 1., 0.],
[0.0352941, 1., 0.],
[0.0705882, 1., 0.],
[0.105882, 1., 0.],
[0.141176, 1., 0.],
[0.176471, 1., 0.],
[0.192157, 0.898039, 0.],
[0.223529, 0.898039, 0.],
[0.282353, 1., 0.],
[0.317647, 1., 0.],
[0.356863, 1., 0.],
[0.392157, 1., 0.],
[0.427451, 1., 0.],
[0.462745, 1., 0.],
[0.498039, 1., 0.],
[0.533333, 1., 0.],
[0.513725, 0.898039, 0.],
[0.545098, 0.898039, 0.],
[0.639216, 1., 0.],
[0.678431, 1., 0.],
[0.713725, 1., 0.],
[0.74902, 1., 0.],
[0.784314, 1., 0.],
[0.819608, 1., 0.],
[0.854902, 1., 0.],
[0.890196, 1., 0.],
[0.835294, 0.898039, 0.],
[0.866667, 0.898039, 0.],
[1., 1., 0.],
[1., 0.980392, 0.],
[1., 0.964706, 0.],
[1., 0.94902, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.898039, 0.],
[1., 0.882353, 0.],
[0.898039, 0.776471, 0.],
[0.898039, 0.764706, 0.],
[1., 0.831373, 0.],
[1., 0.815686, 0.],
[1., 0.8, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.713725, 0.],
[0.898039, 0.627451, 0.],
[0.898039, 0.611765, 0.],
[1., 0.662745, 0.],
[1., 0.647059, 0.],
[1., 0.631373, 0.],
[1., 0.615686, 0.],
[1., 0.6, 0.],
[1., 0.580392, 0.],
[1., 0.564706, 0.],
[1., 0.54902, 0.],
[0.898039, 0.478431, 0.],
[0.898039, 0.462745, 0.],
[1., 0.498039, 0.],
[1., 0.490196, 0.],
[1., 0.482353, 0.],
[1., 0.47451, 0.],
[1., 0.466667, 0.],
[1., 0.454902, 0.],
[1., 0.447059, 0.],
[1., 0.439216, 0.],
[0.898039, 0.388235, 0.],
[0.898039, 0.380392, 0.],
[1., 0.415686, 0.],
[1., 0.407843, 0.],
[1., 0.4, 0.],
[1., 0.388235, 0.],
[1., 0.380392, 0.],
[1., 0.372549, 0.],
[1., 0.364706, 0.],
[1., 0.356863, 0.],
[0.898039, 0.313725, 0.],
[0.898039, 0.305882, 0.],
[1., 0.329412, 0.],
[1., 0.321569, 0.],
[1., 0.313725, 0.],
[1., 0.305882, 0.],
[1., 0.298039, 0.],
[1., 0.290196, 0.],
[1., 0.282353, 0.],
[1., 0.27451, 0.],
[0.898039, 0.239216, 0.],
[0.898039, 0.231373, 0.],
[1., 0.247059, 0.],
[1., 0.239216, 0.],
[1., 0.231373, 0.],
[1., 0.223529, 0.],
[1., 0.215686, 0.],
[1., 0.207843, 0.],
[1., 0.196078, 0.],
[1., 0.188235, 0.],
[0.898039, 0.164706, 0.],
[0.898039, 0.156863, 0.],
[1., 0.164706, 0.],
[1., 0.156863, 0.],
[1., 0.14902, 0.],
[1., 0.141176, 0.],
[1., 0.129412, 0.],
[1., 0.121569, 0.],
[1., 0.113725, 0.],
[1., 0.105882, 0.],
[0.898039, 0.0862745, 0.],
[0.898039, 0.0823529, 0.],
[1., 0.0823529, 0.],
[1., 0.0745098, 0.],
[1., 0.0627451, 0.],
[1., 0.054902, 0.],
[1., 0.0470588, 0.],
[1., 0.0509804, 0.],
[1., 0.0313725, 0.],
[1., 0.0235294, 0.],
[0.898039, 0.0117647, 0.],
[0.898039, 0.00392157, 0.],
[1., 0., 0.],
[0.992157, 0., 0.],
[0.984314, 0., 0.],
[0.976471, 0., 0.],
[0.968627, 0., 0.],
[0.960784, 0., 0.],
[0.952941, 0., 0.],
[0.945098, 0., 0.],
[0.843137, 0., 0.],
[0.839216, 0., 0.],
[0.921569, 0., 0.],
[0.917647, 0., 0.],
[0.909804, 0., 0.],
[0.901961, 0., 0.],
[0.894118, 0., 0.],
[0.886275, 0., 0.],
[0.878431, 0., 0.],
[0.870588, 0., 0.],
[0.776471, 0., 0.],
[0.768627, 0., 0.],
[0.847059, 0., 0.],
[0.843137, 0., 0.],
[0.835294, 0., 0.],
[0.827451, 0., 0.],
[0.819608, 0., 0.],
[0.811765, 0., 0.],
[0.803922, 0., 0.],
[0.796078, 0., 0.],
[0.709804, 0., 0.],
[0.701961, 0., 0.],
[0.772549, 0., 0.],
[0.768627, 0., 0.],
[0.760784, 0., 0.],
[0.752941, 0., 0.],
[0.745098, 0., 0.],
[0.737255, 0., 0.],
[0.729412, 0., 0.],
[0.721569, 0., 0.],
[0.643137, 0., 0.],
[0.635294, 0., 0.],
[0.698039, 0., 0.],
[0.690196, 0., 0.],
[0.686275, 0., 0.],
[0.678431, 0., 0.],
[0.670588, 0., 0.],
[0.662745, 0., 0.],
[0.654902, 0., 0.],
[0.647059, 0., 0.],
[0.576471, 0., 0.],
[0.568627, 0., 0.],
[0.623529, 0., 0.],
[0.615686, 0., 0.],
[0.611765, 0., 0.],
[0.603922, 0., 0.],
[0.596078, 0., 0.],
[0.588235, 0., 0.],
[0.580392, 0., 0.],
[0.572549, 0., 0.],
[0.509804, 0., 0.],
[0.501961, 0., 0.],
[0.54902, 0., 0.],
[0.541176, 0., 0.],
[0.537255, 0., 0.],
[0.529412, 0., 0.],
[0.521569, 0., 0.],
[0.513725, 0., 0.],
[0.505882, 0., 0.],
[0.498039, 0., 0.],
[0.443137, 0., 0.],
[0.435294, 0., 0.],
[0.47451, 0., 0.],
[0.466667, 0., 0.],
[0.458824, 0., 0.],
[0.458824, 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
williamleif/histwords | statutils/plothelper.py | 2 | 5401 | import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
def trendline(xd, yd, order=1, c='r', alpha=1, plot_r=False, text_pos=None):
"""Make a line of best fit"""
#Calculate trendline
coeffs = np.polyfit(xd, yd, order)
intercept = coeffs[-1]
slope = coeffs[-2]
if order == 2: power = coeffs[0]
else: power = 0
minxd = np.min(xd)
maxxd = np.max(xd)
xl = np.array([minxd, maxxd])
yl = power * xl ** 2 + slope * xl + intercept
#Plot trendline
plt.plot(xl, yl, color=c, alpha=alpha)
    # Calculate the Pearson correlation coefficient
r = sp.stats.pearsonr(xd, yd)[0]
if plot_r == False:
        # Annotate the plot with the r value
if text_pos == None:
text_pos = (0.9 * maxxd + 0.1 * minxd, 0.9 * np.max(yd) + 0.1 * np.min(yd),)
plt.text(text_pos[0], text_pos[1], '$R = %0.2f$' % r)
else:
        # Return the r value:
return r
def plot_nice_err(x, y, y_err, color='blue', ls='-', lw=1):
plt.plot(x, y, color=color, ls=ls, lw=lw)
plt.fill_between(x, y-y_err, y+y_err, alpha=0.1, color=color)
def plot_word_dist(info, words, start_year, end_year, one_minus=False, legend_loc='upper left'):
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
plot_info = {}
for word in words:
plot_info[word] = info[word]
for title, data_dict in plot_info.iteritems():
x = []; y = []
for year, val in data_dict.iteritems():
if year >= start_year and year <= end_year:
x.append(year)
if one_minus:
val = 1 - val
y.append(val)
color = colors.pop()
plt.plot(x, smooth(np.array(y)), color=color)
plt.scatter(x, y, marker='.', color=color)
plt.legend(plot_info.keys(), loc=legend_loc)
return plt
def get_ccdf(deg_hist, x_min=1):
cum_counts = [0]
degs = range(x_min, np.max(deg_hist.keys()))
total_sum = 0
for deg in degs:
if deg in deg_hist:
deg_count = deg_hist[deg]
else:
deg_count = 0
total_sum += deg_count
cum_counts.append((cum_counts[-1] + deg_count))
return np.array(degs), 1 - np.array(cum_counts[1:]) / float(total_sum)
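# Illustration with made-up numbers: for deg_hist = {1: 5, 2: 3, 3: 2} and x_min=1,
# get_ccdf iterates the degrees range(1, 3) = [1, 2], accumulates counts [5, 8] and
# returns (array([1, 2]), array([0.375, 0.0])); note that the maximum degree itself is
# excluded by the half-open range.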
def plot_word_basic(info, words, start_year, end_year, datatype):
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
plot_info = {}
for word in words:
plot_info[word] = info[word]
for title, data_dict in plot_info.iteritems():
x = []; y = []
for year, val in data_dict[datatype].iteritems():
if year >= start_year and year <= end_year:
x.append(year)
y.append(val)
color = colors.pop()
plt.plot(x, smooth(np.array(y)), color=color)
plt.scatter(x, y, marker='.', color=color)
plt.legend(plot_info.keys())
plt.show()
def plot_basic(plot_info, start_year, end_year):
for title, data_dict in plot_info.iteritems():
x = []; y = []
for year, val in data_dict.iteritems():
if year >= start_year and year <= end_year:
x.append(year)
y.append(val)
plt.plot(x, y)
plt.legend(plot_info.keys())
plt.show()
def plot_smooth(x, y, color='blue', window_len=7, window='hanning', ax=None, lw=1.0, ls="-", **kwargs):
if ax == None:
_, ax = plt.subplots(1,1)
ax.plot(x, smooth(np.array(y), window_len=window_len), color=color, lw=lw, ls=ls)
ax.scatter(x, y, color=color, **kwargs)
return ax
def smooth(x, window_len=7, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
y = y[(window_len/2 - 1):-(window_len/2 + 1)]
return y
| apache-2.0 |
c-PRIMED/puq | test/UniformPDF_test.py | 1 | 4485 | #! /usr/bin/env python
'''
Testsuite for the UniformPDF class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
import scipy.stats as stats
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
rmse = np.sqrt(np.sum((ay - y2)**2))
print("maximum difference is", np.max(np.abs(ay - y2)))
print("RMSE=%s" % rmse)
# assert rmse < .002
assert np.allclose(ay, y2, **args)
def _test_updf(min, max):
options['pdf']['samples'] = 1000
c = UniformPDF(min=min, max=max)
assert isinstance(c, PDF)
x = c.x
y = stats.uniform(min, max-min).pdf(x)
rmse = np.sqrt(np.sum((c.y - y)**2))
print("RMSE=%s" % rmse)
print("MaxError=", np.max(abs(c.y - y)))
assert rmse < 1e-11
def _test_ucdf(min, max):
options['pdf']['samples'] = 1000
c = UniformPDF(min=min, max=max)
cdfy = stats.uniform(min, max-min).cdf(c.x)
rmse = np.sqrt(np.sum((c.cdfy - cdfy)**2))
print("RMSE=%s" % rmse)
print("MaxError=", np.max(abs(c.cdfy - cdfy)))
assert rmse < 1e-11
"""
import matplotlib.pyplot as plt
plt.plot(c.x, c.cdfy, color='green')
plt.plot(c.x, cdfy, color='red')
plt.show()
"""
# test mean, min, max and deviation
def _test_uniform_minmeanmax(min, mean, max):
c = UniformPDF(min=min, mean=mean, max=max)
cmin, cmax = c.range
print("min=%s mean=%s max=%s" % (cmin, c.mean, cmax))
if min is not None:
assert min == cmin
else:
assert cmin == mean - (max - mean)
if max is not None:
assert max == cmax
else:
assert cmax == mean + (mean - min)
if mean is not None:
assert np.allclose(mean, c.mean)
else:
assert np.allclose(c.mean, (min + max) / 2.0)
# test lhs()
def _test_uniform_lhs(min, max):
c = UniformPDF(min=min, max=max)
# test the lhs() function to see if the curve it generates is
# close enough
data = c.ds(10000)
assert len(data) == 10000
assert np.min(data) >= min
assert np.max(data) <= max
dx, dy = _hisplot(data, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.0001)
"""
import matplotlib.pyplot as plt
plt.plot(x, y, color='red')
plt.plot(dx, dy, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
# test lhs1()
def _test_uniform_lhs1(min, max):
c = UniformPDF(min=min, max=max)
data = c.ds1(1000)
xs = data
assert len(xs) == 1000
    assert (min, max) == c.range
# scale [-1,1] back to original size
mean = (min + max)/2.0
xs *= max - mean
xs += mean
dx, dy = _hisplot(xs, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.001)
"""
import matplotlib.pyplot as plt
plt.plot(x, y, color='green')
plt.plot(dx, dy, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
def _test_uniform_random(min, max):
c = UniformPDF(min=min, max=max)
data = c.random(1000000)
assert len(data) == 1000000
dx, dy = _hisplot(data, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.02)
assert np.min(data) >= min
assert np.max(data) <= max
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(x, y, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
def test_updf():
_test_updf(10,20)
_test_updf(-20,-10)
def test_ucdf():
_test_ucdf(100,105)
_test_ucdf(-1,2)
def test_uniform_minmeanmax():
_test_uniform_minmeanmax(0,None,20)
_test_uniform_minmeanmax(None,0.5,2)
_test_uniform_minmeanmax(5,10,15)
_test_uniform_minmeanmax(5,10,None)
def test_uniform_lhs():
_test_uniform_lhs(10,20)
_test_uniform_lhs(-100, -50)
def test_uniform_lhs1():
_test_uniform_lhs1(10,20)
_test_uniform_lhs1(-100, -50)
def test_uniform_random():
_test_uniform_random(10,20)
if __name__ == "__main__":
test_updf()
test_ucdf()
test_uniform_minmeanmax()
test_uniform_lhs()
test_uniform_lhs1()
test_uniform_random()
| mit |
aolindahl/streaking | process_hdf5.py | 1 | 46151 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 15:37:51 2015
@author: Anton O Lindahl
"""
import h5py
import argparse
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import sys
import lmfit
import warnings
from aolPyModules import wiener, wavelet_filter
import time_to_energy_conversion as tof_to_energy
from aolPyModules import plotting as aol_plotting
import area_fill
prompt_roi = [1.508, 1.535]
streak_time_roi = [1.57, 1.66]
wt_th = 0.03
energy_scale_eV = np.linspace(40, 160, 2**9)
time_stamp = 'time_stamp'
data_dir = 'h5_files'
h5_file_name_template = data_dir + '/run{}_all.h5'
response_file_name = data_dir + '/response.h5'
nois_file_name = data_dir + '/noise.h5'
tof_to_energy_conversion_file_name = data_dir + '/time_to_energy.h5'
def h5_file_name_funk(run):
return h5_file_name_template.format(run)
def update_progress(i_evt, n_events, verbose=True):
if (verbose and
((i_evt % (n_events / 100) == 0) or (i_evt == n_events-1))):
progress = (100 * i_evt) / (n_events - 1)
num_squares = 40
base_string = '\r[{:' + str(num_squares) + '}] {}%'
print base_string.format('#'*(progress * num_squares / 100), progress),
sys.stdout.flush()
def list_hdf5_content(group, indent=' '):
for k, v in group.iteritems():
print '{}"{}"'.format(indent, k),
if isinstance(v, h5py.Group):
print 'group with members:'
list_hdf5_content(v, indent=indent + ' ')
elif isinstance(v, h5py.Dataset):
print '\t{} {}'.format(v.shape, v.dtype)
def make_dataset(h5, name, shape, dtype=np.float):
try:
dset = h5.require_dataset(name, shape=shape,
dtype=dtype, exact=True)
except TypeError:
del h5[name]
dset = h5.create_dataset(name, shape=shape, dtype=np.float)
if time_stamp not in dset.attrs.keys():
dset.attrs.create(time_stamp, 0)
return dset
def make_group(h5, name):
try:
group = h5.require_group(name)
except TypeError:
del h5[name]
group = h5.create_group(name)
if time_stamp not in group.attrs.keys():
group.attrs.create(time_stamp, 0)
return group
def older(dset, dset_list):
if (isinstance(dset_list, h5py.Dataset) or
isinstance(dset_list, h5py.Group)):
return dset.attrs[time_stamp] < dset_list.attrs[time_stamp]
return np.any([dset.attrs[time_stamp] < d.attrs[time_stamp] for
d in dset_list])
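# Usage note (hypothetical names): older(derived_dset, h5['raw']) is True when the
# derived dataset was time-stamped before the raw group, i.e. it is stale and should be
# recomputed; the second argument may also be a list of datasets/groups, in which case
# being older than any one of them counts.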
class Timer_object:
def __init__(self, t):
self.attrs = {'time_stamp': t}
class Tims_stamp_warning(Warning):
pass
def time_stamp_object(h5_object):
try:
h5_object.attrs['time_stamp'] = time.time()
except:
warnings.warn('Could not time stamp the object {}.'.format(
repr(h5_object)))
def get_response(plot=False, verbose=0):
try:
with h5py.File(response_file_name, 'r') as f:
response = f['signal'].value
t = f['signal'].attrs[time_stamp]
except IOError:
if verbose > 0:
print 'Could not open response file. Trying to make it.'
response, t = construct_response(verbose=verbose)
if plot:
with h5py.File(response_file_name, 'r') as f:
time_scale = f['time_scale'].value
plt.figure('response')
plt.clf()
plt.plot(time_scale, response)
return response, t
def construct_response(plot=False, verbose=0):
# The Kr runs
runs = [132, 133, 134, 135, 136]
if verbose > 0:
print 'Loading Kr files for prompt determination.'
h5_file_names = [h5_file_name_template.format(run) for run in runs]
h5_list = []
for file_name in h5_file_names:
update_run_contained_derived_data(file_name, verbose=verbose)
h5_list.append(h5py.File(file_name, 'r+'))
time_scale = h5_list[0]['raw/time_scale'].value
response = np.zeros_like(time_scale)
n_shots = 0
sl = slice(time_scale.searchsorted(prompt_roi[0]),
time_scale.searchsorted(prompt_roi[1], side='right'))
for h5 in h5_list:
response[sl] += h5['raw/time_signal'][:, sl].sum(0)
n_shots += h5['raw/event_time_s'].shape[0]
response /= n_shots
response[sl] = wiener.edgeSmoothing(response[sl], smoothPoints=15)
response /= response.sum()
with h5py.File(response_file_name, 'w') as res_file:
dset = res_file.create_dataset('signal', data=response)
dset.attrs.create(time_stamp, time.time())
res_file.create_dataset('time_scale', data=time_scale)
return get_response(plot=plot, verbose=verbose)
def get_file_names_for_noise_spectrum():
return ['/'.join([data_dir, f]) for f in os.listdir(data_dir) if
f.startswith('run') and f.endswith('_all.h5')]
def get_nois_spectrum(plot=False, verbose=0):
try:
with h5py.File(nois_file_name, 'r') as f:
pass
new_noise = False
except IOError:
if verbose > 0:
print 'Could not open response file. Trying to make it.',
print 'In "get_nois_spectrum()".'
construct_nois_spectrum(plot=plot, verbose=verbose)
new_noise = True
if not new_noise:
make_new_noise = False
with h5py.File(nois_file_name, 'r') as f:
noise = f['noise']
h5_file_names = get_file_names_for_noise_spectrum()
for h5_name in h5_file_names:
with h5py.File(h5_name, 'r') as h5:
if older(noise, h5['raw']):
make_new_noise = True
if verbose > 0:
print 'Noise was made earlier than the raw data',
print 'in the file', h5_name, 'Make new noise.'
break
elif False:
print 'Noise was made later than the raw data in',
print 'the file', h5_name
if make_new_noise:
construct_nois_spectrum(plot=plot, verbose=verbose)
with h5py.File(nois_file_name, 'r') as f:
noise = f['noise']
return noise.value, noise.attrs['time_stamp']
def construct_nois_spectrum(plot=False, verbose=0):
h5_file_names = get_file_names_for_noise_spectrum()
for file_name in h5_file_names:
update_run_contained_derived_data(file_name)
empty_shots = []
for i, h5_name in enumerate(h5_file_names):
with h5py.File(h5_name, 'r') as h5:
time_signal_dset = h5['raw/time_signal']
try:
max_signal = h5['max_signal'].value
except KeyError:
max_signal = np.max(time_signal_dset.value, axis=1)
no_x_rays = max_signal < 0.04
if no_x_rays.sum() > 0:
empty_shots.extend(time_signal_dset[no_x_rays, :])
if i == 0:
time_scale = h5['raw/time_scale'].value
if verbose > 0:
print h5_name, 'has', no_x_rays.sum(), 'empty shots'
empty_shots = np.array(empty_shots)
# print len(empty_shots)
# plt.figure('snr')
# plt.clf()
# for shot in empty_shots[:]:
# plt.plot(time_scale, shot)
freq = (np.linspace(0., 1., len(time_scale)) *
1e-3/(time_scale[1] - time_scale[0]))
fft_empty_shots = np.fft.fft(empty_shots, axis=1)
amp = np.mean(np.abs(fft_empty_shots)**2, axis=0)
wt_amp = amp[:]
wt_amp = wavelet_filter.wavelet_filt(amp[1:], thresh=wt_th)
wt_amp[1:] = (wt_amp[1:] + wt_amp[-1:0:-1]) / 2
# plt.figure('fft')
# plt.clf()
# plt.plot(freq, amp)
# plt.plot(freq, wt_amp, 'r')
with h5py.File(nois_file_name, 'w') as f:
dset = f.create_dataset('noise', data=wt_amp)
dset.attrs.create('time_stamp', time.time())
f.create_dataset('freq', data=freq)
return get_nois_spectrum()
def construct_snr_spectrum(h5, plot=False):
noise, t = get_nois_spectrum()
sig_spec = h5['fft_spectrum_mean'].value
freq = h5['fft_freq_axis'].value
wt_spec = wavelet_filter.wavelet_filt(sig_spec, thresh=wt_th)
wt_spec[1:] = (wt_spec[1:] + wt_spec[-1:0:-1]) / 2
snr = (wt_spec - noise) / noise
if plot:
plt.figure('signal and noise')
plt.clf()
plt.semilogy(freq, sig_spec, label='signal')
plt.semilogy(freq, noise, label='noise')
plt.semilogy(freq, wt_spec, label='wt signal')
plt.semilogy(freq, snr, label='snr')
plt.legend(loc='best')
return snr
def check_tof_to_energy_conversion_matrix(plot=False, verbose=0):
try:
with h5py.File(tof_to_energy_conversion_file_name, 'r'):
pass
except IOError:
if verbose > 0:
print 'Could not open the file. Making the conversion matrix.'
construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
_, h5_dict, _ = tof_to_energy.load_tof_to_energy_data(verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'r') as trans_h5:
if not older(
trans_h5['matrix'],
[h5['streak_peak_integral'] for h5 in h5_dict.itervalues()] +
[Timer_object(1437117486)]):
return
if verbose > 0:
print 'Conversion to old, remaking it.'
construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
def construc_tof_to_energy_conversion_matrix(plot=False, verbose=0):
M, t, E, time_to_energy_params, tof_prediction_params = \
tof_to_energy.make_tof_to_energy_matrix(
energy_scale_eV=energy_scale_eV, plot=plot, verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'w') as h5:
dset = h5.create_dataset('matrix', data=M)
dset.attrs.create('time_stamp', time.time())
dset = h5.create_dataset('time_scale', data=t)
dset.attrs.create('time_stamp', time.time())
dset = h5.create_dataset('energy_scale_eV', data=E)
dset.attrs.create('time_stamp', time.time())
for k in time_to_energy_params:
dset = h5.create_dataset(k, data=time_to_energy_params[k].value)
dset.attrs.create('time_stamp', time.time())
for k in tof_prediction_params:
dset = h5.require_dataset(k, (), np.float)
dset[()] = tof_prediction_params[k].value
dset.attrs.create('time_stamp', time.time())
def open_hdf5_file(file_name, plot=False, verbose=0):
try:
# Open the file
h5 = h5py.File(file_name, 'r+')
except BaseException as e:
print 'Could not open the specified hdf5 file "{}".'.format(
file_name)
print 'Message was: {}'.format(e.message)
return -1
return h5
def get_com(x, y):
idx_l, idx_h = fwxm(x, y, 0.0, return_data='idx')
sl = slice(idx_l, idx_h)
return ((x[sl] * y[sl]).sum()) / (y[sl].sum())
def fwxm(x, y, fraction=0.5, return_data=''):
y_max = y.max()
idx_max = y.argmax()
y_f = y_max * fraction
for i in range(idx_max, -1, -1):
if y[i] < y_f:
idx_low = i
break
else:
idx_low = idx_max
for i in range(idx_max, len(x)):
if y[i] < y_f:
idx_high = i
break
else:
idx_high = idx_max
if return_data == 'idx':
return idx_low, idx_high
if return_data == 'limits':
return x[idx_low], x[idx_high]
return (x[idx_low] + x[idx_high]) / 2, x[idx_high] - x[idx_low]
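# Illustration with made-up samples: for x = [0., 1., 2., 3., 4.], y = [0, 1, 2, 1, 0]
# and fraction=0.5, the scan stops at the first samples that fall below half maximum on
# each side (x=0 and x=4, without interpolation), so fwxm returns (2.0, 4.0) as
# (centre, width).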
def get_trace_bounds(x, y,
threshold=0.0, min_width=2,
energy_offset=0,
useRel=False, threshold_rel=0.5,
roi=slice(None)):
amp = y[roi]
scale = x[roi]
dx = np.mean(np.diff(x))
if useRel:
threshold_temp = threshold_rel * np.max(amp[np.isfinite(amp)])
if threshold_temp < threshold:
return [np.nan] * 3
else:
threshold_V = threshold_temp
else:
threshold_V = threshold
nPoints = np.round(min_width/dx)
i_min = 0
for i in range(1, amp.size):
if amp[i] < threshold_V:
i_min = i
continue
if i-i_min >= nPoints:
break
else:
return [np.nan] * 3
i_max = amp.size - 1
for i in range(amp.size-1, -1, -1):
if amp[i] < threshold_V:
i_max = i
continue
if i_max-i >= nPoints:
break
else:
return [np.nan] * 3
if i_min == 0 and i_max == amp.size - 1:
return [np.nan] * 3
# print 'min =', min, 'max =', max
val_max = (scale[i_max] + (threshold_V - amp[i_max]) *
(scale[i_max] - scale[i_max - 1]) /
(amp[i_max] - amp[i_max - 1]))
val_min = (scale[i_min] + (threshold_V - amp[i_min]) *
(scale[i_min + 1] - scale[i_min]) /
(amp[i_min + 1] - amp[i_min]))
return val_min, val_max, threshold_V
def update_run_contained_derived_data(file_name, plot=False, verbose=0):
"""Update derived data based on information only in given file.
    Add some derived datasets to the hdf5 file based on the raw data in the
file. The added datasets are:
- Mean of the FEE gas detectors for each shot: fee_mean
- Maximum TOF waveform signal for each shot: max_signal
- Frequency spectrum averaged over all shots: fft_spectrum_mean
- The corresponding frequency axis: fft_freq_axis
- BC2 energy calculated from the beam position: energy_BC2_MeV
- L3 energy corrected based on the BC2 energy: energy_L3_corrected_MeV
"""
if verbose > 0:
print 'Entering "update_run_contained_derived_data()" ',
print 'with file_name={}'.format(file_name)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
# Make the fee data set
raw_fee_dset = raw_group['FEE_energy_mJ']
fee_mean_dset = make_dataset(h5, 'fee_mean', (n_events,))
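    # make_dataset(), older() and Timer_object() (helpers used throughout this
    # file) implement a simple time-stamp cache: a derived dataset is only
    # recomputed when it is older than its sources, as judged by the
    # 'time_stamp' attribute written after every update.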
if older(fee_mean_dset, raw_group):
if verbose > 0:
print 'Updating fee mean dataset'
fee_mean_dset[:] = raw_fee_dset[:, 0: 4].mean(1)
fee_mean_dset.attrs[time_stamp] = time.time()
# Make max signal dataset
time_signal_dset = raw_group['time_signal']
max_sig_dset = make_dataset(h5, 'max_signal', (n_events,))
if older(max_sig_dset, raw_group):
if verbose > 0:
print 'Get the maximum signal for each shot.'
max_sig_dset[:] = np.max(time_signal_dset, axis=1)
max_sig_dset.attrs['time_stamp'] = time.time()
# Make the frequency spectrum
time_scale = raw_group['time_scale'].value
spectrum_dset = make_dataset(h5, 'fft_spectrum_mean', time_scale.shape)
if older(spectrum_dset, [raw_group, max_sig_dset]):
if verbose > 0:
print 'Compute the frequency spectrum of the data.'
max_signal = max_sig_dset.value
use = max_signal > np.sort(max_signal)[-500:][0]
signal = time_signal_dset[use, :]
spectrum_dset[:] = np.mean(np.abs(np.fft.fft(signal, axis=1))**2,
axis=0)
spectrum_dset.attrs['time_stamp'] = time.time()
freq_axis_dset = make_dataset(h5, 'fft_freq_axis', time_scale.shape)
if older(freq_axis_dset, raw_group):
if verbose > 0:
print 'Updating the frequency axis.'
freq_axis_dset[:] = (np.linspace(0., 1e-3, len(time_scale)) /
(time_scale[1] - time_scale[0]))
freq_axis_dset.attrs['time_stamp'] = time.time()
# Calculate the BC2 energy
bc2_energy_dset = make_dataset(h5, 'energy_BC2_MeV', (n_events, ))
if older(bc2_energy_dset, raw_group):
if verbose > 0:
print 'Calculating BC2 energy for the bpm reading.'
        # Values come from an email from Timothy Maxwell
# The nominal BC2 energy is 5 GeV (was at least when this data was
# recorded). The measurement is the relative offset of the beam
# position in a BPM. The dispersion value is -364.7 mm.
bc2_energy_dset[:] = 5e3 * (1. - raw_group['position_BC2_mm'][:] /
364.7)
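        # Worked example with illustrative numbers: a beam offset of +3.647 mm
        # gives 5e3 * (1 - 3.647 / 364.7) = 4950 MeV, i.e. a 1 % relative
        # offset over the dispersion length maps to a 1 % energy deviation.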
bc2_energy_dset.attrs['time_stamp'] = time.time()
# Calculate the corrected L3 energy
l3_energy_cor_dset = make_dataset(h5, 'energy_L3_corrected_MeV',
(n_events, ))
if older(l3_energy_cor_dset, [raw_group, bc2_energy_dset,
Timer_object(1434096408)]):
if verbose > 0:
print 'Calculating corrected L3 energy.'
l3_energy_cor_dset[:] = (raw_group['energy_L3_MeV'][:] -
(bc2_energy_dset[:] - 5000))
l3_energy_cor_dset.attrs['time_stamp'] = time.time()
# Make the phase cavity time filter
pct_filter_dset = make_dataset(h5, 'pct_filter', (n_events, ),
dtype=bool)
if older(pct_filter_dset, [raw_group, Timer_object(0)]):
print h5.filename
pct0 = raw_group['phase_cavity_times'][:, 0]
pct_filter_dset[:] = (0.4 < pct0) & (pct0 < 1.2)
pct_filter_dset.attrs[time_stamp] = time.time()
h5.close()
def update_with_noise_and_response(file_name, plot=False, verbose=0):
"""Update derived data based on noise and response spectra.
    Noise spectrum and detector response are determined from many runs. With
    these spectra a number of new parameters can be derived. These are:
    - snr_spectrum: Signal to Noise ratio spectrum based on the given noise \
    spectrum and the average spectrum in the current run.
    - filtered_time_signal: Wiener deconvolution of the time signal based on \
    the signal to noise ratio and the detector response function.
- streak_peak_center: Center of the streaking peak in the sense of the \
center of mass of the peak in a given ROI. Based on the deconvoluted \
signal.
- streak_peak_integral: Photoline intensity by integration of the \
deconvoluted spectrum in time domain.
"""
# Make sure that the run contained information is up to date.
update_run_contained_derived_data(file_name, plot, verbose-1)
# Open the file.
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
time_scale = raw_group['time_scale'].value
# Make signal to noise ratio.
snr_dset = make_dataset(h5, 'snr_spectrum', time_scale.shape)
spectrum_dset = h5['fft_spectrum_mean']
if older(snr_dset, [spectrum_dset, raw_group, Timer_object(1434015914)]):
if verbose > 0:
print 'Updating the signal to noise ratio.',
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name)
snr_dset[:] = construct_snr_spectrum(h5, plot=plot)
snr_dset.attrs['time_stamp'] = time.time()
# Deconvolute the response function
time_signal_dset = raw_group['time_signal']
deconv_time_signal_dset = make_dataset(h5, 'filtered_time_signal',
time_signal_dset.shape)
if older(deconv_time_signal_dset, [raw_group, snr_dset]):
response, t_response = get_response(plot=plot, verbose=verbose-1)
if verbose > 0:
print 'Deconvolving traces.'
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name),
print ' {} events to process.'.format(n_events)
deconvolver = wiener.Deconcolver(snr_dset.value, response)
for i_evt in range(n_events):
deconv_time_signal_dset[i_evt, :] = deconvolver.deconvolve(
time_signal_dset[i_evt, :])
update_progress(i_evt, n_events, verbose)
print ''
deconv_time_signal_dset.attrs['time_stamp'] = time.time()
# Calculate the center of mass of the streak peak
time_com_dset = make_dataset(h5, 'streak_peak_center', (n_events, ))
photo_line_intensity_dset = make_dataset(h5, 'streak_peak_integral',
(n_events, ))
if older(time_com_dset, [deconv_time_signal_dset,
Timer_object(1443006988)]):
if verbose > 0:
print 'Calculating streak peak center in time.',
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name)
streak_sl = slice(np.searchsorted(time_scale, streak_time_roi[0]),
np.searchsorted(time_scale, streak_time_roi[1],
side='right'))
time_scale_streak = time_scale[streak_sl]
####
# Center of mass calculation
# for i_evt in range(n_events):
# time_com_dset[i_evt] = get_com(
# time_scale_streak,
# deconv_time_signal_dset[i_evt, streak_sl])
# update_progress(i_evt, n_events, verbose)
####
# Fit of Gaussian
deconv_time_signal = deconv_time_signal_dset.value
time_com = np.zeros(time_com_dset.shape)
photo_line_intensity = np.zeros(photo_line_intensity_dset.shape)
mean_signal = deconv_time_signal[:, streak_sl].mean(axis=0)
mod = lmfit.models.GaussianModel()
params = lmfit.Parameters()
params.add_many(('amplitude', 1, True, 0),
('center', time_scale_streak[np.argmax(mean_signal)],
True, min(time_scale_streak), max(time_scale_streak)),
('sigma', 1e-3, True, 0))
# fit to mean in order to get start parameters for the shot fits
out = mod.fit(mean_signal, x=time_scale_streak, params=params)
for k in params:
params[k].value = out.params[k].value
for i_evt in range(n_events):
out = mod.fit(deconv_time_signal[i_evt, streak_sl],
params, x=time_scale_streak)
time_com[i_evt] = out.params['center'].value
photo_line_intensity[i_evt] = out.params['amplitude'].value
update_progress(i_evt, n_events, verbose)
if plot:
time_scale_streak = time_scale[streak_sl]
plt.figure('peak finding time domain')
plt.clf()
plt.plot(time_scale_streak, mean_signal)
plt.plot(time_scale_streak, out.best_fit)
if verbose > 0:
print ''
time_com_dset[:] = time_com
time_com_dset.attrs['time_stamp'] = time.time()
photo_line_intensity_dset[:] = photo_line_intensity
photo_line_intensity_dset.attrs['time_stamp'] = time.time()
h5.close()
def update_with_time_to_energy_conversion(file_name, plot=False, verbose=0):
""" Make derived data based on time to energy conversion."""
update_with_noise_and_response(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
deconv_time_signal_dset = h5['filtered_time_signal']
energy_scale_dset = make_dataset(h5, 'energy_scale_eV',
energy_scale_eV.shape)
energy_trace_dset = make_dataset(h5, 'energy_signal',
(n_events, len(energy_scale_eV)))
check_tof_to_energy_conversion_matrix(verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'r') as tof_to_e_h5:
if older(energy_scale_dset, [tof_to_e_h5['matrix'],
deconv_time_signal_dset,
Timer_object(1443190000)]):
if verbose > 0:
print 'Updating time to energy conversion.',
print ' In "update_with_time_to_energy_conversion()"',
print ' with {}'.format(file_name)
# Get the transformation matrix from file
M = tof_to_e_h5['matrix'].value
# Update the energy scale
energy_scale_dset[:] = tof_to_e_h5['energy_scale_eV'].value
energy_scale_dset.attrs['time_stamp'] = time.time()
# Get the photon energy prediction parameters
params = (tof_to_energy.photon_energy_params() +
tof_to_energy.tof_prediction_params())
for k in params:
params[k].value = tof_to_e_h5[k].value
if verbose > 0:
print 'Computing energy spectra.'
for i_evt in range(n_events):
# Energy spectra
energy_trace_dset[i_evt, :] = M.dot(
deconv_time_signal_dset[i_evt, :])
update_progress(i_evt, n_events, verbose)
if verbose > 0:
print ''
energy_trace_dset.attrs['time_stamp'] = time.time()
# Calculate energy trace properties
spectral_properties_group = h5.require_group('spectral_properties')
spectral_center_dset = make_dataset(spectral_properties_group,
'center_eV', (n_events, ))
spectral_width_dset = make_dataset(spectral_properties_group,
'width_eV', (n_events, ))
spectral_threshold_dset = make_dataset(spectral_properties_group,
'threshold', (n_events, ))
spectral_gaussian_center_dset = make_dataset(spectral_properties_group,
'gaussian_center',
(n_events,))
if older(spectral_center_dset, [energy_trace_dset,
Timer_object(1443421560)]):
energy_scale = energy_scale_dset[:]
sl = slice(np.searchsorted(energy_scale, 75),
np.searchsorted(energy_scale, 125))
energy_scale = energy_scale[sl]
model = lmfit.models.GaussianModel()
if verbose > 0:
print 'Calculating spectral center and width:',
print 'In "update_with_time_to_energy_conversion()"',
print 'with {}'.format(file_name)
for i_evt in range(n_events):
energy_trace = energy_trace_dset[i_evt, sl]
t_start, t_end, spectral_threshold_dset[i_evt] = \
get_trace_bounds(energy_scale,
energy_trace,
threshold=8e-5,
min_width=3,
# useRel=True,
# threshold_rel=0.3
)
center = (t_start + t_end) / 2
spectral_center_dset[i_evt] = center
width = t_end - t_start
spectral_width_dset[i_evt] = width
# Calculate center of mass
peak_sl = slice(energy_scale.searchsorted(t_start - width/2),
energy_scale.searchsorted(t_end + width/2,
side='right'))
peak_trace = energy_trace[peak_sl]
peak_scale = energy_scale[peak_sl]
# spectral_com_dset[i_evt] = (np.sum(peak_scale * peak_trace) /
# np.sum(peak_trace))
if len(peak_trace) > 3:
out = model.fit(peak_trace, x=peak_scale,
center=center, sigma=width/4,
amplitude=peak_trace.max() * width / 2)
spectral_gaussian_center_dset[i_evt] = out.values['center']
else:
spectral_gaussian_center_dset[i_evt] = np.nan
update_progress(i_evt, n_events, verbose)
spectral_center_dset.attrs['time_stamp'] = time.time()
spectral_width_dset.attrs['time_stamp'] = time.time()
spectral_threshold_dset.attrs['time_stamp'] = time.time()
spectral_gaussian_center_dset.attrs['time_stamp'] = time.time()
if plot:
selected_shots = list(np.linspace(0, n_events, 16, endpoint=False))
plt.figure('peak properties')
plt.clf()
_, ax_list = plt.subplots(4, 4, sharex=True, sharey=True,
num='peak properties')
energy_scale = energy_scale_dset[:]
sl = slice(np.searchsorted(energy_scale, 75),
np.searchsorted(energy_scale, 130))
energy_scale = energy_scale[sl]
for i, shot in enumerate(selected_shots):
energy_trace = energy_trace_dset[shot, :]
ax = ax_list.flatten()[i]
# plt.plot(energy_scale - pe_energy_prediction_dset[shot],
ax.plot(energy_scale, energy_trace[sl])
c = spectral_center_dset[shot]
w = spectral_width_dset[shot]
th = spectral_threshold_dset[shot]
ax.plot([c-w/2, c+w/2], [th] * 2)
# Calculate main photoline area
main_photoline_area = make_dataset(spectral_properties_group,
'main_photoline_area', (n_events, ))
if older(main_photoline_area, energy_trace_dset):
if verbose:
print 'Computing photoline area'
e_scale = energy_scale_dset.value
dE = np.mean(np.diff(e_scale))
e_slice = slice(np.searchsorted(e_scale, 55), None)
for i_evt in range(n_events):
raw_A, _ = area_fill.zero_crossing_area(
energy_trace_dset[i_evt, e_slice])
main_photoline_area[i_evt] = raw_A * dE
update_progress(i_evt, n_events, verbose)
time_stamp_object(main_photoline_area)
##########
# Calculate electron energy prediction
e_energy_prediction_params_group = make_group(h5,
'e_energy_prediction_params')
if older(e_energy_prediction_params_group, [spectral_gaussian_center_dset,
Timer_object(1444931900)]):
if verbose > 0:
print 'Fit the electron energy prediction parameters.',
print 'In "update_with_time_to_energy_conversion()"',
print 'with {}'.format(file_name)
selection = np.isfinite(spectral_gaussian_center_dset.value)
# &
# (0.4 < raw_group['phase_cavity_times'][:, 0]) &
# (raw_group['phase_cavity_times'][:, 0] < 1.1))
spectral_gaussian_center = spectral_gaussian_center_dset[selection]
if len(spectral_gaussian_center) == 0:
return
var_dict = {
'l3_energy': raw_group['energy_L3_MeV'][selection],
'bc2_energy': h5['energy_BC2_MeV'][selection],
# 'fee': h5['fee_mean'][selection],
'e_energy': spectral_gaussian_center
}
prediction_params = \
tof_to_energy.e_energy_prediction_model_start_params(**var_dict)
try:
res = lmfit.minimize(tof_to_energy.e_energy_prediction_model,
prediction_params,
kws=var_dict)
fit_worked = True
except:
fit_worked = False
if verbose > 0 and fit_worked:
print '\nPrediction params:'
lmfit.report_fit(res)
# Create or update the parameters from the fit in the group
for k, v in prediction_params.iteritems():
d = e_energy_prediction_params_group.require_dataset(
k, (), np.float)
d[()] = v.value if fit_worked else np.nan
# Remove old parameters that should not be there
for k in set(e_energy_prediction_params_group.keys()).difference(
set(prediction_params.keys())):
del e_energy_prediction_params_group[k]
e_energy_prediction_params_group.attrs[time_stamp] = time.time()
if plot:
deviation = tof_to_energy.e_energy_prediction_model(
prediction_params, **var_dict)
plt.figure('e energy prediction {}'.format(
h5.filename.split('/')[-1]))
plt.clf()
plt.subplot(221)
# plt.plot(spectral_gaussian_center, deviation, '.')
plt.scatter(spectral_gaussian_center, deviation,
s=4, c=h5['energy_BC2_MeV'][selection],
linewidths=(0,), alpha=1)
plt.xlabel('electron energy (eV)')
plt.ylabel('prediction residual (eV)')
x_range = plt.xlim()
y_range = plt.ylim()
img, _, _ = np.histogram2d(spectral_gaussian_center, deviation,
bins=2**7, range=[x_range, y_range])
img = img.T
plt.subplot(222)
plt.imshow(img, aspect='auto', interpolation='none',
origin='lower', extent=x_range + y_range)
hist, hist_edges = np.histogram(deviation,
bins=2**5, range=(-3, 3))
hist_centers = (hist_edges[: -1] + hist_edges[1:])/2
plt.subplot(223)
gauss_model = lmfit.models.GaussianModel()
fit_out = gauss_model.fit(hist, x=hist_centers)
lmfit.report_fit(fit_out)
plt.bar(hist_edges[:-1], hist, width=np.diff(hist_edges))
plt.plot(hist_centers, fit_out.best_fit, 'r', linewidth=2)
plt.subplot(224)
plt.plot(spectral_gaussian_center, h5['energy_BC2_MeV'][selection],
'.')
def update_with_energy_prediction(file_name, plot=False, verbose=0):
update_with_time_to_energy_conversion(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
prediction_map = {'117': 'h5_files/run118_all.h5',
'114': 'h5_files/run115_all.h5',
'113': 'h5_files/run112_all.h5',
'108': 'h5_files/run109_all.h5',
'101': 'h5_files/run100_all.h5',
'102': 'h5_files/run100_all.h5'}
pe_energy_prediction_dset = make_dataset(
h5, 'photoelectron_energy_prediction_eV', (n_events,))
spectral_properties_group = h5['spectral_properties']
# spectral_gaussian_center_dset = spectral_properties_group[
# 'gaussian_center']
fee_dset = h5['fee_mean']
energy_BC2_dset = h5['energy_BC2_MeV']
energy_L3_dset = raw_group['energy_L3_MeV']
for k, v in prediction_map.iteritems():
if k in file_name:
update_with_time_to_energy_conversion(v, plot=False,
verbose=verbose-1)
            ref_h5 = open_hdf5_file(v)
e_energy_prediction_params_group = \
ref_h5['e_energy_prediction_params']
break
else:
e_energy_prediction_params_group = h5['e_energy_prediction_params']
if older(pe_energy_prediction_dset, [e_energy_prediction_params_group,
fee_dset,
energy_BC2_dset,
raw_group,
Timer_object(1444981500)]):
if verbose > 0:
print 'Updating energy prediction.',
print ' In "update_with_energy_prediction()" with {}'.format(
file_name)
prediction_params = lmfit.Parameters()
for k in e_energy_prediction_params_group:
prediction_params.add(k, e_energy_prediction_params_group[k][()])
var_dict = {
'l3_energy': energy_L3_dset.value,
'bc2_energy': energy_BC2_dset.value,
'fee': fee_dset.value
}
try:
pe_energy_prediction_dset[:] = \
tof_to_energy.e_energy_prediction_model(prediction_params,
**var_dict)
except:
pe_energy_prediction_dset[:] = np.nan
pe_energy_prediction_dset.attrs[time_stamp] = time.time()
##########
    # Make the christmas tree histogram
n_spectral_center_bins = 2**7
n_spectral_width_bins = 2**7
spectral_center_axis_dset = make_dataset(spectral_properties_group,
'center_axis_eV',
(n_spectral_center_bins, ))
spectral_width_axis_dset = make_dataset(spectral_properties_group,
'width_axis_eV',
(n_spectral_width_bins, ))
spectral_histogram_dset = make_dataset(spectral_properties_group,
'histogram',
(n_spectral_width_bins,
n_spectral_center_bins))
spectral_center_dset = spectral_properties_group['center_eV']
spectral_width_dset = spectral_properties_group['width_eV']
pct_filter_dset = h5['pct_filter']
if older(spectral_histogram_dset, [spectral_center_dset,
spectral_width_dset,
pe_energy_prediction_dset,
pct_filter_dset,
Timer_object(2444203160)]):
if verbose > 0:
print 'Making the christmas tree plot.',
print ' In "update_with_energy_prediction()"',
print ' with {}'.format(file_name)
spectral_width_axis_dset[:] = np.linspace(0, 35, n_spectral_width_bins)
spectral_width_axis_dset.attrs['time_stamp'] = time.time()
spectral_center_axis_dset[:] = np.linspace(-20, 20,
n_spectral_center_bins)
spectral_center_axis_dset.attrs['time_stamp'] = time.time()
# I = (pct_filter_dset.value &
# (-0.1 < raw_group['phase_cavity_times'][:, 1]) &
## (raw_group['phase_cavity_times'][:, 1] < 0.05) &
## (0.75 < raw_group['phase_cavity_times'][:, 0]) &
## (raw_group['phase_cavity_times'][:, 0] < 0.85) &
# (0.065 < raw_group['power_meter_V'].value) &
# (raw_group['power_meter_V'].value < 0.1))
I = np.ones(pct_filter_dset.shape, dtype=bool)
hist = aol_plotting.center_histogram_2d(
spectral_center_dset[I] - pe_energy_prediction_dset[I],
spectral_width_dset[I],
spectral_center_axis_dset[:],
spectral_width_axis_dset[:])
hist[hist == 0] = np.nan
spectral_histogram_dset[:] = hist
spectral_histogram_dset.attrs['time_stamp'] = time.time()
if plot:
plt.figure('christmas tree {}'.format(h5.filename.split('/')[-1]))
plt.clf()
plt.imshow(spectral_histogram_dset[:], aspect='auto',
interpolation='none', origin='lower',
extent=(np.min(spectral_center_axis_dset),
np.max(spectral_center_axis_dset),
np.min(spectral_width_axis_dset),
np.max(spectral_width_axis_dset)))
plt.xlabel('center (eV)')
plt.ylabel('width (eV)')
plt.colorbar()
plt.savefig('figures/christmas_tree_{}.png'.format(
h5.filename.split('/')[-1].split('.')[0]))
h5.close()
def load_file(file_name, plot=False, verbose=0):
""" Load file and make sure it is up to date."""
# if verbose > 0:
# print 'Entering "load_file()" with file_name={}'.format(file_name)
update_with_energy_prediction(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
if verbose > 0:
        print 'File {} processed.'.format(h5.filename)
print 'It contains', n_events, 'events.'
if verbose > 1:
list_hdf5_content(h5)
return h5
def touch_all_files(verbose=2):
file_names = ['/'.join([data_dir, f]) for f in os.listdir(data_dir) if
f.startswith('run') and f.endswith('_all.h5')]
for name in file_names:
load_file(name, verbose=verbose)
if __name__ == '__main__':
    # Parse the command line.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--hdf5_file', type=str,
default='h5_files/run108_all.h5',
help='Path to hdf5 file to process')
parser.add_argument('--plot', action='store_true',
help='Display plots. Default: no plots.')
parser.add_argument('-v', '--verbose', action='count',
help='increase output verbosity')
args = parser.parse_args()
# Unpack the parser arguments.
hdf5_file = args.hdf5_file
plot = args.plot
verbose = args.verbose
    # If plotting is requested, run pyplot in interactive mode.
if plot:
plt.ion()
if verbose > 0:
print 'Get the noise spectrum just to make sure it is up to date.'
get_nois_spectrum(plot=plot, verbose=verbose)
# Load the given file.
if verbose > 0:
print 'Load the requested file: {}'.format(hdf5_file)
h5 = load_file(hdf5_file, verbose=verbose, plot=plot)
# Get the raw group of the file.
raw_group = h5['raw']
# Number of events in the file.
n_events = len(raw_group['event_time_s'])
    # Time trace related information.
raw_time = raw_group['time_scale'].value
raw_traces_dset = raw_group['time_signal']
filtered_traces = h5['filtered_time_signal']
# Pulse energy
raw_fee_dset = raw_group['FEE_energy_mJ']
n_fee = raw_fee_dset.shape[1]
# frequency domain
freq_axis = h5['fft_freq_axis'].value
fft_mean = h5['fft_spectrum_mean'].value
snr = h5['snr_spectrum'].value
if plot and False:
if verbose > 0:
print 'Plotting fee correlations.'
plt.figure('fee')
plt.clf()
ax = None
for i in range(n_fee):
for k in range(n_fee):
ax = plt.subplot(n_fee, n_fee, i + k*n_fee + 1,
sharex=ax, sharey=ax)
ax.plot(raw_fee_dset[:, i], raw_fee_dset[:, k], '.')
if i > 0:
plt.setp(ax.get_yticklabels(), visible=False)
if k < n_fee-1:
plt.setp(ax.get_xticklabels(), visible=False)
plt.xlim(xmin=0)
plt.ylim(ymin=0)
if verbose > 0:
print 'Plotting fee histogram.'
plt.figure('fee histogram')
plt.clf()
plt.hist(h5['fee_mean'].value, bins=100)
if plot:
if verbose > 0:
            print 'Plot signal maximum histogram.'
plt.figure('signal hist')
plt.clf()
plt.hist(h5['max_signal'], bins=100)
if plot:
if verbose > 0:
            print 'Plot spectra'
plt.figure('fft')
plt.clf()
plt.semilogy(freq_axis, fft_mean, label='average spectrum')
plt.semilogy(freq_axis, snr, label='snr')
plt.legend(loc='best')
# Plot some traces
if plot:
if verbose > 0:
print 'Plotting traces'
trace_fig = plt.figure('traces {}'.format(hdf5_file))
trace_fig.clf()
raw_mean_tr = raw_traces_dset.value.mean(0)
deconv_mean_tr = filtered_traces.value.mean(0)
rand_event = np.random.randint(n_events)
response, _ = get_response(plot=False, verbose=verbose)
plt.plot(raw_time, raw_traces_dset[rand_event, :],
label='single trace')
plt.plot(raw_time, filtered_traces[rand_event, :],
label='Deconv single trace')
plt.plot(raw_time, raw_mean_tr, label='mean trace')
plt.plot(raw_time, deconv_mean_tr,
label='Deconv mean')
plt.legend(loc='best')
# Plot the phase cavity times
pct = raw_group['phase_cavity_times']
plt.figure('Phase cavity times')
plt.clf()
# pc_selection = (np.isfinite(np.sum(pct, axis=1)) &
# (pct[:, 0] > -2) & (pct[:, 0] < 2) &
# (pct[:, 1] > -2) & (pct[:, 1] < 2))
# (pct[:, 0] > -50) & (pct[:, 0] < 50))
pc_selection = h5['pct_filter'].value
for i in range(2):
plt.subplot(1, 3, i+1)
plt.title('Time {}'.format(i))
hist, hist_edges = np.histogram(pct[pc_selection, i], bins=100)
plt.bar(hist_edges[: -1], hist, width=np.diff(hist_edges))
plt.subplot(133)
plt.plot(pct[pc_selection, 0], pct[pc_selection, 1], '.')
# Plot energy traces and photon energy diagnostics
pe_energy_dset = h5['photoelectron_energy_prediction_eV']
energy_scale = h5['energy_scale_eV'][:]
energy_signal_dset = h5['energy_signal']
selected_shots = np.linspace(0, n_events, 100, endpoint=False, dtype=int)
plt.figure('Energy spectra')
plt.clf()
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
dy = 1e-5
for i, shot in enumerate(selected_shots):
ax1.plot(energy_scale, energy_signal_dset[shot, :] + dy * i)
ax2.plot(energy_scale - pe_energy_dset[shot],
energy_signal_dset[shot, :] + dy * i)
ax2.set_xlim(-20, 25)
# %%
# Plot the photoline area
plt.figure('photoline area')
plt.clf()
spectral_properties_group = h5['spectral_properties']
main_photoline_area = spectral_properties_group[
'main_photoline_area'].value
fee = h5['fee_mean'].value
I = np.isfinite(main_photoline_area) & np.isfinite(fee)
p = np.polyfit(fee[I], main_photoline_area[I], 2)
fee_ax = np.linspace(min(fee[I]), max(fee[I]), 2**5)
plt.subplot(121)
plt.plot(fee, main_photoline_area, '.')
plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
plt.subplot(122)
plt.hist2d(fee[I], main_photoline_area[I], bins=2**7)
plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
| gpl-2.0 |
TiKunze/CanMics | src/python/01_SingleChannel/3pop/EIN/HeHiVariation/RUM_Detektor_HeHi_2ndversion_cluster.py | 1 | 5917 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 17:15:03 2015
@author: Tim Kunze
Copyright (C) 2015, Tim Kunze. All rights reserved.
This script is a modified version of the RUM Detector:
instead of sweeping over He and Hi in every diagram, we sweep over length and intensity of the impulse (as in the activation plot)
"""
###############################################################################
#
# Imports
#
###############################################################################
import numpy as np
import sys
import scipy as sc
import os # to enable some C commands (cwd,listdir)
currpath = '/usr/wrk/people9/tiku2449/EI_RUM/001_Unifying_Framework/RUM_Exploration'
os.chdir(currpath)
import sys
sys.path.append("/usr/wrk/people9/tiku2449/EI_RUM/001_Unifying_Framework")
import Models.JuRClass_fin_006 as FCV
import Simulation_And_Analysis.Sim_Simulation_003 as simulate
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == '-he': he = float(sys.argv[1].replace(',','.')); del sys.argv[1]
elif option == '-hi': hi = float(sys.argv[1].replace(',','.')); del sys.argv[1]
else:
        print 'Invalid option:', option, '->', sys.argv[0]
#%%
###############################################################################
#
# Main
#
###############################################################################
dt = 1000e-6
JR = FCV.JuR()
JR.integ_stepsize = dt
JR.n=2
JR.coupling = np.array([[0.0,0.0],[0.0,0.0]]) #
JR.distanceMatrix = np.array([[0.0,0.01],[0.0,0.0]]) # important!!
JR.init = np.zeros((8,JR.n))
JR.c_e=0 # only relevant for connected areas
JR.c_i=0 # only relevant for connected areas
JR.c_py=30 # only relevant for connected areas
JR.configure()
#%%
###############################################################################
#
## Activation Diagram RUM with modulation of input to II
#
###############################################################################
t_simulation = 5
N=t_simulation/dt
time = np.arange(0,N*dt,dt)
JR.H_e=he
JR.H_i=hi
p_sim_py = np.zeros((N,JR.n))
p_sim_e = np.zeros((N,JR.n))
p_sim_i = np.zeros((N,JR.n))
length_range = np.arange(500,1501,10)
intensity_range = np.arange(50,251,2)
state_grid = np.zeros((len(intensity_range),len(length_range),3))
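# The three planes of state_grid hold, per (intensity, length) pair:
#   [..., 0]  pre-stimulus baseline (sample 999, just before the pulse onset)
#   [..., 1]  post-stimulus steady state (mean over samples 4000:)
#   [..., 2]  peak response (maximum from sample 900 onwards)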
i=0
j=0
for ins in intensity_range:
j=0
for le in length_range:
p_sim_e = np.zeros((N,JR.n))
p_sim_e[1000:1000+le,:] = ins
signal,sig_ei,sig_ii,impact,data = simulate.simulate_network_006(JR,p_sim_py,p_sim_e,p_sim_i,t_simulation)
state_grid[i,j,0] = np.mean(signal[999,0])
state_grid[i,j,1] = np.mean(signal[4000:,0])
state_grid[i,j,2] = np.max(signal[900:,0])
print "len: %.0f | int: %.0f | he: %.2fmV | hi: %2.fmV" %(le, ins, he*1000,hi*1000)
j+=1
i+=1
#dataa=length_range,intensity_range,state_grid
np.save('RUM_Dec_meas_full2_le500t1500i10msInt50t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),state_grid)
#np.save('RUM_Dec_sim_le500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),signal)
#np.save('RUM_Dec_data_le500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),dataa)
#
#
#def cleargrid(state_grid):
# [x,y,z]=np.shape(state_grid)
# for i in range(x):
# for j in range(y):
# if state_grid[i,j,1] > 0.004:
# state_grid[i,j,1] = 0.006
# elif state_grid[i,j,1] < 0.004:
# state_grid[i,j,1] = -0.002
# else:
# raise ValueError('Error')
# print "ERROR"
#
# return state_grid
###
#%% Analysis
#import matplotlib.pyplot as plt
#hirange = np.arange(19,26,1)*1e-3
#herange = np.arange(2.5,4.1,0.25)*1e-3
#
#
#glob_low_val=1e3
#glob_high_val=-1e3
#
#for he in herange:
# for hi in hirange:
# a=np.load('RUM_Detector2_Imple500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000))
#
#
# length_range=a[0]
# intensity_range=a[1]
# state_grid=a[2]
#
# low_lenge=np.min(length_range)
# high_lenge=np.max(length_range)
# low_inte=np.min(intensity_range)
# high_inte=np.max(intensity_range)
#
# if np.min(state_grid[:,:,1]) < glob_low_val:
# glob_low_val=np.min(state_grid[:,:,1])
# print he,hi,glob_low_val
# if np.max(state_grid[:,:,1]) > glob_high_val:
# glob_high_val=np.max(state_grid[:,:,1])
# print he,hi,glob_high_val,1
#
# plt.figure(2)
# plt.clf()
# state_grid=cleargrid(state_grid)
# plt.imshow(np.flipud(state_grid[:,:,1]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
# plt.ylabel('intensity')
# plt.xlabel('length')
# plt.title('Detektor Diagram,he:%.0fms, hi:%.0fpps' %(he*1000,hi*1000))
# cb=plt.colorbar()
# plt.savefig('RUM_Detektor2_Imple500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.pdf' %(he*1000,hi*1000), format='pdf', dpi=1000)
# plt.close()
# #
#
# # baselevel plot pointless here, since the baselevel is the same for all stimuli
# plt.figure(2)
# plt.clf()
# #state_grid=cleargrid(state_grid)
# plt.clf()
# plt.imshow(np.flipud(state_grid[:,:,0]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
# plt.ylabel('intensity')
# plt.xlabel('length')
# plt.title('Detektor Diagram,Baselevels,he:%.0fmV, hi:%.0fmV' %(he*1000,hi*1000))
# plt.colorbar()
# #plt.savefig('RUM_Detektor_Baselevel_Imple%.0fmsInt%.0f_He2.5t7.0i0k05_Hi10t25i0k1_1.pdf' %(lenge,inte), format='pdf', dpi=1000)
#
# plt.close('all')
| gpl-3.0 |
GitYiheng/reinforcement_learning_test | test00_previous_files/mountaincar_q_learning.py | 1 | 4304 | import gym
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from gym import wrappers
from datetime import datetime
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDRegressor
class FeatureTransformer:
def __init__(self, env, n_components=500):
observation_examples = np.array([env.observation_space.sample() for x in range(1000)])
scaler = StandardScaler()
scaler.fit(observation_examples)
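        # A union of RBFSamplers with different gammas approximates an RBF
        # kernel feature map at several length scales (random Fourier
        # features), giving a fixed-size feature vector for the linear
        # per-action SGD models below.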
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=4.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=3.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf5", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf6", RBFSampler(gamma=0.5, n_components=n_components)),
])
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
def transform(self, observation):
scaled = self.scaler.transform(observation)
return self.featurizer.transform(scaled)
class Model:
def __init__(self, env, feature_transformer, learning_rate):
self.env = env
self.models = []
self.feature_transformer = feature_transformer
for i in range(env.action_space.n):
            model = SGDRegressor(learning_rate=learning_rate)
model.partial_fit(feature_transformer.transform([env.reset()]), [0])
self.models.append(model)
def predict(self, s):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
return np.array([m.predict(X)[0] for m in self.models])
def update(self, s, a, G):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
self.models[a].partial_fit(X, [G])
def sample_action(self, s, eps):
if np.random.random() < eps:
return self.env.action_space.sample()
else:
return np.argmax(self.predict(s))
def play_one(model, eps, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 1000:
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
# Update the model
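        # Q-learning target: G = r + gamma * max_a' Q(s', a')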
        G = reward + gamma * np.max(model.predict(observation))
model.update(prev_observation, action, G)
totalreward += reward
iters += 1
return totalreward
def plot_cost_to_go(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
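    # Cost-to-go is plotted as -V(s) = -max_a Q(s, a) on the position/velocity grid.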
Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Cost-To-Go == -V(s)')
ax.set_title("Cost-To-Go Function")
fig.colorbar(surf)
plt.show()
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.show()
def main():
env = gym.make('MountainCar-v0')
ft = FeatureTransformer(env)
model = Model(env, ft, "constant")
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 300
totalrewards = np.empty(N)
for n in range(N):
eps = 0.1*(0.97**n)
totalreward = play_one(model, eps, gamma)
totalrewards[n] = totalreward
print("episode:", n, "total reward:", totalreward)
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", -totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
plot_cost_to_go(env, model)
if __name__ == '__main__':
main() | mit |
aemerick/galaxy_analysis | particle_analysis/sn_rate.py | 1 | 9054 | #import yt.mods as yt
import yt
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import glob
__all__ = ['future_snr', 'snr']
_core_collapse_labels = ["SNII", "II", "2", "SN_II", "TypeII", "Type 2",
"Type II", "type II", "typeII", 'core collapse']
_snia_labels = ["SN1a", "SNIa", "Type1a", "TypeIa", "Type Ia", "Type 1a",
"type 1a", "type Ia", "type ia", "type1a", "typeIa"]
_agb_labels = ['AGB', 'agb']
def future_snr(ds, data, times = None, sn_type = 'II'):
"""
Looks forward from current time to compute future (projected) SN
rate
"""
current_time = ds.current_time.convert_to_units('Myr').value
    if times is None:
        bin_spacing = 2.0 * yt.units.Myr
        times = np.arange(current_time, current_time + 2000.0 + bin_spacing.value,
                          bin_spacing.value) * yt.units.Myr
    elif np.size(times) == 1:
        bin_spacing = times
        if not hasattr(bin_spacing, 'value'):
            bin_spacing = bin_spacing * yt.units.Myr
        times = np.arange(current_time, current_time + 2000.0 + bin_spacing.value,
                          bin_spacing.value) * yt.units.Myr
birth_mass = data['birth_mass'].value
mass = data['particle_mass'].convert_to_units('Msun').value
creation_time = data['creation_time'].convert_to_units('Myr').value
lifetimes = data['dynamical_time'].convert_to_units('Myr').value
pt = data['particle_type']
if any( [sn_type in x for x in _core_collapse_labels]):
collapse_threshold = ds.parameters['IndividualStarDirectCollapseThreshold']
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
pcut = (pt == 11) * (birth_mass <= collapse_threshold) *\
(birth_mass > agb_threshold)
elif any( [sn_type in x for x in _snia_labels]):
pcut = (pt == 12) * (mass > 0.0)
    elif any( [sn_type in x for x in _agb_labels]):
        agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
        pcut = (pt == 11) * (birth_mass < agb_threshold)
explosion_times = creation_time[pcut] + lifetimes[pcut]
explosion_times = explosion_times * yt.units.Myr
times = times.convert_to_units('yr')
snr = np.zeros(np.size(times.value) - 1)
# compute SNR
for i in np.arange(np.size(times) - 1):
dt = times[i+1] - times[i]
dN = np.size( explosion_times[explosion_times <= times[i+1]]) -\
np.size( explosion_times[explosion_times <= times[i]])
snr[i] = dN / dt
return times, snr
def snr(ds, data, times = None, sn_type = 'II'):
"""
    Computes the supernova rate of the desired type for a given dataset
as a function of time. The way the particle types and particle lifetimes
are handled, this can be done for the entire galaxy history using a single
snapshot, rather than having to sort through each dump.
One can provide sample times using "times" argument, or leave it alone for
a 10 Myr sample spacing from t = 0 to t = current_time. If a single value
is provided, this is taken to be the sample spacing (dt), sampled over
t = 0 to t = current_time. Units are assumed to be Myr if not provided.
Accounts for direct collapse model in computing SNII rates using
parameter file.
"""
current_time = ds.current_time.convert_to_units('Myr').value
    if times is None:
        bin_spacing = 10.0 * yt.units.Myr
        times = np.arange(0.0, current_time + bin_spacing.value,
                          bin_spacing.value) * yt.units.Myr
    elif np.size(times) == 1:
        bin_spacing = times
        if not hasattr(bin_spacing, 'value'):
            bin_spacing = bin_spacing * yt.units.Myr
        times = np.arange(0.0, current_time + bin_spacing.value,
                          bin_spacing.value) * yt.units.Myr
# load particle properties
birth_mass = data['birth_mass'].value
mass = data['particle_mass'].convert_to_units("Msun").value
creation_time = data['creation_time'].convert_to_units('Myr').value
metallicity = data['metallicity_fraction'].value
# lifetimes = data['dynamical_time'].convert_to_units('Myr').value
lifetimes = data[('io','particle_model_lifetime')].convert_to_units('Myr').value
pt = data['particle_type'].value
# check to see if there are any SN candidates in the first place
# if not any([ == x for x in np.unique(pt)]):
# print "no supernova of type " + sn_type + " found"
# return times, np.zeros(np.size(times.value) - 1)
# looking for core collapse supernova rate
if any( [sn_type in x for x in _core_collapse_labels]):
pcut = (pt == 13)
# ignore stars that did not actually go supernova
collapse_threshold = ds.parameters['IndividualStarDirectCollapseThreshold']
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
if not any([(x <= collapse_threshold)*(x > agb_threshold) for x in birth_mass[pcut]]):
print("no core collapse supernova present, only direct collapse")
return times, np.zeros(np.size(times.value) - 1)
# slice!
pcut *= (birth_mass <= collapse_threshold)*(birth_mass > agb_threshold)
elif any( [sn_type in x for x in _snia_labels]):
pcut = (pt == 12)
        if np.size(mass[pcut]) < 1:
            return times, np.zeros(np.size(times) - 1)
# SNIa are the ones that are just masless tracers, rest are WD
if not any(mass[pcut] == 0.0):
print("no Type Ia supernova, only white dwarfs")
print("N_WD = %i -- Lowest mass = %.3f Msun"%(np.size(mass[pcut]), np.min(mass[pcut])))
print("Current time = %.2E Myr - Next to explode at t = %.2E Myr"%(current_time, np.min(lifetimes[pcut] + creation_time[pcut])))
return times, np.zeros(np.size(times.value) - 1)
# slice!
pcut *= (mass == 0.0)
elif any( [sn_type in x for x in _agb_labels]):
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
pcut = (pt > 11) # all dead stars
pcut = pcut * (birth_mass <= agb_threshold)
# pcut = (pt == 12)
# pcut *= (mass > 0.0)
# pcut = pcut + ( (pt == 13) * (birth_mass <= agb_threshold))
else:
print("sn_type :" + sn_type + " not a valid option - check spelling")
return -1
#
# now get the explosion times for all supernova
# when stars go SN, lifetime is set to be lifetime*huge_number
# therefore, explosion time can be backed out as:
#
explosion_times = creation_time[pcut] + lifetimes[pcut]/ds.parameters['huge_number']
explosion_times = explosion_times * yt.units.Myr
times = times.convert_to_units('yr')
snr = np.zeros(np.size(times.value) - 1)
# compute SNR
for i in np.arange(np.size(times) - 1):
dt = times[i+1] - times[i]
dN = np.size( explosion_times[explosion_times <= times[i+1]]) -\
np.size( explosion_times[explosion_times <= times[i]])
snr[i] = dN / dt
return times, snr
if __name__ == '__main__':
# example usage - uses most recent data file
log = False
ds_list = np.sort( glob.glob('./DD????/DD????'))
ds = yt.load(ds_list[-1])
data = ds.all_data()
dt = 25.0
times = np.arange(0.0, ds.current_time.convert_to_units('Myr').value + dt, dt)
times = times*yt.units.Myr
times, snrII = snr(ds, data, times = times, sn_type = 'TypeII')
times, snrIa = snr(ds, data, times = times, sn_type = "TypeIa")
center = 0.5 * (times[1:] + times[:-1])
fig, ax = plt.subplots(figsize=(8,8))
snialabel = 'Type Ia x 10'
sniilabel = 'Core Collapse'
ftimes = np.arange(ds.current_time.convert_to_units('Myr').value,
ds.current_time.convert_to_units('Myr').value + 800.0 + 10, 10)
ftimes = ftimes * yt.units.Myr
ftimes, fsnrII = future_snr(ds, data, times = ftimes, sn_type = 'TypeII')
ftimes, fsnrIa = future_snr(ds, data, times = ftimes, sn_type = 'TypeIa')
if log:
ax.plot(center/1.0E6, snrII*1.0E6, color = 'black', lw = 3, ls = '-', label = sniilabel)
ax.plot(center/1.0E6, snrIa*1.0E6*10, color = 'black', lw = 3, ls = '--', label = snialabel)
        ax.semilogy()
else:
ax.step(times[:-1]/1.0E6, snrII*1.0E6, color ='black', lw = 3, ls = '-', label = sniilabel)
ax.step(times[:-1]/1.0E6, snrIa*1.0E6 * 10, color ='orange', lw = 3, ls = '-', label = snialabel)
ax.step(ftimes[:-1]/1.0E6, fsnrII*1.0E6, color = 'black', lw = 3, ls = ':')
ax.step(ftimes[:-1]/1.0E6, fsnrIa*1.0E6 * 10, color = 'orange', lw = 3, ls = ':')
ax.set_xlabel('Time (Myr)')
ax.set_ylabel(r'SNR (Myr$^{-1}$)')
ax.set_ylim( np.min( [np.min(snrIa), np.min(snrII)])*1.0E6,
np.max( [np.max(snrIa), np.max(snrII)])*1.25*1.0E6)
ax.plot( [ds.current_time.convert_to_units('Myr').value]*2, ax.get_ylim(), ls = '--', lw = 3, color = 'black')
ax.legend(loc ='best')
plt.tight_layout()
ax.minorticks_on()
plt.savefig('snr.png')
| mit |
CallaJun/hackprince | indico/matplotlib/tests/test_style.py | 10 | 1977 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
import tempfile
from contextlib import contextmanager
import matplotlib as mpl
from matplotlib import style
from matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION
import six
PARAM = 'image.cmap'
VALUE = 'pink'
DUMMY_SETTINGS = {PARAM: VALUE}
@contextmanager
def temp_style(style_name, settings=None):
"""Context manager to create a style sheet in a temporary directory."""
    settings = DUMMY_SETTINGS if settings is None else settings
temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)
# Write style settings to file in the temp directory.
tempdir = tempfile.mkdtemp()
with open(os.path.join(tempdir, temp_file), 'w') as f:
for k, v in six.iteritems(settings):
f.write('%s: %s' % (k, v))
# Add temp directory to style path and reload so we can access this style.
USER_LIBRARY_PATHS.append(tempdir)
style.reload_library()
try:
yield
finally:
shutil.rmtree(tempdir)
style.reload_library()
def test_available():
with temp_style('_test_', DUMMY_SETTINGS):
assert '_test_' in style.available
def test_use():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
def test_use_url():
with temp_style('test', DUMMY_SETTINGS):
with style.context('https://gist.github.com/adrn/6590261/raw'):
assert mpl.rcParams['axes.facecolor'] == "#adeade"
def test_context():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
# Check that this value is reset after the exiting the context.
assert mpl.rcParams[PARAM] == 'gray'
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| lgpl-3.0 |
rhattersley/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 3 | 8574 | # (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from matplotlib.axes import Axes
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert result == expected
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert result == expected
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert result == expected
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert result == expected
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert result == expected
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert result == expected
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert result == expected
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert result == expected
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert result == expected
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert result == expected
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert result == expected
| lgpl-3.0 |
gibiansky/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 30 | 2249 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |