repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
andim/scipy | scipy/spatial/_plotutils.py | 11 | 4843 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = []
for simplex in hull.simplices:
line_segments.append([(x, y) for x, y in hull.points[simplex]])
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_segments = []
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
line_segments.append([(x, y) for x, y in vor.vertices[simplex]])
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
ptp_bound = vor.points.ptp(axis=0)
line_segments = []
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
line_segments.append([(vor.vertices[i, 0], vor.vertices[i, 1]),
(far_point[0], far_point[1])])
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
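# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original scipy module). It
# assumes SciPy and a Matplotlib version contemporary with this file (the
# helpers above still rely on the old ax.hold()/ishold() API).
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from scipy.spatial import Delaunay, ConvexHull, Voronoi
    pts = np.random.rand(30, 2)                         # random 2-D point set
    delaunay_plot_2d(Delaunay(pts))                     # triangulation edges + points
    convex_hull_plot_2d(ConvexHull(pts))                # hull facets as black segments
    voronoi_plot_2d(Voronoi(pts), show_vertices=False)  # finite + dashed infinite ridges
    plt.show()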
| bsd-3-clause |
lawson-wheatley/Machine-Prediction-py | Linear Regression/preprocess.py | 3 | 3107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# untitled.py
#
# Copyright 2017 LWHEATLEY <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import matplotlib.pyplot as plt
import numbers
import decimal
import math
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import time
np.random.seed(8)
nmapa={}
def converta(mtrx):
a=0
c=1
nr,nc=mtrx.shape
print nr
for col in range(0,nc):
c=0
try:
val= float(mtrx[0,col])
#a+=1
except ValueError:
for row in range(0,nr):
if mtrx[row,col] in nmapa:
mtrx[row,col] = nmapa[mtrx[row,col]]
#b+=1
else:
nmapa[mtrx[row,col]]=c
mtrx[row,col]=c
c+=1
#b+=1
return(mtrx)
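# Editor-added sketch (not in the original script): how converta() re-codes a
# purely categorical column, assuming the module-level nmapa dict starts empty.
# Note that nmapa is shared across columns and across calls, so a string seen
# in an earlier column keeps its earlier integer code.
# >>> m = np.array([['male'], ['female'], ['male']], dtype=object)
# >>> converta(m)                      # also prints the row count (3)
# array([[0],
#        [1],
#        [0]], dtype=object)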
data = pd.read_csv("pima.csv",index_col=0,delimiter=',',)
data2 = pd.read_csv("pima2.csv",index_col=0)
#SPLIT
#xa = data[['AGE','WORKCLASS','FNLWGT','EDUCATION','EDUCATION_NUM','MARITAL_STATUS','OCCUPATION','RELATIONSHIP','RACE','SEX','CAPITAL_GAIN','CAPITAL_LOSS','HOURS_PER_WEEK','NATIVE_COUNTRY']]
xa=converta(data.as_matrix(columns=['AGE','WORKCLASS','FNLWGT','EDUCATION','EDUCATION_NUM','MARITAL_STATUS','OCCUPATION','RELATIONSHIP','RACE','SEX','CAPITAL_GAIN','CAPITAL_LOSS','HOURS_PER_WEEK','NATIVE_COUNTRY', 'INCOME']))
xaa = converta(data2.as_matrix(columns=['AGE','WORKCLASS','FNLWGT','EDUCATION','EDUCATION_NUM','MARITAL_STATUS','OCCUPATION','RELATIONSHIP','RACE','SEX','CAPITAL_GAIN','CAPITAL_LOSS','HOURS_PER_WEEK','NATIVE_COUNTRY', 'INCOME']))
x,y=xa.shape
with open('Processed_DATA.csv','w') as f:
f.write("INDEX,AGE,WORKCLASS,FNLWGT,EDUCATION,EDUCATION_NUM,MARITAL_STATUS,OCCUPATION,RELATIONSHIP,RACE,SEX,CAPITAL_GAIN,CAPITAL_LOSS,HOURS_PER_WEEK,NATIVE_COUNTRY,INCOME\n")
for col in range(x):
f.write(str(col+1)+","+str(xa[col][0])+","+str(xa[col][1])+","+str(xa[col][2])+","+str(xa[col][3])+","+str(xa[col][4])+","+str(xa[col][5])+","+str(xa[col][6])+","+str(xa[col][7])+","+str(xa[col][8])+","+str(xa[col][9])+","+str(xa[col][10])+","+str(xa[col][11])+","+str(xa[col][12])+","+str(xa[col][13])+","+str(xa[col][14])+"\n")
print(str(col+1)+","+str(xa[col][0])+","+str(xa[col][1])+","+str(xa[col][2])+","+str(xa[col][3])+","+str(xa[col][4])+","+str(xa[col][5])+","+str(xa[col][6])+","+str(xa[col][7])+","+str(xa[col][8])+","+str(xa[col][9])+","+str(xa[col][10])+","+str(xa[col][11])+","+str(xa[col][12])+","+str(xa[col][13])+","+str(xa[col][14]))
time.sleep(1)
f.close()
| gpl-3.0 |
WangDequan/fast-rcnn | lib/roi_data_layer/minibatch.py | 44 | 7337 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs = {'data': im_blob,
'rois': rois_blob,
'labels': labels_blob}
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
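# Editor note on the returned blob shapes (inferred from the code above and
# from utils.blob; treat this as a sketch rather than a guarantee):
#   blobs['data']   : (num_images, 3, H, W) image blob
#   blobs['rois']   : (R, 5) rows of [batch_index, x1, y1, x2, y2]
#   blobs['labels'] : (R,) class labels, 0 for background
#   plus (R, 4*num_classes) bbox_targets / bbox_loss_weights when BBOX_REG is on.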
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample foreground regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights
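# Editor-added numeric sketch using commonly cited Fast R-CNN defaults
# (BATCH_SIZE=128, FG_FRACTION=0.25, 2 images per minibatch; these values are
# assumptions, see fast_rcnn/config.py): rois_per_image = 64 and
# fg_rois_per_image = 16, so each image contributes at most 16 foreground RoIs
# (overlap >= FG_THRESH) and the remaining ~48 RoIs are background samples.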
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
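# Editor-added worked example of the 4-of-4*K expansion above: with
# num_classes = 3 and a compact row [cls=2, 0.1, 0.2, 0.3, 0.4], start = 8 and
# end = 12, so the expanded row becomes
#   bbox_targets[ind]      = [0, 0, 0, 0,  0, 0, 0, 0,  0.1, 0.2, 0.3, 0.4]
#   bbox_loss_weights[ind] = [0, 0, 0, 0,  0, 0, 0, 0,  1.0, 1.0, 1.0, 1.0]
# i.e. only the four slots belonging to class 2 carry non-zero values.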
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
# to add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
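# Editor-added worked sketch: for a 2x1 binary PPM whose header reads
# "P6\n2 1\n255\n" followed by 6 raw RGB bytes, data.split(None, 4) yields
# ['P6', '2', '1', '255', <6 bytes>]; the magic and length assertions pass
# and the method returns (2, 1, <6 bytes>).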
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| gpl-3.0 |
jjberry/Autotrace | matlab-version/image_diversity_nogui.py | 3 | 12973 | #!/usr/bin/env python
'''
image_diversity.py
Written by Jeff Berry on Dec 21 2010
purpose:
This script measures the distance from average for each image in the
input set, and copies the specified number of highest scoring images
to a new folder called 'diverse'. If ROI_config.txt is present in the
same folder as the input images, the ROI in that file will be used to
do the measurement. If not present, it will use a hard-coded default ROI.
usage:
python image_diversity.py <num_images> <num_testset> <num_batches>
parameters:
<num_images>: The number of images to use in the diverse set. This number
represents the most diverse images. The script will automatically
add the 50 least diverse images to the set.
<num_testset>: The number of images to save out of the diverse set as a
test set. These images will be stored in 'diverse-test'.
<num_batches>: The number of groups to organize the remaining images into
example:
python image_diversity.py 300 100 5
#This command will result in 250 images in 'diverse' and 100 test images
#in 'diverse-test'. The remaining images will be split into 5 groups in
#'batch1', 'batch2', etc.
---------------------------------------------
Modified by Jeff Berry on Feb 18 2011
reason:
Updated the script to use ROI_config.txt. This made the initial ROI selection
window unnecessary. ROI is now selected using SelectROI.py
---------------------------------------------
Modified by Jeff Berry on Feb 25 2011
reason:
added support for unique tracer codes on .traced.txt files
---------------------------------------------
Modified by Jeff Berry on Jan 26 2012
reason:
added support for splitting diverse images into train and test sets. The script
is no longer interactive due to problems with the raw_input() function interacting
with GTK. Instead, the numbers of train and test images are passed to the script
as arguments (see usage above).
'''
import cv
import os, sys
import operator
import subprocess
from numpy import *
import matplotlib.pyplot as plot
import multiprocessing
CopyQueue = multiprocessing.Queue()
FinishQueue = multiprocessing.Queue()
class CopyThread(multiprocessing.Process):
def run(self):
flag = 'ok'
while (flag != 'stop'):
cmd = CopyQueue.get()
if cmd == None:
flag = 'stop'
else:
#print ' '.join(cmd)
p = subprocess.Popen(cmd)
p.wait()
FinishQueue.put(cmd)
#print "CopyThread stopped"
class ImageWindow:
def __init__(self, data_dir, n, n_test, n_batches, add_lower50='y', make_testset='y'):
#self.onOpen()
files = os.listdir(data_dir)
datafiles = []
for i in files:
if i[-3:] == 'jpg':
datafiles.append(os.path.join(data_dir,i))
self.datafiles = datafiles
self.makeDest()
self.get_tracenames()
# get an image and open it to see the size
img = cv.LoadImageM(self.datafiles[0], iscolor=False)
self.csize = shape(img)
self.img = asarray(img)
#open up the ROI_config.txt and parse
self.pathtofiles = '/'.join(self.datafiles[0].split('/')[:-1]) + '/'
self.config = self.pathtofiles + 'ROI_config.txt'
if (os.path.isfile(self.config)):
print "Found ROI_config.txt"
c = open(self.config, 'r').readlines()
self.top = int(c[1][:-1].split('\t')[1])
self.bottom = int(c[2][:-1].split('\t')[1])
self.left = int(c[3][:-1].split('\t')[1])
self.right = int(c[4][:-1].split('\t')[1])
print "using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right)
else:
print "ROI_config.txt not found"
self.top = 140 #default settings for the Sonosite Titan
self.bottom = 320
self.left = 250
self.right = 580
print "using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right)
roi = img[self.top:self.bottom, self.left:self.right]
self.roisize = shape(roi)
self.get_diverse(n, n_test, n_batches, add_lower50, make_testset)
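# Editor note: the ROI_config.txt layout assumed by the parser in __init__
# (inferred from the c[1]..c[4] indexing; the first line is skipped and its
# exact content is not specified here) looks like:
#   <first line: ignored>
#   top<TAB>140
#   bottom<TAB>320
#   left<TAB>250
#   right<TAB>580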
#def onOpen(self):
#fc = gtk.FileChooserDialog(title='Open Image Files', parent=None,
# action=gtk.FILE_CHOOSER_ACTION_OPEN,
# buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
# gtk.STOCK_OPEN, gtk.RESPONSE_OK))
#g_directory = fc.get_current_folder()
#fc.set_current_folder(g_directory)
#fc.set_default_response(gtk.RESPONSE_OK)
#fc.set_select_multiple(True)
#ffilter = gtk.FileFilter()
#ffilter.set_name('Image Files')
#ffilter.add_pattern('*.jpg')
#ffilter.add_pattern('*.png')
#fc.add_filter(ffilter)
#response = fc.run()
#if response == gtk.RESPONSE_OK:
# self.datafiles = fc.get_filenames()
# g_directory = fc.get_current_folder()
#fc.destroy()
def makeDest(self):
s = self.datafiles[0].split('/')
self.rootdir = '/'.join(s[:-1]) + '/'
self.destpath = '/'.join(s[:-1]) + '/diverse/'
print "images will be saved in", self.destpath
if not os.path.isdir(self.destpath):
os.mkdir(self.destpath)
print "created directory", self.destpath
def get_tracenames(self):
'''This method looks for existing trace files and builds a dictionary mapping
image files to their corresponding trace files. It only works if all files are in the same directory
'''
self.tracenames = {}
tracedir = '/'.join(self.datafiles[0].split('/')[:-1])+ '/'
files = os.listdir(tracedir)
traces = []
for i in files:
if ('traced.txt' in i):
traces.append(tracedir+i)
for i in self.datafiles:
for j in traces:
if i in j:
self.tracenames[i] = j
def get_average_image(self):
files = self.datafiles
ave_img = zeros(self.roisize)
for i in range(len(files)):
img = cv.LoadImageM(files[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
ave_img += roi
ave_img /= len(files)
return ave_img, files
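# Editor note: the "diversity" score computed in get_diverse() below is the
# L1 distance between an image's ROI (scaled to [0, 1]) and this average
# image, i.e. score_i = sum over ROI pixels of |roi_i/255 - ave_img|; larger
# scores mean the frame differs more from the average frame.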
def get_diverse(self, n, n_test, n_batches, add_lower50='y', make_testset='y'):
'''gets the n most diverse images from the data set and copies them into path_to_save'''
if os.path.isdir(self.destpath):
print "calculating average image"
ave_img, files = self.get_average_image()
print "measuring distances from average"
results = {}
for i in range(len(files)):
img = cv.LoadImageM(files[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
dif_img = abs(roi - ave_img)
results[files[i]] = sum(sum(dif_img))
sorted_results = sorted(results.iteritems(), key=operator.itemgetter(1), reverse=True)
#show rank vs. energy plot
count = 1
for (i,j) in sorted_results:
plot.plot(count, j, 'b.')
count += 1
plot.savefig(self.destpath+'rankVenergy.png')
#plot.show()
#cmd = ['open', self.destpath+'rankVenergy.png']
#p = subprocess.Popen(cmd)
#n = int(raw_input("Enter number of images to move: "))
#print n # for some reason, these raw_input calls don't work anymore
#add_lower50 = raw_input("Should I also add the 50 least different images? [Y/n]: ")
#make_testset = raw_input("Should I save out some images as a test set? [Y/n]: ")
if (make_testset == '') or (make_testset.lower() == 'y'):
TESTSET = True
#n_test = int(raw_input("Enter the number of test images to save out: "))
self.testdir = self.destpath[:-1]+'-test/'
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
else:
TESTSET = False
#n_test = 0
numThreads = 4
for i in range(numThreads):
thread = CopyThread()
thread.start()
filenames = []
for (i,j) in sorted_results[:n]:
filenames.append(i)
if (add_lower50 == '') or (add_lower50.lower() == 'y'):
for (i,j) in sorted_results[-50:]:
filenames.append(i)
filenames = array(filenames)
if TESTSET:
inds = arange(len(filenames))
random.shuffle(inds)
testinds = inds[:n_test]
traininds = inds[n_test:]
trainfiles = filenames[traininds]
testfiles = filenames[testinds]
else:
trainfiles = filenames
count = 0
print "saving most diverse images to:", self.destpath
for i in trainfiles:
fname = i.split('/')[-1]
cmd = ['mv', i, self.destpath+fname]
#print count
count += 1
CopyQueue.put(cmd)
if self.tracenames.has_key(i):
cmd2 = ['mv', self.tracenames[i], self.destpath]
count += 1
CopyQueue.put(cmd2)
if TESTSET:
for i in testfiles:
fname = i.split('/')[-1]
cmd = ['mv', i, self.testdir+fname]
CopyQueue.put(cmd)
#print count
count += 1
if self.tracenames.has_key(i):
cmd2 = ['mv', self.tracenames[i], self.testdir]
count += 1
CopyQueue.put(cmd2)
remaining = []
for (i,j) in sorted_results[n:-50]:
remaining.append(i)
remaining = array(remaining)
inds = arange(len(remaining))
random.shuffle(inds)
breaks = linspace(0, len(remaining), n_batches+1).astype(integer)
for i in range(n_batches):
batch_inds = inds[breaks[i]:breaks[i+1]]
batch_files = remaining[batch_inds]
batch_dir = "batch%03d" % (i+1)
dest = os.path.join(self.rootdir, batch_dir)
if not os.path.isdir(dest):
os.mkdir(dest)
for j in batch_files:
fname = j.split('/')[-1]
cmd = ['mv', j, os.path.join(dest, fname)]
count += 1
CopyQueue.put(cmd)
if self.tracenames.has_key(j):
cmd2 = ['mv', self.tracenames[j], dest]
count += 1
CopyQueue.put(cmd2)
# stop the threads
for i in range(numThreads):
CopyQueue.put(None)
# write sorted_results to a .txt file for future reference
# added Mar 10 2011 by Jeff Berry
o = open(self.destpath+'SortedResults.txt', 'w')
for (i,j) in sorted_results:
o.write("%s\t%.4f\n" %(i, j))
o.close()
for i in range(count):
Fcmd = FinishQueue.get()
print ' '.join(Fcmd)
print "done"
roifile = '/'.join(self.datafiles[0].split('/')[:-1]) + '/ROI_config.txt'
if os.path.isfile(roifile):
p = subprocess.Popen(['cp', roifile, self.destpath])
p.wait()
#p = subprocess.Popen(['rm', self.destpath+'/rankVenergy.png'])
#p.wait()
#try:
# gtk.main_quit() #for some reason this is not exiting gracefully
#except RuntimeError:
# #print "press ctrl+c to quit"
# p1 = subprocess.Popen(['ps', '-ef'], stdout=subprocess.PIPE)
# p2 = subprocess.Popen(['grep', '-i', 'image_diversity'], stdin=p1.stdout, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(['awk', "{print $2}"], stdin=p2.stdout, stdout=subprocess.PIPE)
# pid = p3.communicate()[0][:-1]
# print pid
# p = subprocess.Popen(['kill', pid])
if __name__ == "__main__":
ImageWindow(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
#gtk.main()
| mit |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/axes.py | 2 | 301792 | from __future__ import division, generators
import math, sys, warnings, datetime, new
from operator import itemgetter
import itertools
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.spines as mspines
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def _process_plot_format(fmt):
"""
Process a MATLAB style color/line style format string. Return a
(*linestyle*, *marker*, *color*) tuple as a result of the processing. Default
values are ('-', 'None', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
# We need to differentiate grayscale '1.0' from tri_down marker '1'
try:
fmtint = str(int(fmt))
except ValueError:
return linestyle, marker, color # Yes
else:
if fmt != fmtint:
# user definitely doesn't want tri_down marker
return linestyle, marker, color # Yes
else:
# ignore converted color
color = None
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
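# Editor-added examples (illustrative, derived from the parsing rules above):
#   _process_plot_format('ko')  -> ('None', 'o', 'k')   black circles, no line
#   _process_plot_format('r--') -> ('--', 'None', 'r')  red dashed line, no marker
#   _process_plot_format('g')   -> (None, None, <green RGB tuple>)  bare colorspec,
#                                  returned early before the linestyle/marker defaults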
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers.
See also: :meth:`~matplotlib.axes.Axes.set_color_cycle`.
.. Note:: Deprecated 2010/01/03.
Set rcParams['axes.color_cycle'] directly.
"""
rcParams['axes.color_cycle'] = clist
warnings.warn("Set rcParams['axes.color_cycle'] directly",
DeprecationWarning)
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self.set_color_cycle()
def set_color_cycle(self, clist=None):
if clist is None:
clist = rcParams['axes.color_cycle']
self.color_cycle = itertools.cycle(clist)
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
if self.axes.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if self.axes.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
if self.command!='plot':
# the Line2D class can handle unitized data, with
# support for post hoc unit changes etc. Other mpl
# artists, eg Polygon which _process_plot_var_args
# also serves on calls to fill, cannot. So this is a
# hack to say: if you are not "plot", which is
# creating Line2D, then convert the data now to
# floats. If you are plot, pass the raw data through
# to Line2D which will handle the conversion. So
# polygons will not support post hoc conversions of
# the unit type since they are not storing the orig
# data. Hopefully we can rationalize this at a later
# date - JDH
if bx:
x = self.axes.convert_xunits(x)
if by:
y = self.axes.convert_yunits(y)
x = np.atleast_1d(x) #like asanyarray, but converts scalar to array
y = np.atleast_1d(y)
if x.shape[0] != y.shape[0]:
raise ValueError("x and y must have same first dimension")
if x.ndim > 2 or y.ndim > 2:
raise ValueError("x and y can be no greater than 2-D")
if x.ndim == 1:
x = x[:,np.newaxis]
if y.ndim == 1:
y = y[:,np.newaxis]
return x, y
def _makeline(self, x, y, kw, kwargs):
kw = kw.copy() # Don't modify the original kw.
if not 'color' in kw and not 'color' in kwargs.keys():
kw['color'] = self.color_cycle.next()
# (can't use setdefault because it always evaluates
# its second argument)
seg = mlines.Line2D(x, y,
axes=self.axes,
**kw
)
self.set_lineprops(seg, **kwargs)
return seg
def _makefill(self, x, y, kw, kwargs):
try:
facecolor = kw['color']
except KeyError:
facecolor = self.color_cycle.next()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=kw['closed']
)
self.set_patchprops(seg, **kwargs)
return seg
def _plot_args(self, tup, kwargs):
ret = []
if len(tup) > 1 and is_string_like(tup[-1]):
linestyle, marker, color = _process_plot_format(tup[-1])
tup = tup[:-1]
elif len(tup) == 3:
raise ValueError, 'third arg must be a format string'
else:
linestyle, marker, color = None, None, None
kw = {}
for k, v in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if v is not None:
kw[k] = v
y = np.atleast_1d(tup[-1])
if len(tup) == 2:
x = np.atleast_1d(tup[0])
else:
x = np.arange(y.shape[0], dtype=float)
x, y = self._xy_from_xy(x, y)
if self.command == 'plot':
func = self._makeline
else:
kw['closed'] = kwargs.get('closed', True)
func = self._makefill
ncx, ncy = x.shape[1], y.shape[1]
for j in xrange(max(ncx, ncy)):
seg = func(x[:,j%ncx], y[:,j%ncy], kw, kwargs)
ret.append(seg)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0:
return
if len(remaining) <= 3:
for seg in self._plot_args(remaining, kwargs):
yield seg
return
if is_string_like(remaining[2]):
isplit = 3
else:
isplit = 2
for seg in self._plot_args(remaining[:isplit], kwargs):
yield seg
remaining=remaining[isplit:]
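# Editor note on the grouping implemented by _grab_next_args above:
# plot(t1, s1, 'ko', t2, s2) is consumed as the groups (t1, s1, 'ko') and
# (t2, s2); each pass peels off three leading arguments when the third is a
# format string, otherwise two, and hands the group to _plot_args().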
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
xscale=None,
yscale=None,
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' | 'box-forced']
*alpha* float: the alpha transparency (can be None)
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
self.set_axes_locator(kwargs.get("axes_locator", None))
self.spines = self._gen_axes_spines()
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._rasterization_zorder = -30000
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if xscale:
self.set_xscale(xscale)
if yscale:
self.set_yscale(yscale)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
"""
get the axes bounding box in display space; *args* and
*kwargs* are empty
"""
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.spines['bottom'].register_axis(self.xaxis)
self.spines['top'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
.. note::
This method is primarily used by rectilinear projections
of the :class:`~matplotlib.axes.Axes` class, and is meant
to be overridden by new kinds of projection axes that need
different transformations and limits. (See
:class:`~matplotlib.projections.polar.PolarAxes` for an
example.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor.
# It is assumed that this part will have non-linear components
# (e.g. for a log scale).
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def get_xaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._xaxis_transform
elif which=='tick1':
# for cartesian projection, this is bottom spine
return self.spines['bottom'].get_spine_transform()
elif which=='tick2':
# for cartesian projection, this is top spine
return self.spines['top'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._yaxis_transform
elif which=='tick1':
# for cartesian projection, this is bottom spine
return self.spines['left'].get_spine_transform()
elif which=='tick2':
# for cartesian projection, this is top spine
return self.spines['right'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
try:
line._transformed_path.invalidate()
except AttributeError:
pass
def get_position(self, original=False):
'Return the a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
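        For example, a minimal sketch (the rectangle values are purely
        illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            # shrink the axes to the lower-left quarter of the figure
            ax.set_position([0.1, 0.1, 0.4, 0.4])
            ax.plot([0, 1], [0, 1])
            plt.show()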
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
"""Make the original position the active position"""
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def set_axes_locator(self, locator):
"""
set axes_locator
        ACCEPTS: a callable object which takes an axes instance and a
        renderer and returns a bbox.
"""
self._axes_locator = locator
def get_axes_locator(self):
"""
return axes_locator
"""
return self._axes_locator
def _set_artist_props(self, a):
"""set the boilerplate props for artists added to axes"""
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
"""
Returns a dict whose keys are spine names and values are
Line2D or Patch instances. Each element is used to draw a
spine of the axes.
In the standard axes, this is a single line segment, but in
other projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return {
'left':mspines.Spine.linear_spine(self,'left'),
'right':mspines.Spine.linear_spine(self,'right'),
'bottom':mspines.Spine.linear_spine(self,'bottom'),
'top':mspines.Spine.linear_spine(self,'top'),
}
def cla(self):
"""Clear the current axes"""
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
for name,spine in self.spines.iteritems():
spine.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry()
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False, auto=None)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False, auto=None)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleXon = True
self._autoscaleYon = True
self._xmargin = 0
self._ymargin = 0
self._tight = False
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self._current_image = None # strictly for pyplot via _sci, _gci
self.legend_ = None
self.collections = [] # collection.Collection instances
self.containers = [] #
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='baseline',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def get_frame(self):
raise AttributeError('Axes.frame was removed in favor of Axes.spines')
frame = property(get_frame)
def clear(self):
"""clear the axes"""
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
*clist* is a list of mpl color specifiers.
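        For example, a minimal sketch (the colors and data are
        illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.set_color_cycle(['r', 'g', 'b'])
            for offset in range(3):
                # drawn in red, green, blue, in that order
                ax.plot([v + offset for v in range(10)])
            plt.show()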
"""
self._get_lines.set_color_cycle(clist)
self._get_patches_for_fill.set_color_cycle(clist)
def ishold(self):
"""return the HOLD status of the axes"""
return self._hold
def hold(self, b=None):
"""
Call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples::
# toggle hold
hold()
# turn hold on
hold(True)
# turn hold off
hold(False)
When hold is *True*, subsequent plot commands will be added to
the current axes. When hold is *False*, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
============ =====================================
value description
============ =====================================
'box' change physical size of axes
'datalim' change xlim or ylim
'box-forced' same as 'box', but axes can be shared
============ =====================================
        'box' does not allow axes sharing, as this can cause
        unintended side effects. For cases when sharing axes is
        fine, use 'box-forced'.
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
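        For example, to get equal data scaling by adjusting the view
        limits rather than the axes box (a sketch with illustrative
        data)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot([0, 10], [0, 1])
            ax.set_aspect('equal', adjustable='datalim')
            plt.show()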
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' | 'box-forced']
"""
if adjustable in ('box', 'datalim', 'box-forced'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
            raise ValueError(
                'argument must be "box", "datalim", or "box-forced"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
            raise ValueError('argument must be among %s' %
                             ', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def get_data_ratio_log(self):
"""
Returns the aspect ratio of the raw data in log scale.
Will be used when both axis scales are in log.
"""
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
xsize = max(math.fabs(math.log10(xmax)-math.log10(xmin)), 1e-30)
ysize = max(math.fabs(math.log10(ymax)-math.log10(ymin)), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
"""
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
"""
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if self.name != 'polar':
xscale, yscale = self.get_xscale(), self.get_yscale()
if xscale == "linear" and yscale == "linear":
aspect_scale_mode = "linear"
elif xscale == "log" and yscale == "log":
aspect_scale_mode = "log"
elif (xscale == "linear" and yscale == "log") or \
(xscale == "log" and yscale == "linear"):
                if aspect != "auto":
warnings.warn(
'aspect is not supported for Axes with xscale=%s, yscale=%s' \
% (xscale, yscale))
aspect = "auto"
else: # some custom projections have their own scales.
pass
else:
aspect_scale_mode = "linear"
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable in ['box', 'box-forced']:
if aspect_scale_mode == "log":
box_aspect = A * self.get_data_ratio_log()
else:
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
if aspect_scale_mode == "log":
xmin, xmax = math.log10(xmin), math.log10(xmax)
ymin, ymax = math.log10(ymin), math.log10(ymax)
xsize = max(math.fabs(xmax-xmin), 1e-30)
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
if aspect_scale_mode == "log":
dL = self.dataLim
dL_width = math.log10(dL.x1) - math.log10(dL.x0)
dL_height = math.log10(dL.y1) - math.log10(dL.y0)
xr = 1.05 * dL_width
yr = 1.05 * dL_height
else:
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
if aspect_scale_mode == "log":
self.set_ybound((10.**y0, 10.**y1))
else:
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
if aspect_scale_mode == "log":
self.set_xbound((10.**x0, 10.**x1))
else:
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
"""
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot. For details, see
:func:`~matplotlib.pyplot.axis`.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
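        For example, a minimal sketch (the limits are illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(range(10))
            print(ax.axis())          # current (xmin, xmax, ymin, ymax)
            ax.axis([0, 5, -1, 10])   # set explicit [xmin, xmax, ymin, ymax]
            ax.axis('tight')          # or pass a mode string instead
            plt.show()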
"""
if len(v) == 0 and len(kwargs) == 0:
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view(tight=False)
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
emit = kwargs.get('emit', True)
try:
v[0]
except IndexError:
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
auto = False # turn off autoscaling, unless...
if xmin is None and xmax is None:
auto = None # leave autoscaling state alone
xmin, xmax = self.set_xlim(xmin, xmax, emit=emit, auto=auto)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
auto = False # turn off autoscaling, unless...
if ymin is None and ymax is None:
auto = None # leave autoscaling state alone
ymin, ymax = self.set_ylim(ymin, ymax, emit=emit, auto=auto)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]], emit=emit, auto=False)
self.set_ylim([v[2], v[3]], emit=emit, auto=False)
return v
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
"""Return the axes Rectangle frame"""
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
"""Return the legend.Legend instance, or None if no legend is defined"""
return self.legend_
def get_images(self):
"""return a list of Axes images contained by the Axes"""
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
"""Return a list of lines contained by the Axes"""
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
"""Return the XAxis instance"""
return self.xaxis
def get_xgridlines(self):
"""Get the x grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
"""Get the xtick lines as a list of Line2D instances"""
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
"""Return the YAxis instance"""
return self.yaxis
def get_ygridlines(self):
"""Get the y grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
"""Get the ytick lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def _sci(self, im):
"""
helper for :func:`~matplotlib.pyplot.sci`;
do not use elsewhere.
"""
if isinstance(im, matplotlib.contour.ContourSet):
if im.collections[0] not in self.collections:
raise ValueError(
"ContourSet must be in current Axes")
elif im not in self.images and im not in self.collections:
raise ValueError(
"Argument must be an image, collection, or ContourSet in this Axes")
self._current_image = im
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
return self._current_image
def has_data(self):
"""
Return *True* if any artists have been added to axes.
        This should not be used to determine whether the *dataLim*
        needs to be updated, and may not actually be useful for
        anything.
"""
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
"""
Add any :class:`~matplotlib.artist.Artist` to the axes.
Returns the artist.
"""
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
return a
def add_collection(self, collection, autolim=True):
"""
Add a :class:`~matplotlib.collections.Collection` instance
to the axes.
Returns the collection.
"""
label = collection.get_label()
if not label:
collection.set_label('_collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
if collection.get_clip_path() is None:
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
return collection
def add_line(self, line):
"""
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
Returns the line.
"""
self._set_artist_props(line)
if line.get_clip_path() is None:
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
return line
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
Returns the patch.
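        For example, a minimal sketch (the rectangle is illustrative and
        uses data coordinates)::

            import matplotlib.pyplot as plt
            import matplotlib.patches as mpatches

            fig = plt.figure()
            ax = fig.add_subplot(111)
            rect = mpatches.Rectangle((0.2, 0.2), 0.4, 0.3, facecolor='y')
            ax.add_patch(rect)      # clipped to the axes; transData is assumed
            ax.autoscale_view()
            plt.show()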
"""
self._set_artist_props(p)
if p.get_clip_path() is None:
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
return p
def _update_patch_limits(self, patch):
"""update the data limits for patch *p*"""
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
# cannot check for '==0' since unitized data may not compare to zero
if (isinstance(patch, mpatches.Rectangle) and
((not patch.get_width()) or (not patch.get_height()))):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
"""
        Add a :class:`~matplotlib.table.Table` instance to the
list of axes tables
Returns the table.
"""
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
return tab
def add_container(self, container):
"""
Add a :class:`~matplotlib.container.Container` instance
to the axes.
        Returns the container.
"""
label = container.get_label()
if not label:
container.set_label('_container%d'%len(self.containers))
self.containers.append(container)
container.set_remove_method(lambda h: self.containers.remove(container))
return container
def relim(self):
"""
Recompute the data limits based on current artists.
At present, :class:`~matplotlib.collections.Collection`
instances are not supported.
"""
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
"""Update the data lim bbox with seq of xy tups or equiv. 2-D array"""
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
"""Update the data lim bbox with seq of xy tups"""
        # if no data is set currently, the bbox will ignore its
        # limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
"""
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
"""
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
"""Look for unit *kwargs* and update the axis instances as necessary"""
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if self.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if self.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
"""
Return *True* if the given *mouseevent* (in display coords)
is in the Axes
"""
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for both axes on plot commands
"""
return self._autoscaleXon and self._autoscaleYon
def get_autoscalex_on(self):
"""
Get whether autoscaling for the x-axis is applied on plot commands
"""
return self._autoscaleXon
def get_autoscaley_on(self):
"""
Get whether autoscaling for the y-axis is applied on plot commands
"""
return self._autoscaleYon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
self._autoscaleYon = b
def set_autoscalex_on(self, b):
"""
Set whether autoscaling for the x-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
def set_autoscaley_on(self, b):
"""
Set whether autoscaling for the y-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleYon = b
def set_xmargin(self, m):
"""
Set padding of X data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._xmargin = m
def set_ymargin(self, m):
"""
Set padding of Y data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._ymargin = m
def margins(self, *args, **kw):
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin
::
margins(margin)
margins(xmargin, ymargin)
margins(x=xmargin, y=ymargin)
margins(..., tight=False)
All three forms above set the xmargin and ymargin parameters.
All keyword parameters are optional. A single argument
specifies both xmargin and ymargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
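        For example, a minimal sketch (the margin values are
        illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(range(10))
            ax.margins(0.05)            # 5 percent padding on both axes
            ax.margins(x=0.1, y=0.2)    # or set each axis independently
            print(ax.margins())         # query the current values
            plt.show()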
"""
if not args and not kw:
return self._xmargin, self._ymargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
if len(args) == 1:
mx = my = args[0]
elif len(args) == 2:
mx, my = args
        elif len(args) > 2:
            raise ValueError("more than two arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
scalex = (mx is not None)
scaley = (my is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
def set_rasterization_zorder(self, z):
"""
Set zorder value below which artists will be rasterized
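        For example, a sketch assuming a mixed-mode vector backend such
        as PDF or SVG (the zorder values and file name are illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(range(1000), zorder=0)        # below 1: rasterized
            ax.plot(range(0, 2000, 2), zorder=2)  # stays as vector artwork
            ax.set_rasterization_zorder(1)
            fig.savefig('mixed.pdf')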
"""
self._rasterization_zorder = z
def get_rasterization_zorder(self):
"""
Get zorder value below which artists will be rasterized
"""
return self._rasterization_zorder
def autoscale(self, enable=True, axis='both', tight=None):
"""
Convenience method for simple axis view autoscaling.
It turns autoscaling on or off, and then,
if autoscaling for either axis is on, it performs
the autoscaling on the specified axis or axes.
*enable*: [True | False | None]
True (default) turns autoscaling on, False turns it off.
None leaves the autoscaling state unchanged.
*axis*: ['x' | 'y' | 'both']
which axis to operate on; default is 'both'
*tight*: [True | False | None]
If True, set view limits to data limits;
if False, let the locator and margins expand the view limits;
if None, use tight scaling if the only artist is an image,
otherwise treat *tight* as False.
The *tight* setting is retained for future autoscaling
until it is explicitly changed.
Returns None.
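        For example, a minimal sketch (the axis choices are
        illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(range(10))
            ax.autoscale(enable=True, axis='x', tight=True)  # snug x limits
            ax.autoscale(enable=False, axis='y')             # freeze y limits
            plt.show()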
"""
if enable is None:
scalex = True
scaley = True
else:
scalex = False
scaley = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
def autoscale_view(self, tight=None, scalex=True, scaley=True):
"""
Autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
The data limits are not updated automatically when artist
data are changed after the artist has been added to an
Axes instance. In that case, use
:meth:`matplotlib.axes.Axes.relim`
prior to calling autoscale_view.
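        For example, after changing a line's data in place (a sketch
        with illustrative data)::

            import numpy as np
            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            line, = ax.plot(np.arange(10))
            line.set_ydata(np.arange(10) * 100.0)  # changed after add_line
            ax.relim()                              # recompute the data limits
            ax.autoscale_view()                     # then rescale the view
            plt.show()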
"""
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
xlocator = self.xaxis.get_major_locator()
try:
# e.g. DateLocator has its own nonsingular()
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
# Default nonsingular for, e.g., MaxNLocator
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
#### Drawing
@allow_rasterization
def draw(self, renderer=None, inframe=False):
"""Draw everything (plot lines, axes, labels)"""
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
artists = []
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.extend(self.spines.itervalues())
dsu = [ (a.zorder, a) for a in artists
if not a.get_animated() ]
        # add images to dsu if the backend supports compositing.
        # otherwise, do the manual compositing without adding images to dsu.
if len(self.images)<=1 or renderer.option_image_nocomposite():
dsu.extend([(im.zorder, im) for im in self.images])
_do_composite = False
else:
_do_composite = True
dsu.sort(key=itemgetter(0))
# rasterize artists with negative zorder
# if the minimum zorder is negative, start rasterization
rasterization_zorder = self._rasterization_zorder
if len(dsu) > 0 and dsu[0][0] < rasterization_zorder:
renderer.start_rasterizing()
dsu_rasterized = [l for l in dsu if l[0] < rasterization_zorder]
dsu = [l for l in dsu if l[0] >= rasterization_zorder]
else:
dsu_rasterized = []
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
if _do_composite:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
zorder_images = [(im.zorder, im) for im in self.images \
if im.get_visible()]
zorder_images.sort(key=lambda x: x[0])
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0) for z,im in zorder_images]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(mtransforms.TransformedPath(
self.patch.get_path(),
self.patch.get_transform()))
renderer.draw_image(gc, round(l), round(b), im)
gc.restore()
if dsu_rasterized:
for zorder, a in dsu_rasterized:
a.draw(renderer)
renderer.stop_rasterizing()
for zorder, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
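        A minimal blitting-style sketch, assuming an Agg-based
        interactive backend that supports :meth:`copy_from_bbox` and
        :meth:`blit`::

            import numpy as np
            import matplotlib.pyplot as plt

            plt.ion()
            fig = plt.figure()
            ax = fig.add_subplot(111)
            line, = ax.plot(np.random.rand(10), animated=True)
            fig.canvas.draw()           # the initial draw caches the renderer
            background = fig.canvas.copy_from_bbox(ax.bbox)
            for i in range(5):
                fig.canvas.restore_region(background)
                line.set_ydata(np.random.rand(10))
                ax.draw_artist(line)    # redraw only this artist
                fig.canvas.blit(ax.bbox)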
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort(key=lambda x: x[0])
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
@docstring.dedent_interpd
def grid(self, b=None, which='major', axis='both', **kwargs):
"""
Call signature::
grid(self, b=None, which='major', axis='both', **kwargs)
Set the axes grids on or off; *b* is a boolean. (For MATLAB
compatibility, *b* may also be a string, 'on' or 'off'.)
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*.
*which* can be 'major' (default), 'minor', or 'both' to control
whether major tick grids, minor tick grids, or both are affected.
*axis* can be 'both' (default), 'x', or 'y' to control which
set of gridlines are drawn.
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs):
b = True
b = _string_to_bool(b)
if axis == 'x' or axis == 'both':
self.xaxis.grid(b, which=which, **kwargs)
if axis == 'y' or axis == 'both':
self.yaxis.grid(b, which=which, **kwargs)
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =========================================
Keyword Description
============ =========================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*useOffset* [True | False | offset]; if True,
the offset will be calculated as needed;
if False, no offset will be used; if a
numeric offset is specified, it will be
used.
*axis* [ 'x' | 'y' | 'both' ]
*useLocale* If True, format the number according to
the current locale. This affects things
such as the character used for the
decimal separator. If False, use
C-style (English) formatting. The
default setting is controlled by the
axes.formatter.use_locale rcparam.
============ =========================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
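        For example, a minimal sketch (the data and thresholds are
        illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot([1e-7, 2e-7], [1e7, 2e7])
            # scientific notation outside 1e-3..1e3, no offset, both axes
            ax.ticklabel_format(style='sci', scilimits=(-3, 3),
                                useOffset=False, axis='both')
            plt.show()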
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
useLocale = kwargs.pop('useLocale', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
                raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
            raise ValueError("%s is not a valid style value" % style)
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useOffset(useOffset)
if useLocale is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useLocale(useLocale)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useLocale(useLocale)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Convenience method for controlling tick locators.
Keyword arguments:
*axis*
['x' | 'y' | 'both'] Axis on which to operate;
default is 'both'.
*tight*
[True | False | None] Parameter passed to :meth:`autoscale_view`.
Default is None, for no change.
        Remaining keyword arguments are passed directly to the
:meth:`~matplotlib.ticker.MaxNLocator.set_params` method.
Typically one might want to reduce the maximum number
of ticks and use tight bounds when plotting small
subplots, for example::
ax.locator_params(tight=True, nbins=4)
Because the locator is involved in autoscaling,
:meth:`autoscale_view` is called automatically after
the parameters are changed.
This presently works only for the
:class:`~matplotlib.ticker.MaxNLocator` used
by default on linear axes, but it may be generalized.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y)
def tick_params(self, axis='both', **kwargs):
"""
Convenience method for changing the appearance of ticks and
tick labels.
Keyword arguments:
*axis* : ['x' | 'y' | 'both']
Axis on which to operate; default is 'both'.
*reset* : [True | False]
If *True*, set all parameters to defaults
before processing other keyword arguments. Default is
*False*.
*which* : ['major' | 'minor' | 'both']
Default is 'major'; apply arguments to *which* ticks.
*direction* : ['in' | 'out']
Puts ticks inside or outside the axes.
*length*
Tick length in points.
*width*
Tick width in points.
*color*
Tick color; accepts any mpl color spec.
*pad*
Distance in points between tick and label.
*labelsize*
Tick label font size in points or as a string (e.g. 'large').
*labelcolor*
Tick label color; mpl color spec.
*colors*
Changes the tick color and the label color to the same value:
mpl color spec.
*zorder*
Tick and label zorder.
*bottom*, *top*, *left*, *right* : [bool | 'on' | 'off']
controls whether to draw the respective ticks.
*labelbottom*, *labeltop*, *labelleft*, *labelright*
Boolean or ['on' | 'off'], controls whether to draw the
respective tick labels.
Example::
ax.tick_params(direction='out', length=6, width=2, colors='r')
This will make all major ticks be red, pointing out of the box,
and with dimensions 6 points by 2 points. Tick labels will
also be red.
"""
if axis in ['x', 'both']:
xkw = dict(kwargs)
xkw.pop('left', None)
xkw.pop('right', None)
xkw.pop('labelleft', None)
xkw.pop('labelright', None)
self.xaxis.set_tick_params(**xkw)
if axis in ['y', 'both']:
ykw = dict(kwargs)
ykw.pop('top', None)
ykw.pop('bottom', None)
ykw.pop('labeltop', None)
ykw.pop('labelbottom', None)
self.yaxis.set_tick_params(**ykw)
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
"""Return the axis background color"""
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
"""Returns *True* if the x-axis is inverted."""
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleXon attribute.
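        For example, a minimal sketch showing that an inverted axis
        stays inverted (the bounds are illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(range(10))
            ax.invert_xaxis()
            ax.set_xbound(2, 7)       # still drawn right-to-left
            print(ax.get_xbound())    # ascending bounds, despite inversion
            plt.show()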
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower, auto=None)
else:
self.set_xlim(lower, upper, auto=None)
else:
if lower < upper:
self.set_xlim(lower, upper, auto=None)
else:
self.set_xlim(upper, lower, auto=None)
def get_xlim(self):
"""
Get the x-axis range [*left*, *right*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Call signature::
set_xlim(self, *args, **kwargs):
Set the data limits for the xaxis
Examples::
set_xlim((left, right))
set_xlim(left, right)
set_xlim(left=1) # right unchanged
set_xlim(right=1) # left unchanged
Keyword arguments:
*left*: scalar
The left xlim; *xmin*, the previous name, may still be used
*right*: scalar
The right xlim; *xmax*, the previous name, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *x* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *left* (formerly *xmin*) value may be greater than
the *right* (formerly *xmax*).
For example, suppose *x* is years before present.
Then one might use::
            set_xlim(5000, 0)
so 5000 years ago is on the left of the plot and the
present is on the right.
Returns the current xlimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'xmin' in kw:
left = kw.pop('xmin')
if 'xmax' in kw:
right = kw.pop('xmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and iterable(left):
left,right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None: left = old_left
if right is None: right = old_right
if left==right:
warnings.warn(('Attempting to set identical left==right results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return left, right
def get_xscale(self):
return self.xaxis.get_scale()
    get_xscale.__doc__ = "Return the xaxis scale string: %s" % (
        ", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_xscale(self, value, **kwargs):
"""
Call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view(scaley=False)
self._update_transScale()
def get_xticks(self, minor=False):
"""Return the x ticks as a list of locations"""
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
"""
Get the xtick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
"""
Get the x minor tick labels as a list of
:class:`matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
"""
Get the x tick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
@docstring.dedent_interpd
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
def invert_yaxis(self):
"Invert the y-axis."
bottom, top = self.get_ylim()
self.set_ylim(top, bottom)
def yaxis_inverted(self):
"""Returns *True* if the y-axis is inverted."""
bottom, top = self.get_ylim()
return top < bottom
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
bottom, top = self.get_ylim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_ybound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleYon attribute.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower, auto=None)
else:
self.set_ylim(lower, upper, auto=None)
else:
if lower < upper:
self.set_ylim(lower, upper, auto=None)
else:
self.set_ylim(upper, lower, auto=None)
def get_ylim(self):
"""
Get the y-axis range [*bottom*, *top*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Call signature::
set_ylim(self, *args, **kwargs):
Set the data limits for the yaxis
Examples::
set_ylim((bottom, top))
set_ylim(bottom, top)
set_ylim(bottom=1) # top unchanged
set_ylim(top=1) # bottom unchanged
Keyword arguments:
*bottom*: scalar
The bottom ylim; the previous name, *ymin*, may still be used
*top*: scalar
The top ylim; the previous name, *ymax*, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *y* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *bottom* (formerly *ymin*) value may be greater than
the *top* (formerly *ymax*).
For example, suppose *y* is depth in the ocean.
Then one might use::
set_ylim(5000, 0)
so 5000 m depth is at the bottom of the plot and the
surface, 0 m, is at the top.
Returns the current ylimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'ymin' in kw:
bottom = kw.pop('ymin')
if 'ymax' in kw:
top = kw.pop('ymax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and iterable(bottom):
bottom,top = bottom
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None: bottom = old_bottom
if top is None: top = old_top
if bottom==top:
warnings.warn(('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
def get_yscale(self):
return self.yaxis.get_scale()
    get_yscale.__doc__ = "Return the yaxis scale string: %s" % (
        ", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_yscale(self, value, **kwargs):
"""
Call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view(scalex=False)
self._update_transScale()
def get_yticks(self, minor=False):
"""Return the y ticks as a list of locations"""
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ *False* | *True* ]
Sets the minor ticks if *True*
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
"""
Get the major y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
"""
Get the minor y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
"""
Get the y tick labels as a list of :class:`~matplotlib.text.Text`
instances
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
@docstring.dedent_interpd
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the y tick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
def xaxis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
# should be enough to inform the unit conversion interface
# dates are coming in
self.xaxis.axis_date(tz)
def yaxis_date(self, tz=None):
"""
Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
self.yaxis.axis_date(tz)
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
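        For example, to report x values with extra precision in the
        toolbar readout (a sketch; the formatting function is
        illustrative)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(range(10))
            ax.fmt_xdata = lambda x: '{0:.6f}'.format(x)
            print(ax.format_xdata(1.23456789))   # uses fmt_xdata
            plt.show()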
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
"""Return a format string formatting the *x*, *y* coord"""
if x is None:
xs = '???'
else:
xs = self.format_xdata(x)
if y is None:
ys = '???'
else:
ys = self.format_ydata(y)
return 'x=%s y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
"""
return True
def can_pan(self) :
"""
Return *True* if this axes supports any pan/zoom button functionality.
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ *True* | *False* ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = np.array([p.x, p.y])
oldpoints = p.lim.transformed(p.trans)
newpoints = start + alpha * (oldpoints - start)
result = mtransforms.Bbox(newpoints) \
.transformed(p.trans_inverse)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
        Return the cursor properties as a (*linewidth*, *color*)
        tuple, where *linewidth* is a float and *color* is an RGBA
        tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
        c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. Register
with callback functions with the following signatures. The function
has the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
        The connection id is returned - you can use this with
        disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
"""disconnect from the Axes event."""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
"""return a list of child artists"""
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.extend(self.spines.itervalues())
return children
def contains(self,mouseevent):
"""
        Test whether the mouse event occurred in the axes.
Returns *True* / *False*, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def contains_point(self, point):
"""
Returns *True* if the point (tuple of x,y) is inside the axes
        (the area defined by its patch). A pixel coordinate is
required.
"""
return self.patch.contains_point(point, radius=1.0)
def pick(self, *args):
"""
Call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
                tverts = a.get_transform().transform_path(path).vertices
                xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
                xy = a.get_transform().transform(np.column_stack([xdata, ydata]))
                xt, yt = xy[:, 0], xy[:, 1]
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
            artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, **kwargs):
"""
Call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
                for information on how overriding works and on the optional args
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'baseline',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_xlabel(xlabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the xaxis.
*labelpad* is the spacing in points between the label and the x-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
                for information on how overriding works and on the optional args
"""
if labelpad is not None: self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_ylabel(ylabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the yaxis
*labelpad* is the spacing in points between the label and the y-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
                for information on how overriding works and on the optional args
"""
if labelpad is not None: self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
@docstring.dedent_interpd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ *False* | *True* ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
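        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the coordinates shown are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> t = ax.text(0.5, 0.5, 'matplotlib', transform=ax.transAxes,
            ...             horizontalalignment='center',
            ...             verticalalignment='center')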
"""
default = {
'verticalalignment' : 'baseline',
'horizontalalignment' : 'left',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
"""
Call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
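        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the data and annotation
        coordinates are illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> lines = ax.plot([1, 2, 3], [1, 4, 9])
            >>> a = ax.annotate('peak', xy=(3, 9), xytext=(1.5, 8),
            ...                 arrowprops=dict(facecolor='black', shrink=0.05))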
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
        if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
a._remove_method = lambda h: self.texts.remove(h)
return a
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange::
>>> axhline(y=1)
        * draw a default hline at *y* = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
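        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the data values are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> lines = ax.plot([0, 1, 2], [3, 1, 2])
            >>> l = ax.axhline(y=1.5, color='r', linewidth=2)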
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( ydata=y, kwargs=kwargs )
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
        * draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( xdata=x, kwargs=kwargs )
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes::
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
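        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the band limits are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> lines = ax.plot([0, 1, 2], [0, 1, 0])
            >>> p = ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)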
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
self.autoscale_view(scalex=False)
return p
@docstring.dedent_interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
        1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes::
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
self.autoscale_view(scaley=False)
return p
@docstring.dedent
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
        Call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info( [xmin, xmax], y, kwargs=kwargs )
y = self.convert_yunits( y )
xmin = self.convert_xunits(xmin)
xmax = self.convert_xunits(xmax)
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
        if len(xmin)!=len(y):
            raise ValueError('xmin and y are unequal sized sequences')
        if len(xmax)!=len(y):
            raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Call signature::
            vlines(x, ymin, ymax, colors='k', linestyles='solid', label='', **kwargs)
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors* :
A line collection's color args, either a single color
or a ``len(x)`` list of colors
*linestyles* : [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
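        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the data values are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> coll = ax.vlines([1, 2, 3, 4], ymin=0, ymax=[1, 4, 9, 16],
            ...                  colors='k', linestyles='dashed')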
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
        if len(ymin)!=len(x):
            raise ValueError('ymin and x are unequal sized sequences')
        if len(ymax)!=len(x):
            raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
            plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
        marker, linestyle, and marker face color with::
            plot(x, y, color='green', linestyle='dashed', marker='o',
                 markerfacecolor='blue', markersize=12)
        See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
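        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the data values are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> lines = ax.plot([1, 2, 3], [1, 4, 9], 'go-', label='squares',
            ...                 linewidth=2)
            >>> leg = ax.legend(loc='upper left')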
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
Call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ *True* | *False* ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ *False* | *True* ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.dates.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.dates.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.dates.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates` for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange` for help on creating the required
floating point dates.
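        A minimal usage sketch, assuming dates built with
        :func:`~matplotlib.dates.drange` (the date range and values are
        illustrative)::
            >>> import datetime
            >>> import matplotlib.pyplot as plt
            >>> import matplotlib.dates as mdates
            >>> fig, ax = plt.subplots()
            >>> days = mdates.drange(datetime.datetime(2011, 1, 1),
            ...                      datetime.datetime(2011, 1, 11),
            ...                      datetime.timedelta(days=1))
            >>> lines = ax.plot_date(days, range(len(days)), 'o-')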
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
Base of the *x*/*y* logarithm
*subsx*/*subsy*: [ *None* | sequence ]
The location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
*nonposx*/*nonposy*: ['mask' | 'clip' ]
Non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
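        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the data values are
        illustrative)::
            >>> import numpy as np
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> x = np.logspace(0, 3, 50)   # 1 to 1000, evenly spaced in log
            >>> lines = ax.loglog(x, x**2, basex=10, basey=10)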
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
Base of the *x* logarithm
*subsx*: [ *None* | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
*nonposx*: [ 'mask' | 'clip' ]
Non-positive values in *x* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
        Call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
        :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ *None* | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
*nonposy*: [ 'mask' | 'clip' ]
Non-positive values in *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
"""
Call signature::
acorr(x, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
        detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
        *maxlags* is a positive integer detailing the number of lags
        to show. If *maxlags* is *None*, all ``(2*len(x)-1)`` lags are
        returned.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
Call signature::
xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs)
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
        lag. *x* and *y* are detrended by the *detrend* callable
        (default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
        *maxlags* is a positive integer detailing the number of lags to show.
        If *maxlags* is *None*, all ``(2*len(x)-1)`` lags are returned.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
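        A minimal usage sketch, assuming two equal-length random sequences
        (the data are illustrative)::
            >>> import numpy as np
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> x, y = np.random.randn(2, 100)
            >>> lags, c, linecol, b = ax.xcorr(x, y, usevlines=True, maxlags=20)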
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
        if normed: c /= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
                             'positive < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
def _get_legend_handles(self, legend_handler_map=None):
"return artists that will be used as handles for legend"
handles_original = self.lines + self.patches + \
self.collections + self.containers
# collections
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
handles = []
for h in handles_original:
if h.get_label() == "_nolegend_": #.startswith('_'):
continue
if mlegend.Legend.get_legend_handler(handler_map, h):
handles.append(h)
return handles
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
#if (label is not None and label != '' and not label.startswith('_')):
if label and not label.startswith('_'):
handles.append(handle)
labels.append(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Users can specify any arbitrary location for the legend using the
*bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance
        of BboxBase (or its derivatives) or a tuple of 2 or 4 floats.
        For example,
        loc = 'upper right', bbox_to_anchor = (0.5, 0.5)
        will place the legend so that the upper right corner of the legend is
        at the center of the axes.
        The legend location can be specified in other coordinates by using the
        *bbox_transform* keyword.
        The *loc* itself can be a 2-tuple giving x,y of the lower-left corner of
        the legend in axes coords (*bbox_to_anchor* is ignored).
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*numpoints*: integer
The number of points in the legend for line
*scatterpoints*: integer
The number of points in the legend for scatter plot
        *scatteryoffsets*: list of floats
            a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*,
use rc settings.
*frameon*: [ *True* | *False* ]
if *True*, draw a frame around the legend.
The default is set by the rcParam 'legend.frameon'
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*,
use rc settings
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*,
use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*bbox_to_anchor* : an instance of BboxBase or a tuple of 2 or 4 floats
the bbox that the legend will be anchored.
*bbox_transform* : [ an instance of Transform | *None* ]
the transform for the bbox. transAxes if *None*.
*title* : string
the legend title
        Padding and spacing between various elements use the following
        keyword parameters. These values are measured in font-size
        units. E.g., a fontsize of 10 points and a handlelength=5
        implies a handlelength of 50 points. Values from rcParams
        will be used if None.
================ ==================================================================
Keyword Description
================ ==================================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
.. Note:: Not all kinds of artist are supported by the legend command.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
.. seealso::
:ref:`plotting-guide-legend`.
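        A minimal usage sketch, assuming labeled lines plotted on an axes
        created with :func:`~matplotlib.pyplot.subplots` (the data values
        are illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> l1, = ax.plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1')
            >>> l2, = ax.plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
            >>> leg = ax.legend(loc='upper left')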
"""
if len(args)==0:
handles, labels = self.get_legend_handles_labels()
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
# Why do we need to call "flatten" here? -JJL
# handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
"""
Call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i+1]
If 'post', that interval has level y[i]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
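        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the data values are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> lines = ax.step([1, 2, 3, 4], [1, 4, 9, 16], where='mid')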
"""
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Call signature::
bar(left, height, width=0.8, bottom=0, **kwargs)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*error_kw* dictionary of kwargs to be passed to
errorbar method. *ecolor* and *capsize*
may be specified here rather than as
independent kwargs.
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: *xerr* and *yerr* are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
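        A minimal usage sketch, assuming a figure and axes created with
        :func:`~matplotlib.pyplot.subplots` (the positions, heights and
        errors are illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig, ax = plt.subplots()
            >>> bars = ax.bar([0, 1, 2], [3, 5, 2], width=0.8, color='b',
            ...               yerr=[0.5, 0.4, 0.6], ecolor='k', align='center')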
"""
if not self._hold: self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
            raise ValueError('invalid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) == 0: # until to_rgba_array is changed
color = [[0,0,0,0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) == 0: # until to_rgba_array is changed
edgecolor = [[0,0,0,0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
        # Proper input validation: raise ValueError rather than using
        # assert, which can be silently disabled with "python -O".
        if len(left) != nbars:
            raise ValueError("incompatible sizes: argument 'left' must "
                             "be length %d or scalar" % nbars)
        if len(height) != nbars:
            raise ValueError("incompatible sizes: argument 'height' must "
                             "be length %d or scalar" % nbars)
        if len(width) != nbars:
            raise ValueError("incompatible sizes: argument 'width' must "
                             "be length %d or scalar" % nbars)
        if len(bottom) != nbars:
            raise ValueError("incompatible sizes: argument 'bottom' must "
                             "be length %d or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits( left )
width = self.convert_xunits( width )
if xerr is not None:
xerr = self.convert_xunits( xerr )
if self.yaxis is not None:
bottom = self.convert_yunits( bottom )
height = self.convert_yunits( height )
if yerr is not None:
yerr = self.convert_yunits( yerr )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
            raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt=None, **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin([w for w in width if w > 0])
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin([h for h in height if h > 0])
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
return bar_container
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
        these can either be a single argument, i.e.::
facecolors = 'black'
        or a sequence of arguments for the various bars, i.e.::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-',
bottom=None, label=None):
"""
Call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
        using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This `document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [bottom,bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
        *pctdistance*: scalar
            The radial distance, as a fraction of the radius, at which
            the text generated by *autopct* is drawn. Ignored if
            *autopct* is *None*; default is 0.6.
        *labeldistance*: scalar
            The radial distance, as a fraction of the radius, at which
            the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
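        A minimal usage sketch, assuming a square figure created through
        :mod:`~matplotlib.pyplot` (the fractions and labels are
        illustrative)::
            >>> import matplotlib.pyplot as plt
            >>> fig = plt.figure(figsize=(8, 8))
            >>> ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
            >>> slices, texts, autotexts = ax.pie(
            ...     [0.3, 0.3, 0.2, 0.2], labels=['a', 'b', 'c', 'd'],
            ...     explode=[0, 0.1, 0, 0], autopct='%1.1f%%', shadow=True)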
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
            label_alignment = 'left' if xt > 0 else 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
Call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
            If a scalar number, length-N array-like object, or an Nx1 array-like
object, errorbars are drawn +/- value.
If a sequence of shape 2xN, errorbars are drawn at -row1 and
+row2
*fmt*: '-'
The plot format symbol. If *fmt* is *None*, only the
errorbars are plotted. This is used for adding
errorbars to a bar plot, for example.
*ecolor*: [ *None* | mpl color ]
a matplotlib color arg which gives the color the errorbar lines;
if *None*, use the marker color.
*elinewidth*: scalar
            the linewidth of the errorbar lines. If *None*, the *linewidth*
            keyword given for the markers (if any) is used.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ *True* | *False* ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Returns (*plotline*, *caplines*, *barlinecols*):
*plotline*: :class:`~matplotlib.lines.Line2D` instance
*x*, *y* plot markers and/or line
*caplines*: list of error bar cap
:class:`~matplotlib.lines.Line2D` instances
*barlinecols*: list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
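A further illustrative sketch with asymmetric vertical errors
(pylab-style names; the numbers are invented)::

    x = arange(5.0)
    y = x**2
    yerr = [0.5*ones(5), 2.0*ones(5)]   # lower, upper deltas
    errorbar(x, y, yerr=yerr, fmt='o', capsize=4)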
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
holdstate = self._hold
self._hold = True
label = kwargs.pop("label", None)
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,label="_nolegend_", **kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
Return the elements of xs and ys for which mask is True.  Works
even when xs and ys are plain sequences rather than arrays, so
numpy boolean indexing cannot be used.
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines.color_cycle.next()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer((l0, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None):
"""
Call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None, patch_artist=False)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Function Arguments:
*x* :
Array or a sequence of vectors.
*notch* : [ 0 (default) | 1]
If 0, produce a rectangular box plot.
If 1, produce a notched box plot
*sym* :
The symbol for flier points (default 'b+').
Enter an empty string ('') if you don't want to show fliers.
*vert* : [1 (default) | 0]
If 1, make the boxes vertical.
If 0, make horizontal boxes. (Odd, but kept for compatibility
with MATLAB boxplots)
*whis* : (default 1.5)
Defines the length of the whiskers as
a function of the inner quartile range. They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*bootstrap* : [ *None* (default) | integer ]
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If *None*, no
bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation
(see McGill, R., Tukey, J.W., and Larsen, W.A.,
1978, and Kendall and Stuart, 1967). Otherwise, bootstrap
specifies the number of times to bootstrap the median to
determine its 95% confidence intervals. Values between 1000
and 10000 are recommended.
*positions* : (default 1,2,...,n)
Sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* : [ scalar | array ]
Either a scalar or a vector to set the width of each box.
The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*patch_artist* : boolean
If *False* (default), produce boxes with the
:class:`~matplotlib.lines.Line2D` artist.
If *True*, produce boxes with the
:class:`~matplotlib.patches.Patch` artist.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`~matplotlib.lines.Line2D`
instances created (unless *patch_artist* was *True*; see above).
**Example:**
.. plot:: pyplots/boxplot_demo.py
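An illustrative sketch with notches and bootstrapped confidence
intervals (pylab-style names; the data is invented)::

    data = [randn(100), 2.0 + randn(100), 0.5*randn(100)]
    ax.boxplot(data, notch=1, bootstrap=1000, widths=0.6, sym='g+')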
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
if row==0:
# no data, skip this position
continue
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
def bootstrapMedian(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentile = [2.5,97.5]
estimate = np.zeros(N)
for n in range(N):
bsIndex = np.random.random_integers(0,M-1,M)
bsData = data[bsIndex]
estimate[n] = mlab.prctile(bsData, 50)
CI = mlab.prctile(estimate, percentile)
return CI
# get conf. intervals around median
CI = bootstrapMedian(d, N=bootstrap)
notch_max = CI[1]
notch_min = CI[0]
else:
# Estimate notch locations using Gaussian-based
# asymptotic approximation.
#
# For discussion: McGill, R., Tukey, J.W.,
# and Larsen, W.A. (1978) "Variations of
# Boxplots", The American Statistician, 32:12-16.
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
def to_vc(xs,ys):
# convert arguments to verts and codes
verts = []
#codes = []
for xi,yi in zip(xs,ys):
verts.append( (xi,yi) )
verts.append( (0,0) ) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO]*(len(verts)-2) + \
[mpath.Path.CLOSEPOLY]
return verts,codes
def patch_list(xs,ys):
verts,codes = to_vc(xs,ys)
path = mpath.Path( verts, codes )
patch = mpatches.PathPatch(path)
self.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
def dopatch(xs,ys):
return patch_list(xs,ys)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
def dopatch(xs,ys):
xs,ys = ys,xs # flip X, Y
return patch_list(xs,ys)
if patch_artist:
median_color = 'k'
else:
median_color = 'r'
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
if patch_artist:
boxes.extend(dopatch(box_x, box_y))
else:
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, median_color+'-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
@docstring.dedent_interpd
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
Call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are
converted to 1-D sequences which must be of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
%(MarkerTable)s
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance or registered
name. If *None*, defaults to rc ``image.cmap``. *cmap* is
only used if *c* is an array of floats.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are *None*, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: ``0 <= scalar <= 1`` or *None*
The alpha value for the patches
*linewidths*: [ *None* | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
The string 'none' to plot faces with no outlines
*facecolors*:
The string 'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
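A hedged usage sketch with color mapping (pylab-style names; the
data is invented)::

    x, y = rand(2, 50)
    c = rand(50)            # mapped through *cmap* and *norm*
    scatter(x, y, s=80, c=c, marker='o', alpha=0.5,
            edgecolors='none')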
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
if not c_is_stringy:
c = np.asanyarray(c)
if c.size == x.size:
c = np.ma.ravel(c)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
if c_is_stringy:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if c.size == x.size:
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = 'face'
collection = mcoll.PathCollection(
(path,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0 :
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0 :
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear', extent = None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = np.mean, mincnt=None, marginals=False,
**kwargs):
"""
Call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none'
reduce_C_function = np.mean, mincnt=None, marginals=True
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ *None* | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
*mincnt*: [ *None* | a positive integer ]
If not *None*, only display cells with more than *mincnt*
number of points in the cell
*marginals*: [ *True* | *False* ]
if marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
*extent*: [ *None* | scalars (left, right, bottom, top) ]
The limits of the bins. The default assigns the limits
based on gridsize, x, y, xscale and yscale.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ *None* | Colormap ]
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ *None* | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin* / *vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar between 0 and 1, or *None*
the alpha value for the patches
*linewidths*: [ *None* | scalar ]
If *None*, defaults to rc lines.linewidth.  The value is passed
on to the underlying
:class:`~matplotlib.collections.PolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
If ``'none'``, draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon. If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the return collection as attributes *hbar* and *vbar*.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
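A minimal illustrative sketch (pylab-style names; the data is
invented)::

    x = randn(10000)
    y = randn(10000)
    hexbin(x, y, gridsize=50, bins='log', mincnt=1)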
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
if np.any(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = np.log10(x)
if yscale=='log':
if np.any(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = np.log10(y)
if extent is not None:
xmin, xmax, ymin, ymax = extent
else:
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x-xmin)/sx
y = (y-ymin)/sy
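# The hexagon centers form two staggered rectangular lattices:
# lattice1 sits at integer grid positions (ix1, iy1) and lattice2 is
# offset by (0.5, 0.5).  Now that x and y are rescaled to grid units,
# each point is assigned below to whichever of its two candidate
# centers gives the smaller weighted distance (the d1/d2 comparison
# that produces ``bdist``).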
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]]+=1
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]]+=1
# threshold
if mincnt is not None:
for i in xrange(nx1):
for j in xrange(ny1):
if lattice1[i,j]<mincnt:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
if lattice2[i,j]<mincnt:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
else:
if mincnt is None:
mincnt = 0
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals)>mincnt:
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals)>mincnt:
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
if isinstance(norm, mcolors.LogNorm):
if (accum==0).any():
# make sure we have no zeros
accum += 1
# autoscale the norm with current accum values if it hasn't
# been set
if norm is not None:
if norm.vmin is None and norm.vmax is None:
norm.autoscale(accum)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view(tight=True)
# add the collection last
self.add_collection(collection)
if not marginals:
return collection
if C is None:
C = np.ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.searchsorted(x).clip(0, len(coarse)-1)
mus = np.zeros(len(coarse))
for i in range(len(coarse)):
mu = reduce_C_function(y[ind==i])
mus[i] = mu
return mus
coarse = np.linspace(xmin, xmax, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~np.isnan(xcoarse)
verts, values = [], []
for i,val in enumerate(xcoarse):
thismin = coarse[i]
if i<len(coarse)-1:
thismax = coarse[i+1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]: continue
verts.append([(thismin, 0), (thismin, 0.05), (thismax, 0.05), (thismax, 0)])
values.append(val)
values = np.array(values)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_array(values)
hbar.set_cmap(cmap)
hbar.set_norm(norm)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_collection(hbar)
coarse = np.linspace(ymin, ymax, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~np.isnan(ycoarse)
verts, values = [], []
for i,val in enumerate(ycoarse):
thismin = coarse[i]
if i<len(coarse)-1:
thismax = coarse[i+1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]: continue
verts.append([(0, thismin), (0.0, thismax), (0.05, thismax), (0.05, thismin)])
values.append(val)
values = np.array(values)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_array(values)
vbar.set_cmap(cmap)
vbar.set_norm(norm)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_collection(vbar)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.callbacksSM.connect('changed', on_changed)
return collection
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
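A short illustrative call (the coordinates are invented)::

    ax.arrow(0, 0, 0.5, 0.5, head_width=0.05, head_length=0.1,
             fc='k', ec='k')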
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
"""
Call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, e.g. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`.
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
**kwargs):
"""
Call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x* :
An N-length array of the x data
*y1* :
An N-length array (or scalar) of the y data
*y2* :
An N-length array (or scalar) of the y data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``.
*interpolate* :
If *True*, interpolate between the two lines to find the
precise point of intersection. Otherwise, the start and
end points of the filled region will only occur on explicit
values in the *x* array.
*kwargs* :
Keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`.
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between_demo.py
.. seealso::
:meth:`fill_betweenx`
for filling between two sets of x-values
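An illustrative sketch shading only where one curve exceeds the
other (pylab-style names; the curves are invented)::

    x = linspace(0, 2*pi, 200)
    y1 = sin(x)
    y2 = 0.5*sin(2*x)
    ax.fill_between(x, y1, y2, where=y1 >= y2,
                    interpolate=True, alpha=0.5)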
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asanyarray(self.convert_xunits(x))
y1 = np.asanyarray(self.convert_yunits(y1))
y2 = np.asanyarray(self.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x)*y1
if y2.ndim == 0:
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
else:
where = np.asarray(where, np.bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or,
[ma.getmask(x), ma.getmask(y1), ma.getmask(y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind-1, 0)
x_values = x[im1:ind+1]
diff_values = y1[im1:ind+1] - y2[im1:ind+1]
y1_values = y1[im1:ind+1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
diff_root_y = np.interp(diff_root_x, x_values, y1_values)
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
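# Assemble one closed polygon per contiguous region: X[0] and X[N+1]
# are the two end caps, rows 1..N trace the y1 curve left to right,
# and rows N+2 onward trace the y2 curve back right to left.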
X[0] = start
X[N+1] = end
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None, **kwargs):
"""
Call signature::
fill_betweenx(y, x1, x2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *x1* and *x2* where
``where==True``
*y* :
An N-length array of the y data
*x1* :
An N-length array (or scalar) of the x data
*x2* :
An N-length array (or scalar) of the x data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs* :
keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py
.. seealso::
:meth:`fill_between`
for filling between two sets of y-values
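An illustrative one-liner, shading horizontally between two
invented x-curves::

    ax.fill_betweenx(y, x1, x2, where=x2 >= x1, facecolor='green')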
"""
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the arrays so we can work with them
y = np.asanyarray(self.convert_yunits(y))
x1 = np.asanyarray(self.convert_xunits(x1))
x2 = np.asanyarray(self.convert_xunits(x2))
if x1.ndim == 0:
x1 = np.ones_like(y)*x1
if x2.ndim == 0:
x2 = np.ones_like(y)*x2
if where is None:
where = np.ones(len(y), np.bool)
else:
where = np.asarray(where, np.bool)
if not (y.shape == x1.shape == x2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or,
[ma.getmask(y), ma.getmask(x1), ma.getmask(x2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
yslice = y[ind0:ind1]
x1slice = x1[ind0:ind1]
x2slice = x2[ind0:ind1]
if not len(yslice):
continue
N = len(yslice)
Y = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the x1 sample points do
Y[0] = x2slice[0], yslice[0]
Y[N+1] = x2slice[-1], yslice[-1]
Y[1:N+1,0] = x1slice
Y[1:N+1,1] = yslice
Y[N+2:,0] = x2slice[::-1]
Y[N+2:,1] = yslice[::-1]
polys.append(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = np.array([x1[where], y[where]]).T
X2Y = np.array([x2[where], y[where]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=None, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays are mapped to colors
via *norm* and *cmap*.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ *None* | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'none', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos'
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
If *interpolation* is ``'none'``, then no interpolation is
performed on the Agg, ps and pdf backends. Other backends
will fall back to 'nearest'.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, default is ``normalize()``. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ *None* | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
or *None*
*origin*: [ *None* | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ *None* | scalars (left, right, bottom, top) ]
Data limits for the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ *None* | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
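A minimal sketch displaying a 2-D luminance array (assumes numpy
as ``np`` and ``matplotlib.cm`` as ``cm``; the data is invented)::

    Z = np.random.rand(10, 10)
    ax.imshow(Z, cmap=cm.gray, interpolation='nearest',
              origin='lower', extent=(0, 1, 0, 1))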
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
"""
Call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
norm: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='none'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'none'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'none'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the MATLAB convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to::
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
MATLAB :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
Note: the default *antialiaseds* is False if the default
*edgecolors*="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of
alpha. If *edgecolors* is not "none", then the default
*antialiaseds* is taken from
rcParams['patch.antialiased'], which defaults to *True*.
Stroking the edges may be preferred if *alpha* is 1, but
will cause artifacts otherwise.
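A hedged sketch with masked data, where *X*, *Y* and *C* are 2-D
arrays as described above (assumes numpy as ``np``)::

    Cm = np.ma.masked_where(np.isnan(C), C)
    ax.pcolor(X, Y, Cm, vmin=-1, vmax=1)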
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
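# For each unmasked cell, gather its four surrounding vertices
# (indices [i, j], [i+1, j], [i+1, j+1], [i, j+1]) and repeat the
# first vertex, so every cell becomes a closed 5-point polygon for
# the PolyCollection created below.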
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if shading == 'faceted':
edgecolors = 'k',
else:
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
# aa setting will default via collections to patch.antialiased
# unless the boundary is not stroked, in which case the
# default will be False; with unstroked boundaries, aa
# makes artifacts that are often disturbing.
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and ec.lower() == "none":
kwargs['antialiaseds'] = False
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
@docstring.dedent_interpd
def pcolormesh(self, *args, **kwargs):
"""
Call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' | 'gouraud' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'None'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'None'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh` properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
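An illustrative sketch (assumes numpy as ``np``; the grid and data
are invented)::

    x = np.arange(0, 5, 0.1)
    y = np.arange(0, 3, 0.1)
    X, Y = np.meshgrid(x, y)
    ax.pcolormesh(X, Y, np.sin(X)*np.cos(Y), vmin=-1, vmax=1)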
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat').lower()
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
if shading != 'gouraud':
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
else:
C = C.ravel()
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased, shading=shading, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
@docstring.dedent_interpd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance from cm. If *None*,
use rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to scale
luminance data to 0,1. If *None*, defaults to normalize()
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max
of the color array *C* is used. If you pass a norm instance,
your settings for *vmin* and *vmax* will be ignored.
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a :class:`~matplotlib.collections.QuadMesh`
collection in the general quadrilateral case.
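A hedged sketch of the fastest, regular-grid form (assumes numpy
as ``np``; the data is invented)::

    C = np.random.rand(20, 30)
    ax.pcolorfast([0.0, 3.0], [0.0, 2.0], C, vmin=0, vmax=1)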
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.QuadContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.QuadContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.QuadContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.QuadContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
@docstring.dedent_interpd
def table(self, **kwargs):
"""
Call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
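A hedged usage sketch with made-up cell data (not from the original docs)::
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.axis('off')                       # show only the table
    tab = ax.table(cellText=[['1.0', '2.0'], ['3.0', '4.0']],
                   rowLabels=['row 1', 'row 2'],
                   colLabels=['col 1', 'col 2'],
                   loc='center')
    plt.show()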
"""
return mtable.table(self, **kwargs)
def twinx(self):
"""
Call signature::
ax = twinx()
create a twin of Axes for generating a plot with a sharex
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right.
.. note::
For those who are 'picking' artists while using twinx, pick
events are only called for the artists in the top-most axes.
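A hedged usage sketch (synthetic data; not from the original docs)::
    import numpy as np
    import matplotlib.pyplot as plt
    t = np.linspace(0, 10, 200)
    fig, ax1 = plt.subplots()
    ax1.plot(t, np.sin(t), 'b-')
    ax1.set_ylabel('sin(t)', color='b')
    ax2 = ax1.twinx()                    # shared x-axis, independent y-axis
    ax2.plot(t, np.exp(t / 5.0), 'r-')
    ax2.set_ylabel('exp(t/5)', color='r')
    plt.show()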
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
ax2.yaxis.set_offset_position('right')
self.yaxis.tick_left()
ax2.xaxis.set_visible(False)
return ax2
def twiny(self):
"""
Call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top.
.. note::
For those who are 'picking' artists while using twiny, pick
events are only called for the artists in the top-most axes.
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
ax2.yaxis.set_visible(False)
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
@docstring.dedent_interpd
def hist(self, x, bins=10, range=None, normed=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None,
**kwargs):
"""
Call signature::
hist(x, bins=10, range=None, normed=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None,
**kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Multiple data can be provided via *x* as a list of datasets
of potentially different length ([*x0*, *x1*, ...]), or as
a 2-D ndarray in which each column is a dataset. Note that
the ndarray form is transposed relative to the list form.
Masked arrays are not supported at present.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. If *bins* is an integer, *bins* + 1 bin edges
will be returned, consistent with :func:`numpy.histogram`
for numpy version >= 1.3, and with the *new* = True argument
in earlier versions.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling
is based on the specified bin range instead of the
range of x.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
.. note::
Until numpy release 1.5, the underlying numpy
histogram function was incorrect with *normed*=*True*
if bin sizes were unequal. MPL inherited that
error. It is now corrected within MPL when using
earlier numpy versions
*weights*:
An array of weights, of the same shape as *x*. Each value in
*x* only contributes its associated weight towards the bin
count (instead of 1). If *normed* is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
*color*:
Color spec or sequence of color specs, one per
dataset. Default (*None*) uses the standard line
color sequence.
*label*:
String, or sequence of strings to match multiple
datasets. Bar charts yield multiple patches per
dataset, but only the first gets the label, so
that the legend command will work as expected::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
kwargs are used to update the properties of the
:class:`~matplotlib.patches.Patch` instances returned by *hist*:
%(Patch)s
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
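A further hedged sketch (synthetic data, not from the original docs)
showing two datasets with a normalized, semi-transparent histogram::
    import numpy as np
    import matplotlib.pyplot as plt
    data = [np.random.randn(1000), 2 + 0.5 * np.random.randn(800)]
    fig, ax = plt.subplots()
    n, bins, patches = ax.hist(data, bins=30, normed=True,
                               histtype='stepfilled', alpha=0.5,
                               label=['first', 'second'])
    ax.legend()
    plt.show()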
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
# Validate string inputs here so we don't have to clutter
# subsequent code.
if histtype not in ['bar', 'barstacked', 'step', 'stepfilled']:
raise ValueError("histtype %s is not recognized" % histtype)
if align not in ['left', 'mid', 'right']:
raise ValueError("align kwarg %s is not recognized" % align)
if orientation not in [ 'horizontal', 'vertical']:
raise ValueError(
"orientation kwarg %s is not recognized" % orientation)
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
# Massage 'x' for processing.
# NOTE: Be sure any changes here are also made below for 'weights'
if isinstance(x, np.ndarray) or not iterable(x[0]):
# TODO: support masked arrays;
x = np.asarray(x)
if x.ndim == 2:
x = x.T # 2-D input with columns as datasets; switch to rows
elif x.ndim == 1:
x = x.reshape(1, x.shape[0]) # new view, single row
else:
raise ValueError("x must be 1D or 2D")
if x.shape[1] < x.shape[0]:
warnings.warn('2D hist input should be nsamples x nvariables;\n '
'this looks transposed (shape is %d x %d)' % x.shape[::-1])
else:
# multiple hist with data of different length
x = [np.asarray(xi) for xi in x]
nx = len(x) # number of datasets
if color is None:
color = [self._get_lines.color_cycle.next()
for i in xrange(nx)]
else:
color = mcolors.colorConverter.to_rgba_array(color)
if len(color) != nx:
raise ValueError("color kwarg must have one color per dataset")
# We need to do to 'weights' what was done to 'x'
if weights is not None:
if isinstance(weights, np.ndarray) or not iterable(weights[0]) :
w = np.array(weights)
if w.ndim == 2:
w = w.T
elif w.ndim == 1:
w.shape = (1, w.shape[0])
else:
raise ValueError("weights must be 1D or 2D")
else:
w = [np.asarray(wi) for wi in weights]
if len(w) != nx:
raise ValueError('weights should have the same shape as x')
for i in xrange(nx):
if len(w[i]) != len(x[i]):
raise ValueError(
'weights should have the same shape as x')
else:
w = [None]*nx
# Save autoscale state for later restoration; turn autoscaling
# off so we can do it all a single time at the end, instead
# of having it done by bar or fill and then having to be redone.
_saved_autoscalex = self.get_autoscalex_on()
_saved_autoscaley = self.get_autoscaley_on()
self.set_autoscalex_on(False)
self.set_autoscaley_on(False)
# Save the datalimits for the same reason:
_saved_bounds = self.dataLim.bounds
# Check whether bins or range are given explicitly. In that
# case use those values for autoscaling.
binsgiven = (cbook.iterable(bins) or range != None)
# If bins are not specified either explicitly or via range,
# we need to figure out the range required for all datasets,
# and supply that to np.histogram.
if not binsgiven:
xmin = np.inf
xmax = -np.inf
for xi in x:
xmin = min(xmin, xi.min())
xmax = max(xmax, xi.max())
range = (xmin, xmax)
#hist_kwargs = dict(range=range, normed=bool(normed))
# We will handle the normed kwarg within mpl until we
# get to the point of requiring numpy >= 1.5.
hist_kwargs = dict(range=range)
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs['new'] = True
n = []
for i in xrange(nx):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
if normed:
db = np.diff(bins)
m = (m.astype(float) / db) / m.sum()
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
if rwidth is not None:
dr = min(1.0, max(0.0, rwidth))
elif len(n)>1:
dr = 0.8
else:
dr = 1.0
if histtype=='bar':
width = dr*totwidth/nx
dw = width
if nx > 1:
boffset = -0.5*dr*totwidth*(1.0-1.0/nx)
else:
boffset = 0.0
stacked = False
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
if orientation == 'horizontal':
_barfunc = self.barh
else: # orientation == 'vertical'
_barfunc = self.bar
for m, c in zip(n, color):
patch = _barfunc(bins[:-1]+boffset, m, width, bottom,
align='center', log=log,
color=c)
patches.append(patch)
if stacked:
if bottom is None:
bottom = 0.0
bottom += m
boffset += dw
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
minimum = min(bins)
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
if log:
y[0],y[-1] = minimum, minimum
if orientation == 'horizontal':
self.set_xscale('log')
else: # orientation == 'vertical'
self.set_yscale('log')
fill = (histtype == 'stepfilled')
for m, c in zip(n, color):
y[1:-1:2], y[2::2] = m, m
if log:
y[y<minimum]=minimum
if orientation == 'horizontal':
x,y = y,x
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=c) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=c, fill=False) )
# adapted from the adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin0 = max(_saved_bounds[0]*0.9, minimum)
xmax = self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, minimum)
xmin = min(xmin0, xmin)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin0 = max(_saved_bounds[1]*0.9, minimum)
ymax = self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, minimum)
ymin = min(ymin0, ymin)
self.dataLim.intervaly = (ymin, ymax)
if label is None:
labels = [None]
elif is_string_like(label):
labels = [label]
elif is_sequence_of_strings(label):
labels = list(label)
else:
raise ValueError('invalid label: must be string or sequence of strings')
if len(labels) < nx:
labels += [None] * (nx - len(labels))
for (patch, lbl) in zip(patches, labels):
if patch:
p = patch[0]
p.update(kwargs)
if lbl is not None: p.set_label(lbl)
p.set_snap(False)
for p in patch[1:]:
p.update(kwargs)
p.set_label('_nolegend_')
if binsgiven:
if orientation == 'vertical':
self.update_datalim([(bins[0],0), (bins[-1],0)], updatey=False)
else:
self.update_datalim([(0,bins[0]), (0,bins[-1])], updatex=False)
self.set_autoscalex_on(_saved_autoscalex)
self.set_autoscaley_on(_saved_autoscaley)
self.autoscale_view()
if nx == 1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
@docstring.dedent_interpd
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
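A further hedged sketch (a noisy 10 Hz sine at an assumed 100 Hz sampling
rate; not from the original docs)::
    import numpy as np
    import matplotlib.pyplot as plt
    fs = 100.0
    t = np.arange(0, 10, 1.0 / fs)
    x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(len(t))
    fig, ax = plt.subplots()
    Pxx, freqs = ax.psd(x, NFFT=256, Fs=fs)
    plt.show()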
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
@docstring.dedent_interpd
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *Pxy* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
step = 10*int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
@docstring.dedent_interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence
is the normalized cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
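A further hedged sketch (two noisy copies of the same 5 Hz signal; the
sampling rate is an assumption, not from the original docs)::
    import numpy as np
    import matplotlib.pyplot as plt
    fs = 100.0
    t = np.arange(0, 30, 1.0 / fs)
    s = np.sin(2 * np.pi * 5 * t)
    x = s + 0.5 * np.random.randn(len(t))
    y = s + 0.5 * np.random.randn(len(t))
    fig, ax = plt.subplots()
    Cxy, f = ax.cohere(x, y, NFFT=256, Fs=fs)
    plt.show()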
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
@docstring.dedent_interpd
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, **kwargs):
"""
Call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, **kwargs)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`~matplotlib.mlab.specgram`
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(freqs) x len(bins) array of power
- *im* is a :class:`~matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
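A further hedged sketch (a synthetic signal whose frequency rises over
time; the sampling rate is an assumption, not from the original docs)::
    import numpy as np
    import matplotlib.pyplot as plt
    fs = 1000.0
    t = np.arange(0, 5, 1.0 / fs)
    x = np.sin(2 * np.pi * (50 + 20 * t) * t)   # instantaneous frequency rises
    fig, ax = plt.subplots()
    Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=fs, noverlap=128)
    plt.show()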
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, **kwargs)
self.axis('auto')
return Pxx, freqs, bins, im
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
Call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For image options.
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
For plotting options
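A hedged sketch contrasting the image and marker styles on a mostly-zero
random array (not from the original docs)::
    import numpy as np
    import matplotlib.pyplot as plt
    Z = np.random.rand(40, 40)
    Z[Z < 0.9] = 0                        # keep roughly 10% of the entries
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.spy(Z)                            # image style
    ax2.spy(Z, marker='.', markersize=4)  # marker style
    plt.show()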
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
def matshow(self, Z, **kwargs):
"""
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *origin*,
*interpolation*, and *aspect*; if you want row zero to
be at the bottom instead of the top, you can set the *origin*
kwarg to "lower".
Returns: an :class:`matplotlib.image.AxesImage` instance.
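A hedged usage sketch (random matrix; not from the original docs)::
    import numpy as np
    import matplotlib.pyplot as plt
    A = np.random.rand(8, 12)
    fig, ax = plt.subplots()
    im = ax.matshow(A)
    fig.colorbar(im)
    plt.show()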
"""
Z = np.asanyarray(Z)
nr, nc = Z.shape
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
def get_default_bbox_extra_artists(self):
bbox_extra_artists = [t for t in self.texts if t.get_visible()]
if self.legend_:
bbox_extra_artists.append(self.legend_)
return bbox_extra_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
"""
Return the tight bounding box of the axes.
The dimensions of the Bbox are in canvas coordinates.
If *call_axes_locator* is *False*, it does not call the
_axes_locator attribute, which is necessary to get the correct
bounding box. ``call_axes_locator==False`` can be used if the
caller is only interested in the relative size of the tightbbox
compared to the axes bbox.
"""
artists = []
bb = []
if not self.get_visible():
return None
locator = self.get_axes_locator()
if locator and call_axes_locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
bb.append(self.get_window_extent(renderer))
if self.title.get_visible():
bb.append(self.title.get_window_extent(renderer))
bb_xaxis = self.xaxis.get_tightbbox(renderer)
if bb_xaxis: bb.append(bb_xaxis)
bb_yaxis = self.yaxis.get_tightbbox(renderer)
if bb_yaxis: bb.append(bb_yaxis)
_bbox = mtransforms.Bbox.union([b for b in bb if b.width!=0 or b.height!=0])
return _bbox
def minorticks_on(self):
'Add autoscaling minor ticks to the axes.'
for ax in (self.xaxis, self.yaxis):
if ax.get_scale() == 'log':
s = ax._scale
ax.set_minor_locator(mticker.LogLocator(s.base, s.subs))
else:
ax.set_minor_locator(mticker.AutoMinorLocator())
def minorticks_off(self):
"""Remove minor ticks from the axes."""
self.xaxis.set_minor_locator(mticker.NullLocator())
self.yaxis.set_minor_locator(mticker.NullLocator())
def tricontour(self, *args, **kwargs):
return mtri.tricontour(self, *args, **kwargs)
tricontour.__doc__ = mtri.TriContourSet.tricontour_doc
def tricontourf(self, *args, **kwargs):
return mtri.tricontourf(self, *args, **kwargs)
tricontourf.__doc__ = mtri.TriContourSet.tricontour_doc
def tripcolor(self, *args, **kwargs):
return mtri.tripcolor(self, *args, **kwargs)
tripcolor.__doc__ = mtri.tripcolor.__doc__
def triplot(self, *args, **kwargs):
mtri.triplot(self, *args, **kwargs)
triplot.__doc__ = mtri.triplot.__doc__
from matplotlib.gridspec import GridSpec, SubplotSpec
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows*, *numCols* and *plotNum* are all single-digit numbers, *args*
can be the decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
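A hedged illustration of this shorthand (it assumes the usual
pyplot/Figure API; not part of the original docstring)::
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax_long = fig.add_subplot(2, 2, 3)    # explicit (numRows, numCols, plotNum)
    fig2 = plt.figure()
    ax_short = fig2.add_subplot(223)      # same placement via the 3-digit shorthand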
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = map(int, s)
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit integer')
self._subplotspec = GridSpec(rows, cols)[num-1]
# num - 1 for converting from MATLAB to python indexing
elif len(args)==3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
else:
self._subplotspec = GridSpec(rows, cols)[int(num)-1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
"""get the subplot geometry, eg 2,2,3"""
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1+1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
"""change subplot geometry, eg. from 1,1,1 to 2,2,3"""
self._subplotspec = GridSpec(numrows, numcols)[num-1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
"""get the SubplotSpec instance associated with the subplot"""
return self._subplotspec
def set_subplotspec(self, subplotspec):
"""set the SubplotSpec instance associated with the subplot"""
self._subplotspec = subplotspec
def update_params(self):
"""update the subplot position from fig.subplotpars"""
self.figbox, self.rowNum, self.colNum, self.numRows, self.numCols = \
self.get_subplotspec().get_position(self.figure,
return_all=True)
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
docstring.interpd.update(Axes=martist.kwdoc(Axes))
docstring.interpd.update(Subplot=martist.kwdoc(Axes))
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| gpl-3.0 |
mne-tools/mne-python | mne/label.py | 4 | 100603 | # Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
from collections import defaultdict
from colorsys import hsv_to_rgb, rgb_to_hsv
import copy as cp
import os
import os.path as op
import re
import numpy as np
from .morph_map import read_morph_map
from .parallel import parallel_func, check_n_jobs
from .source_estimate import (SourceEstimate, VolSourceEstimate,
_center_of_mass, extract_label_time_course,
spatial_src_adjacency)
from .source_space import (add_source_space_distances, SourceSpaces,
read_freesurfer_lut, _import_nibabel)
from .stats.cluster_level import _find_clusters, _get_components
from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist
from .transforms import apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, warn,
check_random_state, _validate_type, fill_doc,
_check_option, check_version)
def _blend_colors(color_1, color_2):
"""Blend two colors in HSV space.
Parameters
----------
color_1, color_2 : None | tuple
RGBA tuples with values between 0 and 1. None if no color is available.
If both colors are None, the output is None. If only one is None, the
output is the other color.
Returns
-------
color : None | tuple
RGBA tuple of the combined color. Saturation, value and alpha are
averaged, whereas the new hue is determined as angle half way between
the two input colors' hues.
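Examples
--------
A hedged usage sketch (illustrative values only; not part of the
original docstring)::
    red = (1.0, 0.0, 0.0, 1.0)
    blue = (0.0, 0.0, 1.0, 0.5)
    mixed = _blend_colors(red, blue)        # hue halfway between, alpha averaged
    assert _blend_colors(red, None) == red  # None falls back to the other color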
"""
if color_1 is None and color_2 is None:
return None
elif color_1 is None:
return color_2
elif color_2 is None:
return color_1
r_1, g_1, b_1, a_1 = color_1
h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1)
r_2, g_2, b_2, a_2 = color_2
h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2)
hue_diff = abs(h_1 - h_2)
if hue_diff < 0.5:
h = min(h_1, h_2) + hue_diff / 2.
else:
h = max(h_1, h_2) + (1. - hue_diff) / 2.
h %= 1.
s = (s_1 + s_2) / 2.
v = (v_1 + v_2) / 2.
r, g, b = hsv_to_rgb(h, s, v)
a = (a_1 + a_2) / 2.
color = (r, g, b, a)
return color
def _split_colors(color, n):
"""Create n colors in HSV space that occupy a gradient in value.
Parameters
----------
color : tuple
RGBA tuple with values between 0 and 1.
n : int >= 2
Number of colors on the gradient.
Returns
-------
colors : tuple of tuples, len = n
N RGBA tuples that occupy a gradient in value (low to high) but share
saturation and hue with the input color.
"""
r, g, b, a = color
h, s, v = rgb_to_hsv(r, g, b)
gradient_range = np.sqrt(n / 10.)
if v > 0.5:
v_max = min(0.95, v + gradient_range / 2)
v_min = max(0.05, v_max - gradient_range)
else:
v_min = max(0.05, v - gradient_range / 2)
v_max = min(0.95, v_min + gradient_range)
hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n))
rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors)
rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors)
return tuple(rgba_colors)
def _n_colors(n, bytes_=False, cmap='hsv'):
"""Produce a list of n unique RGBA color tuples based on a colormap.
Parameters
----------
n : int
Number of colors.
bytes_ : bool
Return colors as integers values between 0 and 255 (instead of floats
between 0 and 1).
cmap : str
Which colormap to use.
Returns
-------
colors : array, shape (n, 4)
RGBA color values.
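Examples
--------
A hedged usage sketch::
    colors = _n_colors(5)                    # five RGBA rows with floats in [0, 1]
    byte_colors = _n_colors(5, bytes_=True)  # the same colors as 0-255 integers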
"""
n_max = 2 ** 10
if n > n_max:
raise NotImplementedError("Can't produce more than %i unique "
"colors" % n_max)
from matplotlib.cm import get_cmap
cm = get_cmap(cmap, n_max)
pos = np.linspace(0, 1, n, False)
colors = cm(pos, bytes=bytes_)
if bytes_:
# make sure colors are unique
for ii, c in enumerate(colors):
if np.any(np.all(colors[:ii] == c, 1)):
raise RuntimeError('Could not get %d unique colors from %s '
'colormap. Try using a different colormap.'
% (n, cmap))
return colors
@fill_doc
class Label(object):
"""A FreeSurfer/MNE label with vertices restricted to one hemisphere.
Labels can be combined with the ``+`` operator:
* Duplicate vertices are removed.
* If duplicate vertices have conflicting position values, an error
is raised.
* Values of duplicate vertices are summed.
Parameters
----------
vertices : array, shape (N,)
Vertex indices (0 based).
pos : array, shape (N, 3) | None
Locations in meters. If None, then zeros are used.
values : array, shape (N,) | None
Values at the vertices. If None, then ones are used.
hemi : 'lh' | 'rh'
Hemisphere to which the label applies.
comment : str
Kept as information but not used by the object itself.
name : str
Kept as information but not used by the object itself.
filename : str
Kept as information but not used by the object itself.
subject : str | None
Name of the subject the label is from.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
%(verbose)s
Attributes
----------
color : None | tuple
Default label color, represented as RGBA tuple with values between 0
and 1.
comment : str
Comment from the first line of the label file.
hemi : 'lh' | 'rh'
Hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
pos : array, shape (N, 3)
Locations in meters.
subject : str | None
Subject name. It is best practice to set this to the proper
value on initialization, but it can also be set manually.
values : array, shape (N,)
Values at the vertices.
%(verbose)s
vertices : array, shape (N,)
Vertex indices (0 based)
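Examples
--------
A hedged construction sketch (synthetic vertex numbers and a made-up
subject name; no real surface data is involved)::
    import numpy as np
    lab1 = Label(vertices=np.array([0, 1, 2]), hemi='lh',
                 name='a', subject='sample')
    lab2 = Label(vertices=np.array([2, 5, 9]), hemi='lh',
                 name='b', subject='sample')
    both = lab1 + lab2   # union of vertices; the shared vertex 2 has its values summed
    assert len(both) == 5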
"""
@verbose
def __init__(self, vertices=(), pos=None, values=None, hemi=None,
comment="", name=None, filename=None, subject=None,
color=None, verbose=None): # noqa: D102
# check parameters
if not isinstance(hemi, str):
raise ValueError('hemi must be a string, not %s' % type(hemi))
vertices = np.asarray(vertices, int)
if np.any(np.diff(vertices.astype(int)) <= 0):
raise ValueError('Vertices must be ordered in increasing order.')
if color is not None:
from matplotlib.colors import colorConverter
color = colorConverter.to_rgba(color)
if values is None:
values = np.ones(len(vertices))
else:
values = np.asarray(values)
if pos is None:
pos = np.zeros((len(vertices), 3))
else:
pos = np.asarray(pos)
if not (len(vertices) == len(values) == len(pos)):
raise ValueError("vertices, values and pos need to have same "
"length (number of vertices)")
# name
if name is None and filename is not None:
name = op.basename(filename[:-6])
self.vertices = vertices
self.pos = pos
self.values = values
self.hemi = hemi
self.comment = comment
self.verbose = verbose
self.subject = _check_subject(None, subject, raise_error=False)
self.color = color
self.name = name
self.filename = filename
def __setstate__(self, state): # noqa: D105
self.vertices = state['vertices']
self.pos = state['pos']
self.values = state['values']
self.hemi = state['hemi']
self.comment = state['comment']
self.verbose = state['verbose']
self.subject = state.get('subject', None)
self.color = state.get('color', None)
self.name = state['name']
self.filename = state['filename']
def __getstate__(self): # noqa: D105
out = dict(vertices=self.vertices,
pos=self.pos,
values=self.values,
hemi=self.hemi,
comment=self.comment,
verbose=self.verbose,
subject=self.subject,
color=self.color,
name=self.name,
filename=self.filename)
return out
def __repr__(self): # noqa: D105
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
n_vert = len(self)
return "<Label | %s, %s : %i vertices>" % (name, self.hemi, n_vert)
def __len__(self):
"""Return the number of vertices.
Returns
-------
n_vertices : int
The number of vertices.
"""
return len(self.vertices)
def __add__(self, other):
"""Add Labels."""
_validate_type(other, (Label, BiHemiLabel), 'other')
if isinstance(other, BiHemiLabel):
return other + self
else: # isinstance(other, Label)
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
if self.hemi != other.hemi:
name = '%s + %s' % (self.name, other.name)
if self.hemi == 'lh':
lh, rh = self.copy(), other.copy()
else:
lh, rh = other.copy(), self.copy()
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
# check for overlap
duplicates = np.intersect1d(self.vertices, other.vertices)
n_dup = len(duplicates)
if n_dup:
self_dup = [np.where(self.vertices == d)[0][0]
for d in duplicates]
other_dup = [np.where(other.vertices == d)[0][0]
for d in duplicates]
if not np.all(self.pos[self_dup] == other.pos[other_dup]):
err = ("Labels %r and %r: vertices overlap but differ in "
"position values" % (self.name, other.name))
raise ValueError(err)
isnew = np.array([v not in duplicates for v in other.vertices])
vertices = np.hstack((self.vertices, other.vertices[isnew]))
pos = np.vstack((self.pos, other.pos[isnew]))
# find position of other's vertices in new array
tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices]
n_self = len(self.values)
n_other = len(other.values)
new_len = n_self + n_other - n_dup
values = np.zeros(new_len, dtype=self.values.dtype)
values[:n_self] += self.values
values[tgt_idx] += other.values
else:
vertices = np.hstack((self.vertices, other.vertices))
pos = np.vstack((self.pos, other.pos))
values = np.hstack((self.values, other.values))
indcs = np.argsort(vertices)
vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
comment = "%s + %s" % (self.comment, other.comment)
name0 = self.name if self.name else 'unnamed'
name1 = other.name if other.name else 'unnamed'
name = "%s + %s" % (name0, name1)
color = _blend_colors(self.color, other.color)
verbose = self.verbose or other.verbose
label = Label(vertices, pos, values, self.hemi, comment, name, None,
self.subject, color, verbose)
return label
def __sub__(self, other):
"""Subtract Labels."""
_validate_type(other, (Label, BiHemiLabel), 'other')
if isinstance(other, BiHemiLabel):
if self.hemi == 'lh':
return self - other.lh
else:
return self - other.rh
else: # isinstance(other, Label):
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
if self.hemi == other.hemi:
keep = np.in1d(self.vertices, other.vertices, True, invert=True)
else:
keep = np.arange(len(self.vertices))
name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed')
return Label(self.vertices[keep], self.pos[keep], self.values[keep],
self.hemi, self.comment, name, None, self.subject,
self.color, self.verbose)
def save(self, filename):
r"""Write to disk as FreeSurfer \*.label file.
Parameters
----------
filename : str
Path to label file to produce.
Notes
-----
Note that due to file specification limitations, the Label's subject
and color attributes are not saved to disk.
"""
write_label(filename, self)
def copy(self):
"""Copy the label instance.
Returns
-------
label : instance of Label
The copied label.
"""
return cp.deepcopy(self)
def fill(self, src, name=None):
"""Fill the surface between sources for a source space label.
Parameters
----------
src : SourceSpaces
Source space in which the label was defined. If a source space is
provided, the label is expanded to fill in surface vertices that
lie between the vertices included in the source space. For the
added vertices, ``pos`` is filled in with positions from the
source space, and ``values`` is filled in from the closest source
space vertex.
name : None | str
Name for the new Label (default is self.name).
Returns
-------
label : Label
The label covering the same vertices in source space but also
including intermediate surface vertices.
See Also
--------
Label.restrict
Label.smooth
"""
# find source space patch info
if len(self.vertices) == 0:
return self.copy()
hemi_src = _get_label_src(self, src)
if not np.all(np.in1d(self.vertices, hemi_src['vertno'])):
msg = "Source space does not contain all of the label's vertices"
raise ValueError(msg)
if hemi_src['nearest'] is None:
warn("Source space is being modified in place because patch "
"information is needed. To avoid this in the future, run "
"mne.add_source_space_distances() on the source space "
"and save it to disk.")
if check_version('scipy', '1.3'):
dist_limit = 0
else:
warn('SciPy < 1.3 detected, adding source space patch '
'information will be slower. Consider upgrading SciPy.')
dist_limit = np.inf
add_source_space_distances(src, dist_limit=dist_limit)
nearest = hemi_src['nearest']
# find new vertices
include = np.in1d(nearest, self.vertices, False)
vertices = np.nonzero(include)[0]
# values
nearest_in_label = np.digitize(nearest[vertices], self.vertices, True)
values = self.values[nearest_in_label]
# pos
pos = hemi_src['rr'][vertices]
name = self.name if name is None else name
label = Label(vertices, pos, values, self.hemi, self.comment, name,
None, self.subject, self.color)
return label
def restrict(self, src, name=None):
"""Restrict a label to a source space.
Parameters
----------
src : instance of SourceSpaces
The source spaces to use to restrict the label.
name : None | str
Name for the new Label (default is self.name).
Returns
-------
label : instance of Label
The Label restricted to the set of source space vertices.
See Also
--------
Label.fill
Notes
-----
.. versionadded:: 0.20
"""
if len(self.vertices) == 0:
return self.copy()
hemi_src = _get_label_src(self, src)
mask = np.in1d(self.vertices, hemi_src['vertno'])
name = self.name if name is None else name
label = Label(self.vertices[mask], self.pos[mask], self.values[mask],
self.hemi, self.comment, name, None, self.subject,
self.color)
return label
@verbose
def smooth(self, subject=None, smooth=2, grade=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Smooth the label.
Useful for filling in labels made in a
decimated source space for display.
Parameters
----------
subject : str | None
The name of the subject used. If None, the value will be
taken from self.subject.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used. For a
grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a
label.
grade : int, list of shape (2,), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
%(subjects_dir)s
%(n_jobs)s
%(verbose_meth)s
Returns
-------
label : instance of Label
The smoothed label.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using mne.read_surface
with ``label.vertices``.
"""
subject = _check_subject(self.subject, subject)
return self.morph(subject, subject, smooth, grade, subjects_dir,
n_jobs, verbose)
@verbose
def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Morph the label.
Useful for transforming a label from one subject to another.
Parameters
----------
subject_from : str | None
The name of the subject of the current label. If None, the
initial subject will be taken from self.subject.
subject_to : str
The name of the subject to morph the label to. This will
be put in label.subject of the output label file.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used.
grade : int, list of shape (2,), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
%(subjects_dir)s
%(n_jobs)s
%(verbose_meth)s
Returns
-------
label : instance of Label
The morphed label.
See Also
--------
mne.morph_labels : Morph a set of labels.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using `mne.read_surface`
with ``label.vertices``.
"""
from .morph import compute_source_morph, grade_to_vertices
subject_from = _check_subject(self.subject, subject_from)
if not isinstance(subject_to, str):
raise TypeError('"subject_to" must be entered as a string')
if not isinstance(smooth, int):
raise TypeError('smooth must be an integer')
if np.all(self.values == 0):
raise ValueError('Morphing label with all zero values will result '
'in the label having no vertices. Consider using '
'something like label.values.fill(1.0).')
idx = 0 if self.hemi == 'lh' else 1
if isinstance(grade, np.ndarray):
grade_ = [np.array([], int)] * 2
grade_[idx] = grade
grade = grade_
del grade_
grade = grade_to_vertices(subject_to, grade, subjects_dir=subjects_dir)
spacing = [np.array([], int)] * 2
spacing[idx] = grade[idx]
vertices = [np.array([], int)] * 2
vertices[idx] = self.vertices
data = self.values[:, np.newaxis]
assert len(data) == sum(len(v) for v in vertices)
stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
subject=subject_from)
stc = compute_source_morph(
stc, subject_from, subject_to, spacing=spacing, smooth=smooth,
subjects_dir=subjects_dir, warn=False).apply(stc)
inds = np.nonzero(stc.data)[0]
self.values = stc.data[inds, :].ravel()
self.pos = np.zeros((len(inds), 3))
self.vertices = stc.vertices[idx][inds]
self.subject = subject_to
return self
@fill_doc
def split(self, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split the Label into two or more parts.
Parameters
----------
parts : int >= 2 | tuple of str | str
Number of labels to create (default is 2), or tuple of strings
specifying label names for new labels (from posterior to anterior),
or 'contiguous' to split the label into connected components.
If a number or 'contiguous' is specified, names of the new labels
will be the input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
%(subjects_dir)s
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label, shape (n_parts,)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
If using 'contiguous' split, you must ensure that the label being split
uses the same triangular resolution as the surface mesh files in
``subjects_dir``. Also, some small fringe labels may be returned that
are close (but not connected) to the large components.
The spatial split finds the label's principal eigen-axis on the
spherical surface, projects all label vertex coordinates onto this
axis, and divides them at regular spatial intervals.
"""
if isinstance(parts, str) and parts == 'contiguous':
return _split_label_contig(self, subject, subjects_dir)
elif isinstance(parts, (tuple, int)):
return split_label(self, parts, subject, subjects_dir, freesurfer)
else:
raise ValueError("Need integer, tuple of strings, or string "
"('contiguous'). Got %s)" % type(parts))
def get_vertices_used(self, vertices=None):
"""Get the source space's vertices inside the label.
Parameters
----------
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
label_verts : ndarray of int, shape (n_label_vertices,)
The vertices of the label that are used by the data.
"""
if vertices is None:
vertices = np.arange(10242)
label_verts = vertices[np.in1d(vertices, self.vertices)]
return label_verts
def get_tris(self, tris, vertices=None):
"""Get the source space's triangles inside the label.
Parameters
----------
tris : ndarray of int, shape (n_tris, 3)
The set of triangles corresponding to the vertices in a
source space.
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
label_tris : ndarray of int, shape (n_tris, 3)
The subset of tris used by the label.
"""
vertices_ = self.get_vertices_used(vertices)
selection = np.all(np.in1d(tris, vertices_).reshape(tris.shape),
axis=1)
label_tris = tris[selection]
if len(np.unique(label_tris)) < len(vertices_):
logger.info('Surprising label structure. Trying to repair '
'triangles.')
dropped_vertices = np.setdiff1d(vertices_, label_tris)
n_dropped = len(dropped_vertices)
assert n_dropped == (len(vertices_) - len(np.unique(label_tris)))
# put missing vertices as extra zero-length triangles
add_tris = (dropped_vertices +
np.zeros((len(dropped_vertices), 3), dtype=int).T)
label_tris = np.r_[label_tris, add_tris.T]
assert len(np.unique(label_tris)) == len(vertices_)
return label_tris
@fill_doc
def center_of_mass(self, subject=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of the label.
This function computes the spatial center of mass on the surface
as in :footcite:`LarsonLee2013`.
Parameters
----------
subject : str | None
The subject the label is defined for.
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from the label. Otherwise,
it could be any vertex from surf. If an array of int, the
returned vertex will come from that array. If instance of
SourceSpaces (as of 0.13), the returned vertex will be from
the given source space. For most accurate estimates, do not
restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by its label value.
See Also
--------
SourceEstimate.center_of_mass
vertex_to_mni
Notes
-----
.. versionadded:: 0.13
References
----------
.. footbibliography::
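Examples
--------
A minimal usage sketch, assuming ``label`` has nonzero values and
``subjects_dir`` points to a FreeSurfer subjects directory::

    vertex = label.center_of_mass(subject='fsaverage',
                                  restrict_vertices=True,
                                  subjects_dir=subjects_dir)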
"""
if not isinstance(surf, str):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.values < 0):
raise ValueError('Cannot compute COM with negative values')
if np.all(self.values == 0):
raise ValueError('Cannot compute COM with all values == 0. For '
'structural labels, consider setting to ones via '
'label.values[:] = 1.')
vertex = _center_of_mass(self.vertices, self.values, self.hemi, surf,
subject, subjects_dir, restrict_vertices)
return vertex
def _get_label_src(label, src):
_validate_type(src, SourceSpaces, 'src')
if src.kind != 'surface':
raise RuntimeError('Cannot operate on SourceSpaces that are not '
'surface type, got %s' % (src.kind,))
if label.hemi == 'lh':
hemi_src = src[0]
else:
hemi_src = src[1]
return hemi_src
class BiHemiLabel(object):
"""A freesurfer/MNE label with vertices in both hemispheres.
Parameters
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
Name for the label.
color : None | color
Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Attributes
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
subject : str | None
Subject the label is from.
"""
def __init__(self, lh, rh, name=None, color=None): # noqa: D102
if lh.subject != rh.subject:
raise ValueError('lh.subject (%s) and rh.subject (%s) must '
'agree' % (lh.subject, rh.subject))
self.lh = lh
self.rh = rh
self.name = name
self.subject = lh.subject
self.color = color
self.hemi = 'both'
def __repr__(self): # noqa: D105
temp = "<BiHemiLabel | %s, lh : %i vertices, rh : %i vertices>"
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
return temp % (name, len(self.lh), len(self.rh))
def __len__(self):
"""Return the number of vertices.
Returns
-------
n_vertices : int
The number of vertices.
"""
return len(self.lh) + len(self.rh)
def __add__(self, other):
"""Add labels."""
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh + other
rh = self.rh
else:
lh = self.lh
rh = self.rh + other
elif isinstance(other, BiHemiLabel):
lh = self.lh + other.lh
rh = self.rh + other.rh
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
name = '%s + %s' % (self.name, other.name)
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
def __sub__(self, other):
"""Subtract labels."""
_validate_type(other, (Label, BiHemiLabel), 'other')
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh - other
rh = self.rh
else:
rh = self.rh - other
lh = self.lh
else: # isinstance(other, BiHemiLabel)
lh = self.lh - other.lh
rh = self.rh - other.rh
if len(lh.vertices) == 0:
return rh
elif len(rh.vertices) == 0:
return lh
else:
name = '%s - %s' % (self.name, other.name)
return BiHemiLabel(lh, rh, name, self.color)
def read_label(filename, subject=None, color=None):
"""Read FreeSurfer Label file.
Parameters
----------
filename : str
Path to label file.
subject : str | None
Name of the subject the data are defined for.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Returns
-------
label : Label
Instance of Label object with attributes:
- ``comment``: comment from the first line of the label file
- ``vertices``: vertex indices (0 based, column 1)
- ``pos``: locations in meters (columns 2 - 4 divided by 1000)
- ``values``: values at the vertices (column 5)
See Also
--------
read_labels_from_annot
write_labels_to_annot
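Examples
--------
A minimal usage sketch (the file path below is hypothetical)::

    label = mne.read_label('/path/to/subjects/fsaverage/label/lh.BA1.label',
                           subject='fsaverage')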
"""
if subject is not None and not isinstance(subject, str):
raise TypeError('subject must be a string')
# find hemi
basename = op.basename(filename)
if basename.endswith('lh.label') or basename.startswith('lh.'):
hemi = 'lh'
elif basename.endswith('rh.label') or basename.startswith('rh.'):
hemi = 'rh'
else:
raise ValueError('Cannot find which hemisphere it is. File should start '
'with lh. or rh., or end with lh.label or rh.label: %s' % (basename,))
# find name
if basename.startswith(('lh.', 'rh.')):
basename_ = basename[3:]
if basename.endswith('.label'):
basename_ = basename_[:-6]
else:
basename_ = basename[:-9]
name = "%s-%s" % (basename_, hemi)
# read the file
with open(filename, 'r') as fid:
comment = fid.readline().replace('\n', '')[1:]
nv = int(fid.readline())
data = np.empty((5, nv))
for i, line in enumerate(fid):
data[:, i] = line.split()
# let's make sure everything is ordered correctly
vertices = np.array(data[0], dtype=np.int32)
pos = 1e-3 * data[1:4].T
values = data[4]
order = np.argsort(vertices)
vertices = vertices[order]
pos = pos[order]
values = values[order]
label = Label(vertices, pos, values, hemi, comment, name, filename,
subject, color)
return label
@verbose
def write_label(filename, label, verbose=None):
"""Write a FreeSurfer label.
Parameters
----------
filename : str
Path to label file to produce.
label : Label
The label object to save.
%(verbose)s
See Also
--------
write_labels_to_annot
Notes
-----
Note that due to file specification limitations, the Label's subject and
color attributes are not saved to disk.
"""
hemi = label.hemi
path_head, name = op.split(filename)
if name.endswith('.label'):
name = name[:-6]
if not (name.startswith(hemi) or name.endswith(hemi)):
name += '-' + hemi
filename = op.join(path_head, name) + '.label'
logger.info('Saving label to : %s' % filename)
with open(filename, 'wb') as fid:
n_vertices = len(label.vertices)
data = np.zeros((n_vertices, 5), dtype=np.float64)
data[:, 0] = label.vertices
data[:, 1:4] = 1e3 * label.pos
data[:, 4] = label.values
fid.write(b'#%s\n' % label.comment.encode())
fid.write(b'%d\n' % n_vertices)
for d in data:
fid.write(b'%d %f %f %f %f\n' % tuple(d))
def _prep_label_split(label, subject=None, subjects_dir=None):
"""Get label and subject information prior to label splitting."""
# If necessary, find the label
if isinstance(label, BiHemiLabel):
raise TypeError("Can only split labels restricted to one hemisphere.")
elif isinstance(label, str):
label = read_label(label)
# Find the subject
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if label.subject is None and subject is None:
raise ValueError("The subject needs to be specified.")
elif subject is None:
subject = label.subject
elif label.subject is None:
pass
elif subject != label.subject:
raise ValueError("The label specifies a different subject (%r) from "
"the subject parameter (%r)."
% (label.subject, subject))
return label, subject, subjects_dir
def _split_label_contig(label_to_split, subject=None, subjects_dir=None):
"""Split label into contiguous regions (i.e., connected components).
Parameters
----------
label_to_split : Label | str
Label which is to be split (Label object or path to a label file).
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
%(subjects_dir)s
Returns
-------
labels : list of Label
The contiguous labels, in order of descending size.
"""
# Convert to correct input if necessary
label_to_split, subject, subjects_dir = _prep_label_split(label_to_split,
subject,
subjects_dir)
# Find the spherical surface to get vertices and tris
surf_fname = '.'.join((label_to_split.hemi, 'sphere'))
surf_path = op.join(subjects_dir, subject, 'surf', surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# Get vertices we want to keep and compute mesh edges
verts_arr = label_to_split.vertices
edges_all = mesh_edges(surface_tris)
# Subselect rows and cols of vertices that belong to the label
select_edges = edges_all[verts_arr][:, verts_arr].tocoo()
# Compute connected components and store as lists of vertex numbers
comp_labels = _get_components(verts_arr, select_edges)
# Convert to indices in the original surface space
label_divs = []
for comp in comp_labels:
label_divs.append(verts_arr[comp])
# Construct label division names
n_parts = len(label_divs)
if label_to_split.name.endswith(('lh', 'rh')):
basename = label_to_split.name[:-3]
name_ext = label_to_split.name[-3:]
else:
basename = label_to_split.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
# Colors
if label_to_split.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label_to_split.color, n_parts)
# Sort label divisions by their size (in vertices)
label_divs.sort(key=lambda x: len(x), reverse=True)
labels = []
for div, name, color in zip(label_divs, names, colors):
# Get indices of dipoles within this division of the label
verts = np.array(sorted(list(div)), int)
vert_indices = np.in1d(verts_arr, verts, assume_unique=True)
# Set label attributes
pos = label_to_split.pos[vert_indices]
values = label_to_split.values[vert_indices]
hemi = label_to_split.hemi
comment = label_to_split.comment
lbl = Label(verts, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
@fill_doc
def split_label(label, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split a Label into two or more parts.
Parameters
----------
label : Label | str
Label which is to be split (Label object or path to a label file).
parts : int >= 2 | tuple of str
A sequence of strings specifying label names for the new labels (from
posterior to anterior), or the number of new labels to create (default
is 2). If a number is specified, names of the new labels will be the
input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
%(subjects_dir)s
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label, shape (n_parts,)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
Works by finding the label's principal eigen-axis on the spherical surface,
projecting all label vertex coordinates onto this axis and dividing them at
regular spatial intervals.
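Examples
--------
A minimal usage sketch, assuming ``label`` is an existing Label and
``subjects_dir`` points to a FreeSurfer subjects directory (the new label
names below are hypothetical)::

    post, ant = mne.split_label(label, parts=('roi_post', 'roi_ant'),
                                subjects_dir=subjects_dir)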
"""
from scipy import linalg
label, subject, subjects_dir = _prep_label_split(label, subject,
subjects_dir)
# find the parts
if np.isscalar(parts):
n_parts = int(parts)
if label.name.endswith(('lh', 'rh')):
basename = label.name[:-3]
name_ext = label.name[-3:]
else:
basename = label.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
else:
names = parts
n_parts = len(names)
if n_parts < 2:
raise ValueError("Can't split label into %i parts" % n_parts)
# find the spherical surface
surf_fname = '.'.join((label.hemi, 'sphere'))
surf_path = op.join(subjects_dir, subject, "surf", surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# find the label coordinates on the surface
points = surface_points[label.vertices]
center = np.mean(points, axis=0)
centered_points = points - center
# find the label's normal
if freesurfer:
# find the Freesurfer vertex closest to the center
distance = np.sqrt(np.sum(centered_points ** 2, axis=1))
i_closest = np.argmin(distance)
closest_vertex = label.vertices[i_closest]
# find the normal according to freesurfer convention
idx = np.any(surface_tris == closest_vertex, axis=1)
tris_for_normal = surface_tris[idx]
r1 = surface_points[tris_for_normal[:, 0], :]
r2 = surface_points[tris_for_normal[:, 1], :]
r3 = surface_points[tris_for_normal[:, 2], :]
tri_normals = fast_cross_3d((r2 - r1), (r3 - r1))
normal = np.mean(tri_normals, axis=0)
normal /= linalg.norm(normal)
else:
# Normal of the center
normal = center / linalg.norm(center)
# project all vertex coordinates on the tangential plane for this point
q, _ = linalg.qr(normal[:, np.newaxis])
tangent_u = q[:, 1:]
m_obs = np.dot(centered_points, tangent_u)
# find principal eigendirection
m_cov = np.dot(m_obs.T, m_obs)
w, vr = linalg.eig(m_cov)
i = np.argmax(w)
eigendir = vr[:, i]
# project back into 3d space
axis = np.dot(tangent_u, eigendir)
# orient them from posterior to anterior
if axis[1] < 0:
axis *= -1
# project the label on the axis
proj = np.dot(points, axis)
# assign mark (new label index)
proj -= proj.min()
proj /= (proj.max() / n_parts)
mark = proj // 1
mark[mark == n_parts] = n_parts - 1
# colors
if label.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label.color, n_parts)
# construct new labels
labels = []
for i, name, color in zip(range(n_parts), names, colors):
idx = (mark == i)
vert = label.vertices[idx]
pos = label.pos[idx]
values = label.values[idx]
hemi = label.hemi
comment = label.comment
lbl = Label(vert, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
def label_sign_flip(label, src):
"""Compute sign for label averaging.
Parameters
----------
label : Label | BiHemiLabel
A label.
src : SourceSpaces
The source space over which the label is defined.
Returns
-------
flip : array
Sign flip vector (contains 1 or -1).
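Examples
--------
A minimal usage sketch, assuming ``label``, a surface source space ``src``
(e.g., from an inverse operator), and an ``stc`` defined on ``src`` are
available::

    flip = mne.label_sign_flip(label, src)
    # sign-flipped mean time course within the label
    tc = np.mean(flip[:, np.newaxis] * stc.in_label(label).data, axis=0)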
"""
from scipy import linalg
if len(src) != 2:
raise ValueError('Only source spaces with 2 hemispheres are accepted')
lh_vertno = src[0]['vertno']
rh_vertno = src[1]['vertno']
# get source orientations
ori = list()
if label.hemi in ('lh', 'both'):
vertices = label.vertices if label.hemi == 'lh' else label.lh.vertices
vertno_sel = np.intersect1d(lh_vertno, vertices)
ori.append(src[0]['nn'][vertno_sel])
if label.hemi in ('rh', 'both'):
vertices = label.vertices if label.hemi == 'rh' else label.rh.vertices
vertno_sel = np.intersect1d(rh_vertno, vertices)
ori.append(src[1]['nn'][vertno_sel])
if len(ori) == 0:
raise Exception('Unknown hemisphere type "%s"' % (label.hemi,))
ori = np.concatenate(ori, axis=0)
if len(ori) == 0:
return np.array([], int)
_, _, Vh = linalg.svd(ori, full_matrices=False)
# The sign of Vh is ambiguous, so we should align to the max-positive
# (outward) direction
dots = np.dot(ori, Vh[0])
if np.mean(dots) < 0:
dots *= -1
# Comparing to the direction of the first right singular vector
flip = np.sign(dots)
return flip
@verbose
def stc_to_label(stc, src=None, smooth=True, connected=False,
subjects_dir=None, verbose=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : bool
Fill in vertices on the cortical surface that are not in the source
space based on the closest source space vertex (requires
src to be a SourceSpace).
connected : bool
If True, a list of connected labels will be returned in each
hemisphere. The labels are ordered in decreasing order of their
maximum value in the stc.
%(subjects_dir)s
%(verbose)s
Returns
-------
labels : list of Label | list of list of Label
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
ordered in decreasing order of their maximum value in the stc.
If no Label is available in a hemisphere, an empty list is returned.
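Examples
--------
A minimal usage sketch, assuming ``stc`` is a surface SourceEstimate
defined for 'fsaverage' and ``subjects_dir`` is set::

    lh_label, rh_label = mne.stc_to_label(stc, src='fsaverage',
                                          smooth=False,
                                          subjects_dir=subjects_dir)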
"""
if not isinstance(smooth, bool):
raise ValueError('smooth should be True or False. Got %s.' % smooth)
src = stc.subject if src is None else src
if src is None:
raise ValueError('src cannot be None if stc.subject is None')
if isinstance(src, str):
subject = src
else:
subject = stc.subject
if not isinstance(stc, SourceEstimate):
raise ValueError('SourceEstimate should be surface source estimates')
if isinstance(src, str):
if connected:
raise ValueError('The option to return only connected labels is '
'only available if source spaces are provided.')
if smooth:
msg = ("stc_to_label with smooth=True requires src to be an "
"instance of SourceSpace")
raise ValueError(msg)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
src_conn = spatial_src_adjacency(src).tocsr()
labels = []
cnt = 0
cnt_full = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
if connected: # we know src *must* be a SourceSpaces now
vertno = np.where(src[hemi_idx]['inuse'])[0]
if not len(np.setdiff1d(this_vertno, vertno)) == 0:
raise RuntimeError('stc contains vertices not present '
'in source space, did you morph?')
tmp = np.zeros((len(vertno), this_data.shape[1]))
this_vertno_idx = np.searchsorted(vertno, this_vertno)
tmp[this_vertno_idx] = this_data
this_data = tmp
offset = cnt_full + len(this_data)
this_src_adj = src_conn[cnt_full:offset, cnt_full:offset].tocoo()
this_data_abs_max = np.abs(this_data).max(axis=1)
clusters, _ = _find_clusters(this_data_abs_max, 0.,
adjacency=this_src_adj)
cnt_full += len(this_data)
# Then order clusters in descending order based on maximum value
clusters_max = np.argsort([np.max(this_data_abs_max[c])
for c in clusters])[::-1]
clusters = [clusters[k] for k in clusters_max]
clusters = [vertno[c] for c in clusters]
else:
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
if not connected:
this_labels = None
else:
this_labels = []
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
if smooth:
label = label.fill(src)
this_labels.append(label)
if not connected:
this_labels = this_labels[0]
labels.append(this_labels)
return labels
def _verts_within_dist(graph, sources, max_dist):
"""Find all vertices wihin a maximum geodesic distance from source.
Parameters
----------
graph : scipy.sparse.csr_matrix
Sparse matrix with distances between adjacent vertices.
sources : list of int
Source vertices.
max_dist : float
Maximum geodesic distance.
Returns
-------
verts : array
Vertices within max_dist.
dist : array
Distances from source vertex.
"""
dist_map = {}
verts_added_last = []
for source in sources:
dist_map[source] = 0
verts_added_last.append(source)
# add neighbors until no more neighbors within max_dist can be found
while len(verts_added_last) > 0:
verts_added = []
for i in verts_added_last:
v_dist = dist_map[i]
row = graph[i, :]
neighbor_vert = row.indices
neighbor_dist = row.data
for j, d in zip(neighbor_vert, neighbor_dist):
n_dist = v_dist + d
if j in dist_map:
if n_dist < dist_map[j]:
dist_map[j] = n_dist
else:
if n_dist <= max_dist:
dist_map[j] = n_dist
# we found a new vertex within max_dist
verts_added.append(j)
verts_added_last = verts_added
verts = np.sort(np.array(list(dist_map.keys()), int))
dist = np.array([dist_map[v] for v in verts], int)
return verts, dist
def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
"""Parallelize grow_labels."""
labels = []
for seed, extent, hemi, name in zip(seeds, extents, hemis, names):
label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
# create a label
if len(seed) == 1:
seed_repr = str(seed)
else:
seed_repr = ','.join(map(str, seed))
comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr,
extent)
label = Label(vertices=label_verts,
pos=vert[hemi][label_verts],
values=label_dist,
hemi=hemi,
comment=comment,
name=str(name),
subject=subject)
labels.append(label)
return labels
@fill_doc
def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
overlap=True, names=None, surface='white', colors=None):
"""Generate circular labels in source space with region growing.
This function generates a number of labels in source space by growing
regions starting from the vertices defined in "seeds". For each seed, a
label is generated containing all vertices within a maximum geodesic
distance on the white matter surface from the seed.
Parameters
----------
subject : str
Name of the subject as in SUBJECTS_DIR.
seeds : int | list
Seed, or list of seeds. Each seed can be either a vertex number or
a list of vertex numbers.
extents : array | float
Extents (radius in mm) of the labels.
hemis : array | int
Hemispheres to use for the labels (0: left, 1: right).
%(subjects_dir)s
%(n_jobs)s
Likely only useful if tens or hundreds of labels are being expanded
simultaneously. Does not apply with ``overlap=False``.
overlap : bool
Produce overlapping labels. If True (default), the resulting labels
can be overlapping. If False, each label will be grown one step at a
time, and occupied territory will not be invaded.
names : None | list of str
Assign names to the new labels (list needs to have the same length as
seeds).
surface : str
The surface used to grow the labels, defaults to the white surface.
colors : array, shape (n, 4) or (4,) | None
How to assign colors to each label. If None then unique colors will be
chosen automatically (default), otherwise colors will be broadcast
from the array. The first three values will be interpreted as RGB
colors and the fourth column as the alpha value (commonly 1).
Returns
-------
labels : list of Label
The labels' ``comment`` attribute contains information on the seed
vertex and extent; the ``values`` attribute contains distance from the
seed in millimeters.
Notes
-----
"extents" and "hemis" can either be arrays with the same length as
seeds, which allows using a different extent and hemisphere for each
label, or scalars, in which case the same extent and hemisphere is
used for each label.
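Examples
--------
A minimal usage sketch (the seed vertex numbers below are hypothetical)::

    labels = mne.grow_labels('fsaverage', seeds=[1234, 5678], extents=10.,
                             hemis=[0, 1], names=['roi_a', 'roi_b'],
                             subjects_dir=subjects_dir)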
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
n_jobs = check_n_jobs(n_jobs)
# make sure the inputs are arrays
if np.isscalar(seeds):
seeds = [seeds]
seeds = [np.atleast_1d(seed) for seed in seeds]
extents = np.atleast_1d(extents)
hemis = np.atleast_1d(hemis)
n_seeds = len(seeds)
if len(extents) != 1 and len(extents) != n_seeds:
raise ValueError('The extents parameter has to be of length 1 or '
'len(seeds)')
if len(hemis) != 1 and len(hemis) != n_seeds:
raise ValueError('The hemis parameter has to be of length 1 or '
'len(seeds)')
if colors is not None:
if len(colors.shape) == 1: # if one color for all seeds
n_colors = 1
n = colors.shape[0]
else:
n_colors, n = colors.shape
if n_colors != n_seeds and n_colors != 1:
msg = ('Number of colors (%d) and seeds (%d) are not compatible.' %
(n_colors, n_seeds))
raise ValueError(msg)
if n != 4:
msg = 'Colors must have 4 values (RGB and alpha), not %d.' % n
raise ValueError(msg)
# make the arrays the same length as seeds
if len(extents) == 1:
extents = np.tile(extents, n_seeds)
if len(hemis) == 1:
hemis = np.tile(hemis, n_seeds)
hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis])
# names
if names is None:
names = ["Label_%i-%s" % items for items in enumerate(hemis)]
else:
if np.isscalar(names):
names = [names]
if len(names) != n_seeds:
raise ValueError('The names parameter has to be None or have '
'length len(seeds)')
for i, hemi in enumerate(hemis):
if not names[i].endswith(hemi):
names[i] = '-'.join((names[i], hemi))
names = np.array(names)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
if overlap:
# create the patches
parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
seeds = np.array_split(np.array(seeds, dtype='O'), n_jobs)
extents = np.array_split(extents, n_jobs)
hemis = np.array_split(hemis, n_jobs)
names = np.array_split(names, n_jobs)
labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject)
for s, e, h, n
in zip(seeds, extents, hemis, names)), [])
else:
# special procedure for non-overlapping labels
labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis,
vert, dist, names)
if colors is None:
# add a unique color to each label
label_colors = _n_colors(len(labels))
else:
# use specified colors
label_colors = np.empty((len(labels), 4))
label_colors[:] = colors
for label, color in zip(labels, label_colors):
label.color = color
return labels
def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_,
graphs, names_):
"""Grow labels while ensuring that they don't overlap."""
labels = []
for hemi in set(hemis):
hemi_index = (hemis == hemi)
seeds = [seed for seed, h in zip(seeds_, hemis) if h == hemi]
extents = extents_[hemi_index]
names = names_[hemi_index]
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
n_labels = len(seeds)
# prepare parcellation
parc = np.empty(n_vertices, dtype='int32')
parc[:] = -1
# initialize active sources
sources = {} # vert -> (label, dist_from_seed)
edge = [] # queue of vertices to process
for label, seed in enumerate(seeds):
if np.any(parc[seed] >= 0):
raise ValueError("Overlapping seeds")
parc[seed] = label
for s in np.atleast_1d(seed):
sources[s] = (label, 0.)
edge.append(s)
# grow from sources
while edge:
vert_from = edge.pop(0)
label, old_dist = sources[vert_from]
# add neighbors within allowable distance
row = graph[vert_from, :]
for vert_to, dist in zip(row.indices, row.data):
# Prevent adding a point that has already been used
# (prevents infinite loop)
if (vert_to == seeds[label]).any():
continue
new_dist = old_dist + dist
# abort if outside of extent
if new_dist > extents[label]:
continue
vert_to_label = parc[vert_to]
if vert_to_label >= 0:
_, vert_to_dist = sources[vert_to]
# abort if the vertex is occupied by a closer seed
if new_dist > vert_to_dist:
continue
elif vert_to in edge:
edge.remove(vert_to)
# assign label value
parc[vert_to] = label
sources[vert_to] = (label, new_dist)
edge.append(vert_to)
# convert parc to labels
for i in range(n_labels):
vertices = np.nonzero(parc == i)[0]
name = str(names[i])
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
@fill_doc
def random_parcellation(subject, n_parcel, hemi, subjects_dir=None,
surface='white', random_state=None):
"""Generate random cortex parcellation by growing labels.
This function generates a number of labels which don't intersect and
cover the whole surface. Regions are growing around randomly chosen
seeds.
Parameters
----------
subject : str
Name of the subject as in SUBJECTS_DIR.
n_parcel : int
Total number of cortical parcels.
hemi : str
Hemisphere id (i.e., 'lh', 'rh', or 'both'). In the case
of 'both', both hemispheres are processed with (n_parcel // 2)
parcels per hemisphere.
%(subjects_dir)s
surface : str
The surface used to grow the labels, defaults to the white surface.
%(random_state)s
Returns
-------
labels : list of Label
Random cortex parcellation.
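Examples
--------
A minimal usage sketch, assuming the 'fsaverage' surfaces are available
in ``subjects_dir``::

    labels = mne.random_parcellation('fsaverage', n_parcel=50, hemi='both',
                                     subjects_dir=subjects_dir,
                                     random_state=0)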
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if hemi == 'both':
hemi = ['lh', 'rh']
hemis = np.atleast_1d(hemi)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
# create the patches
labels = _cortex_parcellation(subject, n_parcel, hemis, vert, dist,
random_state)
# add a unique color to each label
colors = _n_colors(len(labels))
for label, color in zip(labels, colors):
label.color = color
return labels
def _cortex_parcellation(subject, n_parcel, hemis, vertices_, graphs,
random_state=None):
"""Random cortex parcellation."""
labels = []
rng = check_random_state(random_state)
for hemi in set(hemis):
parcel_size = len(hemis) * len(vertices_[hemi]) // n_parcel
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
# prepare parcellation
parc = np.full(n_vertices, -1, dtype='int32')
# initialize active sources
s = rng.choice(range(n_vertices))
label_idx = 0
edge = [s] # queue of vertices to process
parc[s] = label_idx
label_size = 1
rest = len(parc) - 1
# grow from sources
while rest:
# if there are no free neighbors, start a new parcel
if not edge:
rest_idx = np.where(parc < 0)[0]
s = rng.choice(rest_idx)
edge = [s]
label_idx += 1
label_size = 1
parc[s] = label_idx
rest -= 1
vert_from = edge.pop(0)
# add neighbors within allowable distance
# row = graph[vert_from, :]
# row_indices, row_data = row.indices, row.data
sl = slice(graph.indptr[vert_from], graph.indptr[vert_from + 1])
row_indices, row_data = graph.indices[sl], graph.data[sl]
for vert_to, dist in zip(row_indices, row_data):
vert_to_label = parc[vert_to]
# abort if the vertex is already occupied
if vert_to_label >= 0:
continue
# abort if outside of extent
if label_size > parcel_size:
label_idx += 1
label_size = 1
edge = [vert_to]
parc[vert_to] = label_idx
rest -= 1
break
# assign label value
parc[vert_to] = label_idx
label_size += 1
edge.append(vert_to)
rest -= 1
# merging small labels
# label adjacency matrix
n_labels = label_idx + 1
label_sizes = np.empty(n_labels, dtype=int)
label_conn = np.zeros([n_labels, n_labels], dtype='bool')
for i in range(n_labels):
vertices = np.nonzero(parc == i)[0]
label_sizes[i] = len(vertices)
neighbor_vertices = graph[vertices, :].indices
neighbor_labels = np.unique(np.array(parc[neighbor_vertices]))
label_conn[i, neighbor_labels] = 1
np.fill_diagonal(label_conn, 0)
# merging
label_id = range(n_labels)
while n_labels > n_parcel // len(hemis):
# smallest label and its smallest neighbor
i = np.argmin(label_sizes)
neighbors = np.nonzero(label_conn[i, :])[0]
j = neighbors[np.argmin(label_sizes[neighbors])]
# merging two labels
label_conn[j, :] += label_conn[i, :]
label_conn[:, j] += label_conn[:, i]
label_conn = np.delete(label_conn, i, 0)
label_conn = np.delete(label_conn, i, 1)
label_conn[j, j] = 0
label_sizes[j] += label_sizes[i]
label_sizes = np.delete(label_sizes, i, 0)
n_labels -= 1
vertices = np.nonzero(parc == label_id[i])[0]
parc[vertices] = label_id[j]
label_id = np.delete(label_id, i, 0)
# convert parc to labels
for i in range(n_labels):
vertices = np.nonzero(parc == label_id[i])[0]
name = 'label_' + str(i)
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
def _read_annot_cands(dir_name, raise_error=True):
"""List the candidate parcellations."""
if not op.isdir(dir_name):
if not raise_error:
return list()
raise IOError('Directory for annotation does not exist: %s'
% (dir_name,))
cands = os.listdir(dir_name)
cands = sorted(set(c.replace('lh.', '').replace('rh.', '').replace(
'.annot', '')
for c in cands if '.annot' in c),
key=lambda x: x.lower())
# exclude .ctab files
cands = [c for c in cands if '.ctab' not in c]
return cands
def _read_annot(fname):
"""Read a Freesurfer annotation from a .annot file.
Note : Copied from PySurfer
Parameters
----------
fname : str
Path to annotation file
Returns
-------
annot : numpy array, shape=(n_verts,)
Annotation id at each vertex
ctab : numpy array, shape=(n_entries, 5)
RGBA + label id colortable array
names : list of str
List of region names as stored in the annot file
"""
if not op.isfile(fname):
dir_name = op.split(fname)[0]
cands = _read_annot_cands(dir_name)
if len(cands) == 0:
raise IOError('No such file %s, no candidate parcellations '
'found in directory' % fname)
else:
raise IOError('No such file %s, candidate parcellations in '
'that directory:\n%s' % (fname, '\n'.join(cands)))
with open(fname, "rb") as fid:
n_verts = np.fromfile(fid, '>i4', 1)[0]
data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
annot = data[data[:, 0], 1]
ctab_exists = np.fromfile(fid, '>i4', 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fid, '>i4', 1)[0]
if n_entries > 0:
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, '>c', length) # discard orig_tab
names = list()
ctab = np.zeros((n_entries, 5), np.int64)
for i in range(n_entries):
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fid, '>i4', 1)[0]
ctab = np.zeros((n_entries, 5), np.int64)
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, "|S%d" % length, 1) # Orig table path
entries_to_read = np.fromfile(fid, '>i4', 1)[0]
names = list()
for i in range(entries_to_read):
np.fromfile(fid, '>i4', 1) # Structure
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
# convert to more common alpha value
ctab[:, 3] = 255 - ctab[:, 3]
return annot, ctab, names
def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
"""Get the .annot filenames and hemispheres."""
if annot_fname is not None:
# we use the .annot file specified by the user
hemis = [op.basename(annot_fname)[:2]]
if hemis[0] not in ['lh', 'rh']:
raise ValueError('Could not determine hemisphere from filename, '
'filename has to start with "lh" or "rh".')
annot_fname = [annot_fname]
else:
# construct .annot file names for requested subject, parc, hemi
_check_option('hemi', hemi, ['lh', 'rh', 'both'])
if hemi == 'both':
hemis = ['lh', 'rh']
else:
hemis = [hemi]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc)
annot_fname = [dst % hemi_ for hemi_ in hemis]
return annot_fname, hemis
def _load_vert_pos(subject, subjects_dir, surf_name, hemi, n_expected,
extra=''):
fname_surf = op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, surf_name))
vert_pos, _ = read_surface(fname_surf)
vert_pos /= 1e3 # the positions in labels are in meters
if len(vert_pos) != n_expected:
raise RuntimeError('Number of surface vertices (%s) for subject %s'
' does not match the expected number of vertices'
' (%s)%s'
% (len(vert_pos), subject, n_expected, extra))
return vert_pos
@verbose
def read_labels_from_annot(subject, parc='aparc', hemi='both',
surf_name='white', annot_fname=None, regexp=None,
subjects_dir=None, sort=True, verbose=None):
"""Read labels from a FreeSurfer annotation file.
Note: Only cortical labels will be returned.
Parameters
----------
subject : str
The subject for which to read the parcellation.
parc : str
The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'.
hemi : str
The hemisphere from which to read the parcellation, can be 'lh', 'rh',
or 'both'.
surf_name : str
Surface used to obtain vertex locations, e.g., 'white', 'pial'.
annot_fname : str or None
Filename of the .annot file. If not None, only this file is read
and 'parc' and 'hemi' are ignored.
regexp : str
Regular expression or substring to select particular labels from the
parcellation. E.g. 'superior' will return all labels in which this
substring is contained.
%(subjects_dir)s
sort : bool
If true, labels will be sorted by name before being returned.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
labels : list of Label
The labels, sorted by label name (ascending).
See Also
--------
write_labels_to_annot
morph_labels
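Examples
--------
A minimal usage sketch, assuming the 'fsaverage' annotation files are
available in ``subjects_dir``::

    labels = mne.read_labels_from_annot('fsaverage', parc='aparc',
                                        hemi='lh', regexp='precentral',
                                        subjects_dir=subjects_dir)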
"""
logger.info('Reading labels from parcellation...')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if regexp is not None:
# allow for convenient substring match
r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum()
else regexp))
# now we are ready to create the labels
n_read = 0
labels = list()
orig_names = set()
for fname, hemi in zip(annot_fname, hemis):
# read annotation
annot, ctab, label_names = _read_annot(fname)
label_rgbas = ctab[:, :4] / 255.
label_ids = ctab[:, -1]
# load the vertex positions from surface
vert_pos = _load_vert_pos(
subject, subjects_dir, surf_name, hemi, len(annot),
extra='for annotation file %s' % fname)
for label_id, label_name, label_rgba in\
zip(label_ids, label_names, label_rgbas):
vertices = np.where(annot == label_id)[0]
if len(vertices) == 0:
# label is not part of cortical surface
continue
label_name = label_name.decode()
orig_names.add(label_name)
name = f'{label_name}-{hemi}'
if (regexp is not None) and not r_.match(name):
continue
pos = vert_pos[vertices, :]
label = Label(vertices, pos, hemi=hemi, name=name,
subject=subject, color=tuple(label_rgba))
labels.append(label)
n_read = len(labels) - n_read
logger.info(' read %d labels from %s' % (n_read, fname))
# sort the labels by label name
if sort:
labels = sorted(labels, key=lambda l: l.name)
if len(labels) == 0:
msg = 'No labels found.'
if regexp is not None:
orig_names = '\n'.join(sorted(orig_names))
msg += (f' Maybe the regular expression {repr(regexp)} did not '
f'match any of:\n{orig_names}')
raise RuntimeError(msg)
return labels
def _check_labels_subject(labels, subject, name):
_validate_type(labels, (list, tuple), 'labels')
for label in labels:
_validate_type(label, Label, 'each entry in labels')
if subject is None:
subject = label.subject
if subject is not None: # label.subject can be None, depending on init
if subject != label.subject:
raise ValueError('Got multiple values of %s: %s and %s'
% (name, subject, label.subject))
if subject is None:
raise ValueError('if label.subject is None for all labels, '
'%s must be provided' % name)
return subject
@verbose
def morph_labels(labels, subject_to, subject_from=None, subjects_dir=None,
surf_name='white', verbose=None):
"""Morph a set of labels.
This is useful when morphing a set of non-overlapping labels (such as those
obtained with :func:`read_labels_from_annot`) from one subject to
another.
Parameters
----------
labels : list
The labels to morph.
subject_to : str
The subject to morph labels to.
subject_from : str | None
The subject to morph labels from. Can be None if the labels
have the ``.subject`` property defined.
%(subjects_dir)s
surf_name : str
Surface used to obtain vertex locations, e.g., 'white', 'pial'.
%(verbose)s
Returns
-------
labels : list
The morphed labels.
See Also
--------
read_labels_from_annot
mne.Label.morph
Notes
-----
This does not use the same algorithm as Freesurfer, so the results
morphing (e.g., from ``'fsaverage'`` to your subject) might not match
what Freesurfer produces during ``recon-all``.
.. versionadded:: 0.18
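Examples
--------
A minimal usage sketch ('sample' is a hypothetical target subject present
in ``subjects_dir``)::

    fs_labels = mne.read_labels_from_annot('fsaverage', 'aparc',
                                           subjects_dir=subjects_dir)
    labels = mne.morph_labels(fs_labels, subject_to='sample',
                              subject_from='fsaverage',
                              subjects_dir=subjects_dir)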
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_from = _check_labels_subject(labels, subject_from, 'subject_from')
mmaps = read_morph_map(subject_from, subject_to, subjects_dir)
vert_poss = [_load_vert_pos(subject_to, subjects_dir, surf_name, hemi,
mmap.shape[0])
for hemi, mmap in zip(('lh', 'rh'), mmaps)]
idxs = [mmap.argmax(axis=1) for mmap in mmaps]
out_labels = list()
values = filename = None
for label in labels:
li = dict(lh=0, rh=1)[label.hemi]
vertices = np.where(np.in1d(idxs[li], label.vertices))[0]
pos = vert_poss[li][vertices]
out_labels.append(
Label(vertices, pos, values, label.hemi, label.comment, label.name,
filename, subject_to, label.color, label.verbose))
return out_labels
@verbose
def labels_to_stc(labels, values, tmin=0, tstep=1, subject=None, src=None,
verbose=None):
"""Convert a set of labels and values to a STC.
This function is meant to work like the opposite of
`extract_label_time_course`.
Parameters
----------
%(eltc_labels)s
values : ndarray, shape (n_labels, ...)
The values in each label. Can be 1D or 2D.
tmin : float
The tmin to use for the STC.
tstep : float
The tstep to use for the STC.
subject : str | None
The subject for which to create the STC.
%(eltc_src)s
Can be omitted if using a surface source space, in which case
the label vertices will determine the output STC vertices.
Required if using a volumetric source space.
.. versionadded:: 0.22
%(verbose)s
Returns
-------
stc : instance of SourceEstimate | instance of VolSourceEstimate
The values-in-labels converted to a STC.
See Also
--------
extract_label_time_course
Notes
-----
Vertices that appear in more than one label will be averaged.
.. versionadded:: 0.18
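Examples
--------
A minimal usage sketch, assuming ``labels`` is a list of surface labels
with their ``.subject`` attribute set (one value per label)::

    import numpy as np
    values = np.arange(len(labels), dtype=float)
    stc = mne.labels_to_stc(labels, values)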
"""
values = np.array(values, float)
if values.ndim == 1:
values = values[:, np.newaxis]
if values.ndim != 2:
raise ValueError('values must have 1 or 2 dimensions, got %s'
% (values.ndim,))
_validate_type(src, (SourceSpaces, None))
if src is None:
data, vertices, subject = _labels_to_stc_surf(
labels, values, tmin, tstep, subject)
klass = SourceEstimate
else:
kind = src.kind
subject = _check_subject(
src._subject, subject, first_kind='source space subject',
raise_error=False)
_check_option('source space kind', kind, ('surface', 'volume'))
if kind == 'volume':
klass = VolSourceEstimate
else:
klass = SourceEstimate
# Easiest way is to get a dot-able operator and use it
vertices = [s['vertno'].copy() for s in src]
stc = klass(
np.eye(sum(len(v) for v in vertices)), vertices, 0, 1, subject)
label_op = extract_label_time_course(
stc, labels, src=src, mode='mean', allow_empty=True)
_check_values_labels(values, label_op.shape[0])
rev_op = np.zeros(label_op.shape[::-1])
rev_op[np.arange(label_op.shape[1]), np.argmax(label_op, axis=0)] = 1.
data = rev_op @ values
return klass(data, vertices, tmin, tstep, subject, verbose)
def _check_values_labels(values, n_labels):
if n_labels != len(values):
raise ValueError(
f'values.shape[0] ({values.shape[0]}) must match the number of '
f'labels ({n_labels})')
def _labels_to_stc_surf(labels, values, tmin, tstep, subject):
from scipy import sparse
subject = _check_labels_subject(labels, subject, 'subject')
_check_values_labels(values, len(labels))
vertices = dict(lh=[], rh=[])
data = dict(lh=[], rh=[])
for li, label in enumerate(labels):
data[label.hemi].append(
np.repeat(values[li][np.newaxis], len(label.vertices), axis=0))
vertices[label.hemi].append(label.vertices)
hemis = ('lh', 'rh')
for hemi in hemis:
vertices[hemi] = np.concatenate(vertices[hemi], axis=0)
data[hemi] = np.concatenate(data[hemi], axis=0).astype(float)
cols = np.arange(len(vertices[hemi]))
vertices[hemi], rows = np.unique(vertices[hemi], return_inverse=True)
mat = sparse.coo_matrix((np.ones(len(rows)), (rows, cols))).tocsr()
mat = mat * sparse.diags(1. / np.asarray(mat.sum(axis=-1))[:, 0])
data[hemi] = mat.dot(data[hemi])
vertices = [vertices[hemi] for hemi in hemis]
data = np.concatenate([data[hemi] for hemi in hemis], axis=0)
return data, vertices, subject
_DEFAULT_TABLE_NAME = 'MNE-Python Colortable'
def _write_annot(fname, annot, ctab, names, table_name=_DEFAULT_TABLE_NAME):
"""Write a Freesurfer annotation to a .annot file."""
assert len(names) == len(ctab)
with open(fname, 'wb') as fid:
n_verts = len(annot)
np.array(n_verts, dtype='>i4').tofile(fid)
data = np.zeros((n_verts, 2), dtype='>i4')
data[:, 0] = np.arange(n_verts)
data[:, 1] = annot
data.ravel().tofile(fid)
# indicate that color table exists
np.array(1, dtype='>i4').tofile(fid)
# color table version 2
np.array(-2, dtype='>i4').tofile(fid)
# write color table
n_entries = len(ctab)
np.array(n_entries, dtype='>i4').tofile(fid)
# write our color table name
_write_annot_str(fid, table_name)
# number of entries to write
np.array(n_entries, dtype='>i4').tofile(fid)
# write entries
for ii, (name, color) in enumerate(zip(names, ctab)):
np.array(ii, dtype='>i4').tofile(fid)
_write_annot_str(fid, name)
np.array(color[:4], dtype='>i4').tofile(fid)
def _write_annot_str(fid, s):
s = s.encode('ascii') + b'\x00'
np.array(len(s), '>i4').tofile(fid)
fid.write(s)
@verbose
def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
subjects_dir=None, annot_fname=None,
colormap='hsv', hemi='both', sort=True,
table_name=_DEFAULT_TABLE_NAME, verbose=None):
r"""Create a FreeSurfer annotation from a list of labels.
Parameters
----------
labels : list with instances of mne.Label
The labels to create a parcellation from.
subject : str | None
The subject for which to write the parcellation.
parc : str | None
The parcellation name to use.
overwrite : bool
Overwrite files if they already exist.
%(subjects_dir)s
annot_fname : str | None
Filename of the .annot file. If not None, only this file is written
and 'parc' and 'subject' are ignored.
colormap : str
Colormap to use to generate label colors for labels that do not
have a color specified.
hemi : 'both' | 'lh' | 'rh'
The hemisphere(s) for which to write \*.annot files (only applies if
annot_fname is not specified; default is 'both').
sort : bool
If True (default), labels will be sorted by name before writing.
.. versionadded:: 0.21.0
table_name : str
The table name to use for the colortable.
.. versionadded:: 0.21.0
%(verbose)s
See Also
--------
read_labels_from_annot
Notes
-----
Vertices that are not covered by any of the labels are assigned to a label
named "unknown".
"""
logger.info('Writing labels to parcellation...')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if not overwrite:
for fname in annot_fname:
if op.exists(fname):
raise ValueError('File %s exists. Use "overwrite=True" to '
'overwrite it' % fname)
# prepare container for data to save:
to_save = []
# keep track of issues found in the labels
duplicate_colors = []
invalid_colors = []
overlap = []
no_color = (-1, -1, -1, -1)
no_color_rgb = (-1, -1, -1)
for hemi, fname in zip(hemis, annot_fname):
hemi_labels = [label for label in labels if label.hemi == hemi]
n_hemi_labels = len(hemi_labels)
if n_hemi_labels == 0:
ctab = np.empty((0, 4), dtype=np.int32)
ctab_rgb = ctab[:, :3]
else:
if sort:
hemi_labels.sort(key=lambda label: label.name)
# convert colors to 0-255 RGBA tuples
hemi_colors = [no_color if label.color is None else
tuple(int(round(255 * i)) for i in label.color)
for label in hemi_labels]
ctab = np.array(hemi_colors, dtype=np.int32)
ctab_rgb = ctab[:, :3]
# make color dict (for annot ID, only R, G and B count)
labels_by_color = defaultdict(list)
for label, color in zip(hemi_labels, ctab_rgb):
labels_by_color[tuple(color)].append(label.name)
# check label colors
for color, names in labels_by_color.items():
if color == no_color_rgb:
continue
if color == (0, 0, 0):
# we cannot have an all-zero color, otherwise e.g. tksurfer
# refuses to read the parcellation
warn('At least one label contains a color with "r=0, '
'g=0, b=0" value. Some FreeSurfer tools may fail '
'to read the parcellation')
if any(i > 255 for i in color):
msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
invalid_colors.append(msg)
if len(names) > 1:
msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
duplicate_colors.append(msg)
# replace None values (labels with unspecified color)
if labels_by_color[no_color_rgb]:
default_colors = _n_colors(n_hemi_labels, bytes_=True,
cmap=colormap)
# keep track of colors known to be in hemi_colors :
safe_color_i = 0
for i in range(n_hemi_labels):
if ctab[i, 0] == -1:
color = default_colors[i]
# make sure not to add a duplicate color
while np.any(np.all(color[:3] == ctab_rgb, 1)):
color = default_colors[safe_color_i]
safe_color_i += 1
# assign the color
ctab[i] = color
# find number of vertices in surface
if subject is not None and subjects_dir is not None:
fpath = op.join(subjects_dir, subject, 'surf', '%s.white' % hemi)
points, _ = read_surface(fpath)
n_vertices = len(points)
else:
if len(hemi_labels) > 0:
max_vert = max(np.max(label.vertices) for label in hemi_labels)
n_vertices = max_vert + 1
else:
n_vertices = 1
warn('Number of vertices in the surface could not be '
'verified because the surface file could not be found; '
'specify subject and subjects_dir parameters.')
# Create annot and color table array to write
annot = np.empty(n_vertices, dtype=np.int64)
annot[:] = -1
# create the annotation ids from the colors
annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
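# Illustrative example of this encoding: an RGB color of (220, 20, 20)
# yields the annotation id 220 * 1 + 20 * 256 + 20 * 65536 = 1316060,
# which is the same id that _read_annot decodes from the color table.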
for label, annot_id in zip(hemi_labels, annot_ids):
# make sure the label is not overwriting another label
if np.any(annot[label.vertices] != -1):
other_ids = set(annot[label.vertices])
other_ids.discard(-1)
other_indices = (annot_ids.index(i) for i in other_ids)
other_names = (hemi_labels[i].name for i in other_indices)
other_repr = ', '.join(other_names)
msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
overlap.append(msg)
annot[label.vertices] = annot_id
hemi_names = [label.name for label in hemi_labels]
if None in hemi_names:
msg = ("Found %i labels with no name. Writing annotation file"
"requires all labels named" % (hemi_names.count(None)))
# raise the error immediately rather than crash with an
# uninformative error later (e.g. cannot join NoneType)
raise ValueError(msg)
# Assign unlabeled vertices to an "unknown" label
unlabeled = (annot == -1)
if np.any(unlabeled):
msg = ("Assigning %i unlabeled vertices to "
"'unknown-%s'" % (unlabeled.sum(), hemi))
logger.info(msg)
# find an unused color (try shades of gray first)
for i in range(1, 257):
if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
break
if i < 256:
color = (i, i, i, 0)
else:
err = ("Need one free shade of gray for 'unknown' label. "
"Please modify your label colors, or assign the "
"unlabeled vertices to another label.")
raise ValueError(err)
# find the id
annot_id = np.sum(annot_id_coding * color[:3])
# update data to write
annot[unlabeled] = annot_id
ctab = np.vstack((ctab, color))
hemi_names.append("unknown")
# convert to FreeSurfer alpha values
ctab[:, 3] = 255 - ctab[:, 3]
# remove hemi ending in names
hemi_names = [name[:-3] if name.endswith(hemi) else name
for name in hemi_names]
to_save.append((fname, annot, ctab, hemi_names))
issues = []
if duplicate_colors:
msg = ("Some labels have the same color values (all labels in one "
"hemisphere must have a unique color):")
duplicate_colors.insert(0, msg)
issues.append('\n'.join(duplicate_colors))
if invalid_colors:
msg = ("Some labels have invalid color values (all colors should be "
"RGBA tuples with values between 0 and 1)")
invalid_colors.insert(0, msg)
issues.append('\n'.join(invalid_colors))
if overlap:
msg = ("Some labels occupy vertices that are also occupied by one or "
"more other labels. Each vertex can only be occupied by a "
"single label in *.annot files.")
overlap.insert(0, msg)
issues.append('\n'.join(overlap))
if issues:
raise ValueError('\n\n'.join(issues))
# write it
for fname, annot, ctab, hemi_names in to_save:
logger.info(' writing %d labels to %s' % (len(hemi_names), fname))
_write_annot(fname, annot, ctab, hemi_names, table_name)
@fill_doc
def select_sources(subject, label, location='center', extent=0.,
grow_outside=True, subjects_dir=None, name=None,
random_state=None, surf='white'):
"""Select sources from a label.
Parameters
----------
%(subject)s
label : instance of Label | str
Define where the seed will be chosen. If str, can be 'lh' or 'rh',
which correspond to left or right hemisphere, respectively.
location : 'random' | 'center' | int
Location to grow label from. If the location is an int, it represents
the vertex number in the corresponding label. If it is a str, it can be
either 'random' or 'center'.
extent : float
Extents (radius in mm) of the labels, i.e. maximum geodesic distance
on the white matter surface from the seed. If 0, the resulting label
will contain only one vertex.
grow_outside : bool
Let the region grow outside the original label where location was
defined.
%(subjects_dir)s
name : None | str
Assign name to the new label.
%(random_state)s
surf : str
The surface used to simulate the label; defaults to the white surface.
Returns
-------
label : instance of Label
The label that contains the selected sources.
Notes
-----
This function selects a region of interest on the cortical surface based
on a label (or a hemisphere). The sources are selected by growing a region
around a seed which is selected randomly, is the center of the label, or
is a specific vertex. The selected vertices can extend beyond the initial
provided label. This can be prevented by setting grow_outside to False.
The selected sources are returned in the form of a new Label object. The
values of the label contain the distance from the seed in millimeters.
.. versionadded:: 0.18
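Examples
--------
A minimal usage sketch (the label name and extent below are hypothetical),
growing a 20 mm region around a random left-hemisphere seed::

    label = select_sources('fsaverage', 'lh', location='random', extent=20.,
                           subjects_dir=subjects_dir, name='random_roi',
                           random_state=0)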
"""
# If label is a string, convert it to a label that contains the whole
# hemisphere.
if isinstance(label, str):
_check_option('label', label, ['lh', 'rh'])
surf_filename = op.join(subjects_dir, subject, 'surf',
label + '.white')
vertices, _ = read_surface(surf_filename)
indices = np.arange(len(vertices), dtype=int)
label = Label(indices, vertices, hemi=label)
# Choose the seed according to the selected strategy.
if isinstance(location, str):
_check_option('location', location, ['center', 'random'])
if location == 'center':
seed = label.center_of_mass(
subject, restrict_vertices=True, subjects_dir=subjects_dir,
surf=surf)
else:
rng = check_random_state(random_state)
seed = rng.choice(label.vertices)
else:
seed = label.vertices[location]
hemi = 0 if label.hemi == 'lh' else 1
new_label = grow_labels(subject, seed, extent, hemi, subjects_dir)[0]
# We override the name because grow_labels automatically adds a -rh or -lh
# to the given parameter.
new_label.name = name
# Restrict the new label to the vertices of the input label if needed.
if not grow_outside:
to_keep = np.array([v in label.vertices for v in new_label.vertices])
new_label = Label(new_label.vertices[to_keep], new_label.pos[to_keep],
hemi=new_label.hemi, name=name, subject=subject)
return new_label
def find_pos_in_annot(pos, subject='fsaverage', annot='aparc+aseg',
subjects_dir=None):
"""
Find name in atlas for given MRI coordinates.
Parameters
----------
pos : ndarray, shape (3,)
Vector of x,y,z coordinates in MRI space.
subject : str
MRI subject name.
annot : str
MRI volumetric atlas file name. Do not include the ``.mgz`` suffix.
subjects_dir : path-like
Path to MRI subjects directory.
Returns
-------
label : str
Anatomical region name from atlas.
Notes
-----
.. versionadded:: 0.24
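Examples
--------
A minimal usage sketch (the coordinates below are hypothetical)::

    region = find_pos_in_annot([10., -20., 30.], subject='fsaverage',
                               annot='aparc+aseg',
                               subjects_dir=subjects_dir)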
"""
pos = np.asarray(pos, float)
if pos.shape != (3,):
raise ValueError(
'pos must be an array of shape (3,), ' f'got {pos.shape}')
nibabel = _import_nibabel('read MRI parcellations')
if subjects_dir is None:
subjects_dir = get_subjects_dir(None)
atlas_fname = os.path.join(subjects_dir, subject, 'mri', annot + '.mgz')
parcellation_img = nibabel.load(atlas_fname)
# Load FreeSurfer atlas LUT
lut_inv_dict = read_freesurfer_lut()[0]
label_lut = {v: k for k, v in lut_inv_dict.items()}
# Find voxel for dipole position
mri_vox_t = np.linalg.inv(parcellation_img.header.get_vox2ras_tkr())
vox_dip_pos_f = apply_trans(mri_vox_t, pos)
vox_dip_pos = np.rint(vox_dip_pos_f).astype(int)
# Get voxel value and label from LUT
vol_values = parcellation_img.get_fdata()[tuple(vox_dip_pos.T)]
label = label_lut.get(vol_values, 'Unknown')
return label
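# Illustrative usage sketch (coordinates and output are hypothetical; it assumes
# the 'fsaverage' subject and its aparc+aseg.mgz volume are available):
#
#     region = find_pos_in_annot([-30., -20., 50.], subject='fsaverage',
#                                annot='aparc+aseg')
#     print(region)  # e.g. 'ctx-lh-precentral'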
| bsd-3-clause |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py | 39 | 7457 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
            Points at which to interpolate data.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
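# Minimal usage sketch (not part of this module; it mirrors the public scipy
# API shown above): nearest-neighbour interpolation of scattered 2-D samples.
#
#     import numpy as np
#     from scipy.interpolate import NearestNDInterpolator
#     rng = np.random.RandomState(0)
#     pts = rng.rand(20, 2)                  # scattered 2-D sample locations
#     vals = np.hypot(pts[:, 0], pts[:, 1])  # values observed at those points
#     interp = NearestNDInterpolator(pts, vals)
#     interp([[0.25, 0.75], [0.5, 0.5]])     # value of the nearest sample point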
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| gpl-3.0 |
stuartminch/studious-adventure | case_report.py | 1 | 1705 | # Author: Stuart Minchington
# Email: [email protected]
# Written in Python 3.6.3 (default, Oct 3 2017, 21:45:48)
# Requirements: python3-dev and pip3
# Installation for Ubuntu: sudo apt-get install python3-dev python3-pip
# Installation for Mac: brew install python3 (includes pip3)
# Then run 'pip3 install' for the following:
# openpyxl
# pandas
# psycopg2
# sqlalchemy
from sqlalchemy import create_engine
from sys import argv
import openpyxl
import pandas
import numpy
import csv
# Must provide credentials by passing them on the command line
script, username, password = argv
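# Example invocation (hypothetical credentials):
#     python3 case_report.py bi_readonly 's3cret'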
# Connect to the database
engine = create_engine(f'postgresql://{username}:{password}@bi-prod-dw-instance.cceimtxgnc4w.us-west-2.redshift.amazonaws.com:5439/bigblueguess')
# Create the initial Excel file
writer = pandas.ExcelWriter(f'/home/stuart/enterprise_case_report.xlsx')
# Create string array of customers
customers = "< Enter customer names separated by : >"
customers = customers.split(':')
# Loop through the customer list
for customer in customers:
print(f"Getting case data for {customer}...")
case_data = pandas.read_sql_query(f"""
SELECT createddate
,casenumber
,jira__c
,origin
,case_owner_for_intercall__c
,subject
,type
,reason_type__c
,status
,case_age__c
,description
FROM hist_sfdc_case
WHERE account_name_for_intercall__c='{customer}'
AND status!='Closed'
ORDER BY createddate DESC
""", engine)
# Write the data to its own worksheet in Excel
case_data.to_excel(writer,f'{customer}')
writer.save()
print(f"Finished getting case data for {customer}...")
| gpl-3.0 |
deot95/Tesis | Proyecto de Grado Ingeniería Electrónica/Workspace/Comparison/Small Linear/ddpg_small.py | 2 | 10811 | import linear_env_small as linear_env
import sim_env
from actor import Actor
from critic import Critic
from replay_buffer import ReplayBuffer
import numpy as np
import tensorflow as tf
import keras.backend as kbck
import json
import time
import argparse
import matplotlib.pylab as plt
import os.path
def ou(x, mu, theta, sigma):
return theta * (mu - x) + sigma * np.random.randn(np.shape(x)[0])
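# The expression above is the increment theta*(mu - x) + sigma*N(0, 1) of a
# discretized Ornstein-Uhlenbeck process; in the training loop below it is
# added to the current policy action as exploration noise. Minimal sketch
# (values are illustrative, independent of the loop below):
#
#     a = np.array([0.5, 0.5, 0.5, 0.5])                # current action
#     a_noisy = a + ou(a, mu=0.5, theta=1.0, sigma=1.5)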
def simulate(control, swmm, flows):
best_reward = -1*np.inf
BUFFER_SIZE = 100000
BATCH_SIZE = 120
GAMMA = 0.99
TAU = 0.01 #Target Network HyperParameters
LRA = 0.0001 #Learning rate for Actor
LRC = 0.001 #Lerning rate for Critic
action_dim = 4
state_dim = 5
max_steps = 6000
np.random.seed(9501)
EXPLORE = 100000.
episode_count = 1000
done = False
step = 0
epsilon = 1
if swmm:
print("No support")
else:
# Constants for the linear environment
Hs = 1800
A1 = 0.0020
mu1 = 250
sigma1 = 70
A2 = 0.0048
mu2 = 250
sigma2 = 70
dt = 1
x = np.arange(Hs)
d = np.zeros((2,Hs))
if control:
#Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
kbck.set_session(sess)
# Actor, critic and replay buffer creation
actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
buff = ReplayBuffer(BUFFER_SIZE)
# Get the linear environment
reward_hist = []
for i in range(episode_count):
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
A1 += 0.0004*np.random.rand()
mu1 += 50*np.random.rand()
sigma1 += 14*np.random.rand()
A2 += 0.00096*np.random.rand()
mu2 += 50*np.random.rand()
sigma2 += 14*np.random.rand()
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
s_t = np.divide(env.reset(),env.vmax)
total_reward = 0.
for j in range(max_steps):
## Noise addition for exploration
## Ornstein-Uhlenbeck process
loss = 0
epsilon -= 1.0 / EXPLORE
a_t = np.zeros([1,action_dim])
noise_t = np.zeros([1,action_dim])
a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
#noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
a_t[0] = a_t_original[0] + noise_t[0]
#Act over the system and get info of the next states
s_t1 , r_t, done, _ = env.step(a_t[0],flows=flows)
s_t1 = np.divide(s_t1,env.vmax)
#Add replay buffer
buff.add(s_t, a_t[0], r_t, s_t1, done)
#Do the batch update
batch = buff.getBatch(BATCH_SIZE)
states = np.asarray([e[0] for e in batch])
actions = np.asarray([e[1] for e in batch])
rewards = np.asarray([e[2] for e in batch])
next_states = np.asarray([e[3] for e in batch])
dones = np.asarray([e[4] for e in batch])
# Get estimated q-values of the pair (next_state,mu(next_state))
actions_next = actor.target_munet.predict(next_states)
target_q_values = critic.target_qnet.predict([next_states, actions_next])
y_t = np.zeros(np.shape(actions))
for k in range(len(batch)):
if dones[k]:
y_t[k] = rewards[k]
else:
y_t[k] = rewards[k] + GAMMA*target_q_values[k]
loss += critic.qnet.train_on_batch([states,actions], y_t)
a_for_grad = actor.munet.predict(states)
grads = critic.gradients(states, a_for_grad)
actor.train(states, grads)
actor.target_train()
critic.target_train()
total_reward = total_reward + GAMMA*r_t
s_t = s_t1
if j%100==0:
print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
if done:
break
reward_hist.append(total_reward)
np.save("reward_small_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
if i%20 == 0:
print("Saving the networks...")
actor.munet.save_weights("./actors_small/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
critic.qnet.save_weights("./critics_small/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
if total_reward > best_reward:
print("Saving Best Actor...")
np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
actor.munet.save_weights("./actors_small/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
critic.qnet.save_weights("./critics_small/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
best_reward = total_reward
print("TOTAL REWARD @ " + str(i) +"-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("")
print("Finish.")
else:
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
resv, resf, resu = env.free_sim()
font_labels = 16
font_legends = 22
ticksize = 16
width = 2.5
f , axarr = plt.subplots(nrows=1, ncols=2,figsize=(14,6),sharex=True )
		resv_norm = np.divide(np.transpose(resv), np.tile(env.vmax, (Hs, 1)))
resu = np.transpose(np.asarray(resu))
## Plot Volume Results
lines = axarr[0].plot(x,resv_norm[:,:3],linewidth=width)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(3))),prop ={'size':font_legends})
axarr[0].set_title("Volumes - Tanks 1 to 3",fontsize=font_labels)
axarr[0].set_xlabel("Times(s)",fontsize=font_labels)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[0].tick_params(labelsize=ticksize)
lines = axarr[1].plot(x,resv_norm[:,3:],linewidth=width)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=5 else "vT",range(3,5))),prop ={'size':font_legends})
axarr[1].set_title("Volumes - Tank 4 and Storm Tank",fontsize=font_labels)
axarr[1].set_xlabel("Times(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[1].tick_params(labelsize=ticksize)
plt.tight_layout()
plt.show()
'''
lines = axarr[1,0].plot(x,resu[:,:2],linewidth=width)
axarr[1,0].legend(lines , list(map(lambda x: "u"+str(x+1),range(2))),prop ={'size':font_legends})
axarr[1,0].set_title("Control Actions - Valves 1 and 2",fontsize=font_labels)
axarr[1,0].set_xlabel("Times(s)",fontsize=font_labels)
axarr[1,0].set_ylabel("% Aperture",fontsize=font_labels)
axarr[1,0].tick_params(labelsize=ticksize)
lines = axarr[1,1].plot(x,resu[:,2:],linewidth=width)
axarr[1,1].legend(lines , list(map(lambda x: "u"+str(x+1),range(2,4))),prop ={'size':font_legends})
axarr[1,1].set_title("Control Actions - Valves 3 and 4",fontsize=font_labels)
axarr[1,1].set_xlabel("Times(s)",fontsize=font_labels)
#axarr[1,1].set_ylabel("% Aperture",fontsize=font_labels)
axarr[1,1].tick_params(labelsize=ticksize)
#sns.despine()
'''
def rainfile():
from math import exp
import numpy as np
from matplotlib import pylab as plt
#Gaussian Extension
A1 = 0.008 + 0.0008*np.random.rand(); mu1 = 500+50*np.random.rand(); sigma1 = 250+25*np.random.rand()
A2 = 0.0063 + 0.00063*np.random.rand() ; mu2 = 500+50*np.random.rand(); sigma2 = 250+25*np.random.rand()
dt = 1
Hs = 1800
x = np.arange(0,Hs,dt)
d = [[],[]]
# dconst = 0.5*mpc_obj.k1*mpc_obj.vmax(1);
d[0] = A1*np.exp((-(x-mu1)**2)/(2*sigma1**2)) # Node 1 - left
d[1] = A2*np.exp((-(x-mu2)**2)/(2*sigma2**2)) # Node 2 - right
def secs_to_hour(secs_convert):
hour = secs_convert//3600
mins = (secs_convert%3600)//60
secs = secs_convert%60
return '{h:02d}:{m:02d}'.format(h=hour,m=mins)
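	# e.g. secs_to_hour(3725) -> '01:02' (seconds are computed but not displayed)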
secs_hour_vec = np.vectorize(secs_to_hour)
for k in (1,2):
with open('swmm/runoff%d.dat' % k, 'w') as f:
i = 0
for (t,val) in zip(secs_hour_vec(x), d[k-1]):
if i%60 == 0:
f.write(t+" "+str(val)+"\n")
i += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c","--control", type=int, choices = [0,1], help = "Choose between control(1) or free dynamics(0)")
parser.add_argument("-s","--swmm", type=int, choices = [0,1], help = "Choose between a simulation with swmm(1) or not(0)")
parser.add_argument("-f","--flow", type=int, choices = [0,1], help = "Choose between a simulation with flows(1) or not(0)")
args = parser.parse_args()
if args.flow == 1 and args.swmm == 1:
print("Conflicting option flow 1 and swmm 1")
else:
t0 = time.process_time()
simulate(control=args.control, swmm=args.swmm, flows = args.flow)
		t1 = time.process_time()
		print("Elapsed time: ", t1 - t0) | mit |
arjoly/scikit-learn | sklearn/metrics/regression.py | 7 | 17322 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted'; this behaviour is
deprecated since version 0.17 and will be changed to 'uniform_average'
starting from 0.19.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value which is deprecated since "
"0.17, it will be changed to 'uniform_average' "
"starting from 0.19.",
DeprecationWarning)
multioutput = 'variance_weighted'
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights results in uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
thorwhalen/ut | ml/sk/decomposition/pca.py | 1 | 6060 |
from sklearn.decomposition.pca import PCA, _infer_dimension_
from ut.ml.sk.utils.validation import weighted_data
# from wpca import WPCA
from ut.ml.sk.decomposition.wpca import WPCA
from numpy import reshape, tile, sqrt, average
import numpy as np
from scipy import linalg
from sklearn.decomposition.pca import PCA
from ut.ml.utils import trailing_underscore_attributes, get_model_attributes
# from scipy.special import gammaln
from sklearn.utils.validation import as_float_array
from sklearn.utils.validation import check_array
# from sklearn.utils.extmath import fast_dot, fast_logdet, randomized_svd
# from sklearn.utils.validation import check_is_fitted
__author__ = 'thor'
class WeightedPCA(WPCA):
"""
Weighted version of sklearn.decomposition.pca.PCA
>>> from sklearn.decomposition.pca import PCA
>>> from ut.ml.sk.decomposition.pca import WeightedPCA
>>> from sklearn.datasets import make_blobs
>>> from ut.ml.sk.preprocessing import WeightedStandardScaler
>>> from numpy import ones, vstack, hstack, random, allclose, sort, abs, isnan
>>> from ut.ml.sk.utils.validation import compare_model_attributes, repeat_rows
>>>
>>> def abs_ratio_close_when_sorted(a, b):
... a = sort(abs(a), axis=0)
... b = sort(abs(b), axis=0)
... t = a / b
... t = t[~isnan(t)]
... return allclose(t, 1.)
>>>
>>> model_1 = PCA()
>>> model_2 = WeightedPCA()
>>>
>>> X, y = make_blobs(100, 5, 4)
>>> w = ones(len(X))
>>> compare_model_attributes(model_1.fit(X), model_2.fit(X), only_attr='components_', \
close_enough_fun=abs_ratio_close_when_sorted)
all fitted attributes were close
>>>
>>> X, y = make_blobs(100, 5, 4)
>>> w = ones(len(X))
>>>
>>> XX = vstack((X, X))
>>> wX = (X, 2 * ones(len(X)))
>>> compare_model_attributes(model_1.fit(X), model_2.fit(X), only_attr='components_', \
close_enough_fun=abs_ratio_close_when_sorted)
all fitted attributes were close
>>>
>>> X, y = make_blobs(100, 5, 4)
>>> w = ones(len(X))
>>>
>>> XX = vstack((X, X[-2:, :], X[-1, :]))
>>> wX = (X, hstack((ones(len(X)-2), [2, 3])))
>>> compare_model_attributes(model_1.fit(XX), model_2.fit(wX), only_attr='components_', \
close_enough_fun=abs_ratio_close_when_sorted)
all fitted attributes were close
>>> w = random.randint(1, 5, len(X))
>>> compare_model_attributes(model_1.fit(repeat_rows(X, w)), model_2.fit((X, w)), only_attr='components_', \
close_enough_fun=abs_ratio_close_when_sorted)
all fitted attributes were close
>>> t = model_1.fit(repeat_rows(X, w))
>>> model_2 = model_2.fit((X, w)).to_pca()
>>> assert allclose(abs(model_1.transform(X) / model_2.transform(X)), 1), "transformation of X not close"
>>> XX = random.rand(3, X.shape[1])
>>> assert allclose(abs(model_1.transform(XX) / model_2.transform(XX)), 1), "transformation of X not close"
"""
def fit(self, X, y=None):
X, w = weighted_data(X)
return super(self.__class__, self).fit(X, weights=tile(reshape(w, (len(w), 1)), X.shape[1]))
def transform(self, X):
X, w = weighted_data(X)
return super(self.__class__, self).transform(X)
def fit_transform(self, X, y=None):
return self.fit(X).transform(X)
def to_pca(self):
pca = PCA(n_components=self.n_components)
for attr in trailing_underscore_attributes(self):
setattr(pca, attr, getattr(self, attr))
return pca
class MyWeightedPCA(PCA):
def _fit(self, X):
X, w = weighted_data(X)
X = check_array(X)
n_samples, n_features = X.shape
n_samples_weighted = sum(w)
X = as_float_array(X, copy=self.copy)
# Center data
# self.mean_ = average(X, axis=0, weights=w)
# X -= self.mean_
U, S, V = linalg.svd((X.T * reshape(sqrt(w), (1, len(X)))).T, full_matrices=True)
explained_variance_ = (S ** 2) / n_samples_weighted
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples_weighted # n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
#
# # Center data
# self.mean_ = average(X, axis=0, weights=w)
# X -= self.mean_
#
# X = (X.T * reshape(sqrt(w), (1, len(X)))).T
# return super(self.__class__, self)._fit(X)
| mit |
marc-sensenich/ansible | hacking/cgroup_perf_recap_graph.py | 54 | 4384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import argparse
import csv
from collections import namedtuple
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
raise SystemExit('matplotlib is required for this script to work')
Data = namedtuple('Data', ['axis_name', 'dates', 'names', 'values'])
def task_start_ticks(dates, names):
item = None
ret = []
for i, name in enumerate(names):
if name == item:
continue
item = name
ret.append((dates[i], name))
return ret
def create_axis_data(filename, relative=False):
x_base = None if relative else 0
axis_name, dummy = os.path.splitext(os.path.basename(filename))
dates = []
names = []
values = []
with open(filename) as f:
reader = csv.reader(f)
for row in reader:
if x_base is None:
x_base = float(row[0])
dates.append(mdates.epoch2num(float(row[0]) - x_base))
names.append(row[1])
values.append(float(row[3]))
return Data(axis_name, dates, names, values)
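# Note (inferred from the parsing above, not from any documented format): each
# CSV row is expected to look like
#     <epoch seconds>,<task name>,<ignored>,<value>
# e.g. "1546300800.25,Gathering Facts,cpu,12.5" (illustrative); only columns
# 0, 1 and 3 are read.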
def create_graph(data1, data2, width=11.0, height=8.0, filename='out.png', title=None):
fig, ax1 = plt.subplots(figsize=(width, height), dpi=300)
task_ticks = task_start_ticks(data1.dates, data1.names)
ax1.grid(linestyle='dashed', color='lightgray')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%X'))
ax1.plot(data1.dates, data1.values, 'b-')
if title:
ax1.set_title(title)
ax1.set_xlabel('Time')
ax1.set_ylabel(data1.axis_name, color='b')
for item in ax1.get_xticklabels():
item.set_rotation(60)
ax2 = ax1.twiny()
ax2.set_xticks([x[0] for x in task_ticks])
ax2.set_xticklabels([x[1] for x in task_ticks])
ax2.grid(axis='x', linestyle='dashed', color='lightgray')
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_label_position('bottom')
ax2.spines['bottom'].set_position(('outward', 86))
ax2.set_xlabel('Task')
ax2.set_xlim(ax1.get_xlim())
for item in ax2.get_xticklabels():
item.set_rotation(60)
ax3 = ax1.twinx()
ax3.plot(data2.dates, data2.values, 'g-')
ax3.set_ylabel(data2.axis_name, color='g')
fig.tight_layout()
fig.savefig(filename, format='png')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs=2, help='2 CSV files produced by cgroup_perf_recap to graph together')
parser.add_argument('--relative', default=False, action='store_true',
help='Use relative dates instead of absolute')
    parser.add_argument('--output', default='out.png', help='Output path of PNG file. Default %(default)s')
parser.add_argument('--width', type=float, default=11.0,
help='Width of output image in inches. Default %(default)s')
parser.add_argument('--height', type=float, default=8.0,
help='Height of output image in inches. Default %(default)s')
parser.add_argument('--title', help='Title for graph')
return parser.parse_args()
def main():
args = parse_args()
data1 = create_axis_data(args.files[0], relative=args.relative)
data2 = create_axis_data(args.files[1], relative=args.relative)
create_graph(data1, data2, width=args.width, height=args.height, filename=args.output, title=args.title)
print('Graph written to %s' % os.path.abspath(args.output))
if __name__ == '__main__':
main()
| gpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_backend_qt4.py | 2 | 3197 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib import pyplot as plt
from matplotlib._pylab_helpers import Gcf
import matplotlib
import copy
import pytest
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
with matplotlib.rc_context(rc={'backend': 'Qt4Agg'}):
qt_compat = pytest.importorskip('matplotlib.backends.qt_compat')
from matplotlib.backends.backend_qt4 import (
MODIFIER_KEYS, SUPER, ALT, CTRL, SHIFT) # noqa
QtCore = qt_compat.QtCore
_, ControlModifier, ControlKey = MODIFIER_KEYS[CTRL]
_, AltModifier, AltKey = MODIFIER_KEYS[ALT]
_, SuperModifier, SuperKey = MODIFIER_KEYS[SUPER]
_, ShiftModifier, ShiftKey = MODIFIER_KEYS[SHIFT]
try:
py_qt_ver = int(QtCore.PYQT_VERSION_STR.split('.')[0])
except AttributeError:
py_qt_ver = QtCore.__version_info__[0]
if py_qt_ver != 4:
pytestmark = pytest.mark.xfail(reason='Qt4 is not available')
@pytest.mark.backend('Qt4Agg')
def test_fig_close():
# save the state of Gcf.figs
init_figs = copy.copy(Gcf.figs)
# make a figure using pyplot interface
fig = plt.figure()
# simulate user clicking the close button by reaching in
# and calling close on the underlying Qt object
fig.canvas.manager.window.close()
# assert that we have removed the reference to the FigureManager
# that got added by plt.figure()
assert init_figs == Gcf.figs
@pytest.mark.parametrize(
'qt_key, qt_mods, answer',
[
(QtCore.Qt.Key_A, ShiftModifier, 'A'),
(QtCore.Qt.Key_A, QtCore.Qt.NoModifier, 'a'),
(QtCore.Qt.Key_A, ControlModifier, 'ctrl+a'),
(QtCore.Qt.Key_Aacute, ShiftModifier,
'\N{LATIN CAPITAL LETTER A WITH ACUTE}'),
(QtCore.Qt.Key_Aacute, QtCore.Qt.NoModifier,
'\N{LATIN SMALL LETTER A WITH ACUTE}'),
(ControlKey, AltModifier, 'alt+control'),
(AltKey, ControlModifier, 'ctrl+alt'),
(QtCore.Qt.Key_Aacute, (ControlModifier | AltModifier | SuperModifier),
'ctrl+alt+super+\N{LATIN SMALL LETTER A WITH ACUTE}'),
(QtCore.Qt.Key_Backspace, QtCore.Qt.NoModifier, 'backspace'),
(QtCore.Qt.Key_Backspace, ControlModifier, 'ctrl+backspace'),
(QtCore.Qt.Key_Play, QtCore.Qt.NoModifier, None),
],
ids=[
'shift',
'lower',
'control',
'unicode_upper',
'unicode_lower',
'alt_control',
'control_alt',
'modifier_order',
'backspace',
'backspace_mod',
'non_unicode_key',
]
)
@pytest.mark.backend('Qt4Agg')
def test_correct_key(qt_key, qt_mods, answer):
"""
Make a figure
Send a key_press_event event (using non-public, qt4 backend specific api)
Catch the event
Assert sent and caught keys are the same
"""
qt_canvas = plt.figure().canvas
event = mock.Mock()
event.isAutoRepeat.return_value = False
event.key.return_value = qt_key
event.modifiers.return_value = qt_mods
def receive(event):
assert event.key == answer
qt_canvas.mpl_connect('key_press_event', receive)
qt_canvas.keyPressEvent(event)
| mit |
silky/sms-tools | lectures/05-Sinusoidal-model/plots-code/spec-sine-synthesis-lobe.py | 24 | 2626 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
M = 256
N = 256
hN = N/2
hM = int(M/2.0)
fs = 44100
f0 = 5000.0
A0 = 1
ph = 1.5
t = np.arange(-hM,hM)/float(fs)
x = A0 * np.cos(2*np.pi*f0*t+ph)
w = hamming(M)
xw = x*w
fftbuffer = np.zeros(N)
fftbuffer[0:M] = xw
X = fft(fftbuffer)
mX = abs(X)
pX = np.angle(X[0:hN])
powerX = sum(2*mX[0:hN]**2)/N
mask = np.zeros(N/2)
mask[int(N*f0/fs-2*N/float(M)):int(N*f0/fs+3*N/float(M))] = 1.0
mY = mask*mX[0:hN]
powerY = sum(2*mY[0:hN]**2)/N
Y = np.zeros(N, dtype = complex)
Y[:hN] = mY * np.exp(1j*pX)
Y[hN+1:] = mY[:0:-1] * np.exp(-1j*pX[:0:-1])
y = ifft(Y)
SNR1 = -10*np.log10((powerX-powerY)/(powerX))
freqaxis = fs*np.arange(0,N/2)/float(N)
taxis = np.arange(N)/float(fs)
plt.figure(1, figsize=(9, 6))
plt.subplot(3,2,1)
plt.plot(20*np.log10(mY[:hN])-max(20*np.log10(mY[:hN])), 'r', lw=1.5)
plt.title ('mX, mY (main lobe); Hamming')
plt.plot(20*np.log10(mX[:hN])-max(20*np.log10(mX[:hN])), 'r', lw=1.5, alpha=.2)
plt.axis([0,hN,-120,0])
plt.subplot(3,2,3)
plt.plot(y[0:M], 'b', lw=1.5)
plt.axis([0,M,-1,1])
plt.title ('y (synthesis of main lobe)')
plt.subplot(3,2,5)
yerror = xw - y
plt.plot(yerror, 'k', lw=1.5)
plt.axis([0,M,-.003,.003])
plt.title ("error function: x-y; SNR = ${%d}$ dB" %(SNR1))
w = blackmanharris(M)
xw = x*w
fftbuffer = np.zeros(N)
fftbuffer[0:M] = xw
X = fft(fftbuffer)
mX = abs(X)
pX = np.angle(X[0:hN])
powerX = sum(2*mX[0:hN]**2)/N
mask = np.zeros(N/2)
mask[int(N*f0/fs-4*N/float(M)):int(N*f0/fs+5*N/float(M))] = 1.0
mY = mask*mX[0:hN]
powerY = sum(2*mY[0:hN]**2)/N
Y = np.zeros(N, dtype = complex)
Y[:hN] = mY * np.exp(1j*pX)
Y[hN+1:] = mY[:0:-1] * np.exp(-1j*pX[:0:-1])
y = ifft(Y)
SNR2 = -10*np.log10((powerX-powerY)/(powerX))
plt.subplot(3,2,2)
plt.plot(20*np.log10(mY[:hN])-max(20*np.log10(mY[:hN])), 'r', lw=1.5)
plt.title ('mX, mY (main lobe); Blackman Harris')
plt.plot(20*np.log10(mX[:hN])-max(20*np.log10(mX[:hN])), 'r', lw=1.5, alpha=.2)
plt.axis([0,hN,-120,0])
plt.subplot(3,2,4)
plt.plot(y[0:M], 'b', lw=1.5)
plt.axis([0,M,-1,1])
plt.title ('y (synthesis of main lobe)')
plt.subplot(3,2,6)
yerror2 = xw - y
plt.plot(yerror2, 'k', lw=1.5)
plt.axis([0,M,-.003,.003])
plt.title ("error function: x-y; SNR = ${%d}$ dB" %(SNR2))
plt.tight_layout()
plt.savefig('spec-sine-synthesis-lobe.png')
plt.show()
| agpl-3.0 |
russel1237/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
ApolloAuto/apollo | modules/tools/plot_planning/plot_acc_jerk.py | 3 | 2593 | #!/usr/bin/env python3
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
import sys
import matplotlib.pyplot as plt
import numpy as np
from os import listdir
from os.path import isfile, join
from modules.tools.plot_planning.record_reader import RecordItemReader
from modules.tools.plot_planning.imu_speed_jerk import ImuSpeedJerk
from modules.tools.plot_planning.imu_speed_acc import ImuSpeedAcc
def grid(data_list, shift):
data_grid = []
for data in data_list:
data_grid.append(round(data) + shift/10.0)
return data_grid
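# e.g. grid([1.2, 3.7], shift=2) -> [1.2, 4.2]  (rounded value plus shift/10)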
if __name__ == "__main__":
folders = sys.argv[1:]
fig, ax = plt.subplots(1, 1)
colors = ["g", "b", "r", "m", "y"]
markers = [".", ".", ".", "."]
for i in range(len(folders)):
x = []
y = []
folder = folders[i]
color = colors[i % len(colors)]
marker = markers[i % len(markers)]
fns = [f for f in listdir(folder) if isfile(join(folder, f))]
fns.sort()
for fn in fns:
reader = RecordItemReader(folder+"/"+fn)
jerk_processor = ImuSpeedJerk()
acc_processor = ImuSpeedAcc()
topics = ["/apollo/localization/pose"]
for data in reader.read(topics):
if "pose" in data:
pose_data = data["pose"]
acc_processor.add(pose_data)
jerk_processor.add(pose_data)
data_x = grid(acc_processor.get_acc_list(), i + 1)
data_y = grid(jerk_processor.get_jerk_list(), i + 1)
data_x = data_x[-1 * len(data_y):]
x.extend(data_x)
y.extend(data_y)
if len(x) <= 0:
continue
ax.scatter(x, y, c=color, marker=marker, alpha=0.4)
#ax.plot(x, y, c=color, alpha=0.4)
ax.set_xlabel('Acc')
ax.set_ylabel('Jerk')
plt.show()
| apache-2.0 |
kjung/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
mikebenfield/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the Summer Palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked at
random) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
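# The k-means model is fitted on a random sub-sample of 1,000 pixels to keep the
# fit fast; the resulting cluster centers are then used below to predict a color
# index for every pixel of the full image.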
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
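# With axis=0, pairwise_distances_argmin returns, for each pixel (row of
# image_array), the index of the closest color in the random codebook.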
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
charterscruz/auto-encoder-tests | visualization/layer_checker.py | 1 | 5035 | #!/usr/bin/env python
# This script is used to visualize the layers that are learned
from keras import backend as K
import os
import cv2
import numpy as np
import sys
import glob
from utils import load_data_rgb, display_results, normalize_data, display_results_RGB, enumerate2
from keras.models import load_model
from visualization_utils import nice_imshow, make_mosaic
import pylab as pl
import matplotlib.cm as cm
# import numpy.ma as ma
# utility functions
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# import pylab as pl
# import matplotlib.cm as cm
# import numpy.ma as ma
# # utility functions
# from mpl_toolkits.axes_grid1 import make_axes_locatable
#
# def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None):
# """Wrapper around pl.imshow"""
#
# if cmap is None:
# cmap = cm.jet
# if vmin is None:
# vmin = data.min()
# if vmax is None:
# vmax = data.max()
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="5%", pad=0.05)
# im = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap)
# pl.colorbar(im, cax=cax)
#
#
#
# def make_mosaic(imgs, nrows, ncols, border=1):
# """
# Given a set of images with all the same shape, makes a
# mosaic with nrows and ncols
# """
# # nimgs = imgs.shape[2]
# # imshape = imgs.shape[1:]
# nimgs = imgs.shape[2]
# imshape = imgs.shape[:2]
#
# mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,
# ncols * imshape[1] + (ncols - 1) * border),
# dtype=np.float32)
#
# paddedh = imshape[0] + border
# paddedw = imshape[1] + border
# for i in xrange(nimgs):
# row = int(np.floor(i / ncols))
# col = i % ncols
#
# mosaic[row * paddedh:row * paddedh + imshape[0],
# col * paddedw:col * paddedw + imshape[1]] = imgs[:, :, i]
# return mosaic
starting_dir = os.getcwd()
img_size = 200
# master_folder = '/home/gcx/lstm_sequences/autoencoder-27-simple/'
# master_folder = '/home/gcx/lstm_sequences/autoencoder-100-improved/'
master_folder = '/home/gcx/lstm_sequences/autoencoder-200/'
# master_folder = '/home/gcx/lstm_sequences/anomaly_dataset/'
model_to_load = glob.glob('models/vgg/rect_vs_circles/*.h5')
if len(model_to_load) != 1:
    sys.exit('Expected exactly one model file in folder, found %d. Exiting!' % len(model_to_load))
print('loading model: ', model_to_load)
best_model = load_model(model_to_load[0])
print(best_model.summary())
print(' \n')
# W = best_model.layers[01].W.get_value(borrow=True)
# W = best_model.layers[01].kernel.get_value()
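# For a Keras Conv2D layer, get_weights()[0] is the kernel tensor of shape
# (kernel_h, kernel_w, n_input_channels, n_filters); the slice [:, :, 0, :]
# keeps only the filters as seen from the first input channel for plotting.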
weights = best_model.layers[1].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv1_1 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, 8, 8), cmap=cm.binary)
pl.savefig(current_name + '.png')
weights = best_model.layers[2].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv1_2 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, 8, 8), cmap=cm.binary)
pl.savefig(current_name + '.png')
weights = best_model.layers[5].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv2_1 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, int(np.ceil(np.sqrt(weights.shape[2]))), int(np.ceil(np.sqrt(weights.shape[2])))), cmap=cm.binary)
pl.savefig(current_name + '.png')
weights = best_model.layers[6].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv2_2 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, int(np.ceil(np.sqrt(weights.shape[2]))), int(np.ceil(np.sqrt(weights.shape[2])))), cmap=cm.binary)
pl.savefig(current_name + '.png')
weights = best_model.layers[9].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv3_1 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, int(np.ceil(np.sqrt(weights.shape[2]))), int(np.ceil(np.sqrt(weights.shape[2])))), cmap=cm.binary)
pl.savefig(current_name + '.png')
weights = best_model.layers[10].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv3_2 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, int(np.ceil(np.sqrt(weights.shape[2]))), int(np.ceil(np.sqrt(weights.shape[2])))), cmap=cm.binary)
pl.savefig(current_name + '.png')
weights = best_model.layers[11].get_weights()[0][:, :, 0, :]
print("W shape : ", weights.shape)
pl.figure(figsize=(15, 15))
current_name = 'conv3_3 weights'
pl.title(current_name)
nice_imshow(pl.gca(), make_mosaic(weights, int(np.ceil(np.sqrt(weights.shape[2]))), int(np.ceil(np.sqrt(weights.shape[2])))), cmap=cm.binary)
pl.savefig(current_name + '.png')
pl.show()
print('finished script!')
| mit |
elizabethswann/RR_fitter | codes/plot_corner_walkers_fullshape.py | 1 | 5653 | #Code to plot the corner plots and walker plots of the fullshape bayesian SSP fitting code
#that assumes a Calzetti dust model
###################################################################
#Import modules
import matplotlib
matplotlib.use('agg')
import corner
import matplotlib.pyplot as plt
import cPickle
import numpy as np
from math import floor,ceil
import re
import os
import linecache
import sys
import logging
###################################################################
logging.getLogger().setLevel(logging.ERROR)
###################################################################
#Load in input parameters from input file
if len(sys.argv)<3:
    print 'An input file and setup file are needed as arguments'
    print 'Input file path must be entered before setup file path'
    print 'Example: python plot_corner_walkers_fullshape.py /path/to/input_file.txt /path/to/setup_file.txt'
    sys.exit(0)
input_file=sys.argv[1]
setup_file=sys.argv[2]
if not os.path.isfile(input_file) or not os.path.isfile(setup_file):
print 'Your input file or setup file does not exist. Please provide a valid file path.'
sys.exit(0)
directory=str(re.search(r"\'\s*(.*?)\s*\'",linecache.getline(input_file,10)).group(1))
datadir=directory+'data/'
savedir=directory+'saved_data/'
imagedir=directory+'images/'
num_gals = int(re.search(r"\'\s*(.*?)\s*\'", linecache.getline(input_file, 2)).group(1))
for index in range(num_gals):
name_file=np.loadtxt(input_file,unpack=True,skiprows=11,dtype=str)[index]
name_folder=os.path.splitext(name_file)[0]+'/'
burn_in_fullshape=int(re.search(r"\'\s*(.*?)\s*\'",linecache.getline(setup_file,7)).group(1))
if not os.path.exists(imagedir+name_folder):
os.makedirs(imagedir+name_folder)
###################################################################
#Load in the full chain from the bayesian fullshape fit
sampler_chain=cPickle.load(
open(savedir+name_folder+'sampler_chain_'+name_folder[:-1]+'_fullshape_correct_dust.pkl','rb')
)
    #Load in the log likelihood for the Bayesian fullshape fit
log_liklihoods=cPickle.load(
open(savedir+name_folder+'sampler_logliklihoods_'+name_folder[:-1]+'_fullshape_correct_dust.pkl','rb')
)
    #Load in the factor used earlier to renormalise the input spectrum so that
    #the Bayesian fitter had an easier time
factor=cPickle.load(
open(savedir+name_folder+'logfactor_'+name_folder[:-1]+'_fullshape_correct_dust.pkl','rb')
)
###################################################################
#Calculate the best fit to the fullshape solution
index_max_liklihood=np.unravel_index(log_liklihoods.argmax(),log_liklihoods.shape)
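    # np.unravel_index converts the flat argmax of the 2D log-likelihood array
    # back into a (walker, step) pair, i.e. the single chain sample with the
    # highest stored log-likelihood.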
best_fit_params=np.exp(sampler_chain[index_max_liklihood[0],index_max_liklihood[1],1:])
calzetti_dust=sampler_chain[index_max_liklihood[0],index_max_liklihood[1],1:]
ndim=len(sampler_chain[0,0,:].T)
###################################################################
    #Plot the walkers' positions as a function of step (log space)
plt.figure(1, figsize=(12, 16))
counter=0
while counter<len(sampler_chain[0,0,:].T):
for i in range(len(sampler_chain[0,0,:].T)):
plt.subplot2grid((int(ceil(float(len(sampler_chain[0,0,:].T))/2.)), 2),
(int(floor(counter/2.)), int(counter%2)))
plt.plot(sampler_chain[:,:,i].T, alpha=0.05, color='k')
plt.ylabel(r'$t_{'+str(i+1)+'}$')
plt.xlabel('step')
plt.ylabel(r'ln(f$_{t_{'+str(i+1)+'}}$)')
counter+=1
plt.tight_layout()
plt.savefig(imagedir+name_folder+'walker_log_fullshape'+name_folder[:-1]+'_fullshape_calzetti_dust.pdf')
###################################################################
#Calculate the real space solutions to the fits
exp_sampler_chain=np.copy(sampler_chain)
#exponentiate all but the dust value which shouldn't be exponentiated
exp_sampler_chain[:,:,1:]=np.exp(exp_sampler_chain[:,:,1:])*factor
###################################################################
    #Plot the walkers' positions as a function of step (real space)
plt.figure(1, figsize=(12, 16))
counter=0
while counter<len(exp_sampler_chain[0,0,:].T):
for i in range(len(exp_sampler_chain[0,0,:].T)):
plt.subplot2grid((int(ceil(float(len(exp_sampler_chain[0,0,:].T))/2.)), 2),
(int(floor(counter/2.)), int(counter%2)))
plt.plot(exp_sampler_chain[:, :, i].T, alpha=0.05, color='k')
plt.ylabel(r'$t_{'+str(i+1)+'}$')
plt.xlabel('step')
plt.ylabel(r'f$_{t_{'+str(i+1)+'}}$')
counter+=1
plt.tight_layout()
plt.savefig(imagedir+name_folder+'walker_normal_fullshape'+name_folder[:-1]+'_fullshape_calzetti_dust.pdf')
###################################################################
#Plot triangle plot for the fullshape solution
samples = sampler_chain[:,burn_in_fullshape:,:].reshape((-1, ndim)) #500
for i in range(len(sampler_chain[:,0,0])):
for j in range(len(sampler_chain[0,:,0])):
sampler_chain[i,j,1:]=sampler_chain[i,j,1:]/np.sum(sampler_chain[i,j,1:])
norm_samples=exp_sampler_chain[:,100:,:].reshape((-1,ndim))
labels=['$t_{1}$','$t_{2}$','$t_{3}$','$t_{4}$','$t_{5}$','$t_{6}$','$t_{7}$',
'$t_{8}$','$t_{9}$','$t_{10}$','$t_{11}$','$t_{12}$','$t_{13}$','$t_{14}$',
'$t_{15}$','$t_{16}$']
###################################################################
#Plot the corner plots for the fullshape solutions
fig = corner.corner(samples,labels=labels,quantiles=[0.16,0.5,0.84],show_titles=True)
fig.savefig(imagedir+name_folder+'triangle_log_fullshape'+name_folder[:-1]+'_fullshape_calzetti_dust.pdf')
fig = corner.corner(norm_samples,labels=labels,quantiles=[0.16,0.5,0.84],show_titles=True)
fig.savefig(imagedir+name_folder+'triangle_normal_fullshape'+name_folder[:-1]+'_fullshape_calzetti_dust.pdf')
| mit |
pli1988/portfolioFactory | portfolioFactory/utils/utils.py | 1 | 3534 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 8 22:09:49 2014
Author: Peter Li and Israel
"""
import numpy as np
from . import customExceptions
from .customExceptions import *
import pandas as pd
def processData(data):
""" Function to process timeseries data
processData performs 2 steps:
- check if data is numeric, monthly with no missing values (NaN in the middle)
- drop leading and trailng NaN
Args:
- data (Pandas time series): monthly timeseries
Returns:
Trimmed time series
"""
    # drop all NaN; any NaN in the middle will show up as a gap in the monthly check below
tempData = data.dropna()
# check if monthly
if checkSeqentialMonthly(tempData.index):
# check values are numeric
if all([isinstance(x,(float, int, long)) for x in tempData.values]):
return tempData
else:
raise customExceptions.badData('non-numeric data found')
else:
raise customExceptions.badData('missing data found')
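# Illustrative usage sketch for processData (the series below is made up and not
# part of the package):
#
#   >>> idx = pd.date_range('2000-01-31', periods=4, freq='M')
#   >>> ts = pd.Series([np.nan, 0.01, 0.02, 0.03], index=idx)
#   >>> processData(ts)   # leading NaN dropped; monthly series returned intact
#
# A NaN in the middle of the series would instead raise badData, because the
# remaining dates would no longer be sequential months.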
def checkSeqentialMonthly(index):
""" Function to check if dates for data timeseries are sequential
Args:
- data (Pandas time series): timeseries index
Returns:
True / False
"""
# Array of Months and Years
months = index.month
years = index.year
# Difference in months % 12 -- this value should always be 1
monthsDiff = np.mod(months[1:]-months[0:-1],12)
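    # e.g. December (12) followed by January (1) gives np.mod(1 - 12, 12) = 1,
    # so a year boundary still counts as a single-month step.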
# If months are sequential
if all(monthsDiff == 1):
yearsDiff = years[1:] - years[0:-1]
ix = np.where(yearsDiff == 1)
# If years are sequential
if all(months[ix] == 12):
return True
else:
return False
else:
return False
def setParameters(configPath):
""" Function to read config file
Note:
configPath is assumed to be a .txt file with (at least) the following fields:
- name : a name/description for the strategy
- signalPath: signal data location
- rule: the cutoff point for selecting investment (positive/negative int-->pick top/bottom S investments)
- window: time-span between rebalancing
Args:
configPath (str): location of config file
Returns:
A dict with {key = parameter name: value = parameter value}
"""
# Load Parameters Data
try:
parameters = pd.read_table(configPath , sep = '=', index_col = 0, header = None)
except IOError:
raise invalidParameterPath(configPath)
parameters.columns = ['values']
# Strip spaces
parameters = parameters.astype('string')
parameters.index = parameters.index.map(str.strip)
parameters = parameters['values'].map(str.strip)
return parameters.to_dict()
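# Illustrative config file for setParameters (all field values below are
# hypothetical):
#
#   name = momentum_strategy
#   signalPath = /path/to/signal.csv
#   rule = 10
#   window = 12
#
# Every value is returned as a string (note the astype('string') above), e.g.
# {'name': 'momentum_strategy', 'signalPath': '/path/to/signal.csv',
#  'rule': '10', 'window': '12'}.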
def calcRollingReturns(df,window):
''' Function to calculate window-size rolling returns
Note: assumes returns are in decimal form (ex. 0.02 represents 2%)
Arguments:
- df (dataframe) : returns matrix (tickers as columns)
- window (int) : specifies size of rolling window
Returns
- pandas dataframe with rolling returns
'''
return (pd.rolling_apply(1+df,window=window,func=np.prod,min_periods=window) - 1)
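# Worked example (hypothetical numbers): with window=3 and a column of monthly
# returns [0.10, 0.10, 0.10, ...], the third row of the result is
# 1.10 * 1.10 * 1.10 - 1 ~= 0.331, while the first two rows are NaN because
# min_periods=window. pd.rolling_apply only exists in older pandas releases; it
# was later superseded by DataFrame.rolling(window).apply(...).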
| mit |
marcusrehm/serenata-de-amor | rosie/rosie/core/__init__.py | 1 | 2667 | import os.path
import numpy as np
from sklearn.externals import joblib
class Core:
"""
This is Rosie's core object: it implements a generic pipeline to collect
    data, clean and normalize it, analyzes the data and outputs a dataset with
    suspicions. Its initializer takes a settings module and an
adapter.
The settings module should have three constants:
* CLASSIFIERS (dict) with pairs of human readable name (snake case) for
each classifier and the object (class) of the classifiers.
* UNIQUE_IDS (str or iterable) with the column(s) that should be taken as
      unique identifiers of the main dataset of each module.
* VALUE (str) with the column that should be taken as the total net value
      of the transaction represented by each row of the dataset.
The adapter should be an object with:
* A `dataset` property with the main dataset to be analyzed;
* A `path` property with the path to the datasets (where the output will be
saved).
"""
def __init__(self, settings, adapter):
self.settings = settings
self.dataset = adapter.dataset
self.data_path = adapter.path
if self.settings.UNIQUE_IDS:
self.suspicions = self.dataset[self.settings.UNIQUE_IDS].copy()
else:
self.suspicions = self.dataset.copy()
def __call__(self):
for name, classifier in self.settings.CLASSIFIERS.items():
model = self.load_trained_model(classifier)
self.predict(model, name)
output = os.path.join(self.data_path, 'suspicions.xz')
kwargs = dict(compression='xz', encoding='utf-8', index=False)
self.suspicions.to_csv(output, **kwargs)
def load_trained_model(self, classifier):
filename = '{}.pkl'.format(classifier.__name__.lower())
path = os.path.join(self.data_path, filename)
# palliative: this outputs a model too large for joblib
if classifier.__name__ == 'MonthlySubquotaLimitClassifier':
model = classifier()
model.fit(self.dataset)
else:
if os.path.isfile(path):
model = joblib.load(path)
else:
model = classifier()
model.fit(self.dataset)
joblib.dump(model, path)
return model
def predict(self, model, name):
model.transform(self.dataset)
prediction = model.predict(self.dataset)
self.suspicions[name] = prediction
if prediction.dtype == np.int:
self.suspicions.loc[prediction == 1, name] = False
self.suspicions.loc[prediction == -1, name] = True
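# Minimal sketch of the contract described in the class docstring (all names
# below are hypothetical and not part of this package):
#
#   class MySettings:
#       CLASSIFIERS = {'price_outlier': PriceOutlierClassifier}
#       UNIQUE_IDS = ['document_id']
#       VALUE = 'total_net_value'
#
#   class MyAdapter:
#       dataset = some_dataframe      # main dataset to analyze
#       path = '/tmp/rosie-data'      # where suspicions.xz will be written
#
#   Core(MySettings, MyAdapter())()   # fits/loads each model and saves output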
| mit |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/preprocessing/tests/test_data.py | 12 | 75601 | # Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
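    # After processing the i-th batch, n_samples_seen must equal the number of
    # rows fed so far: (i + 1) full chunks, except for the final (possibly
    # smaller) batch, which only contributes (batch_stop - batch_start) rows.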
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
[u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of abs values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the Scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.nan, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
# which contain negative value; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
# dense case -> warning raise
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # using a uniform output, each entry of X should be mapped between 0 and 1
# and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
# that the computed quantiles are almost mapped to a [0, 1] vector where
# values are equally spaced. The infinite norm is checked to be smaller
# than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-2)
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
# sparse support
# TODO: rng should be seeded once we drop support for older versions of
# scipy (< 0.13) that don't support seeding.
X = sparse.rand(n_samples, 1, density=.99, format='csc')
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-1)
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
    # linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that in the case
    # of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert_equal(transformer.transform(-10), transformer.transform(np.min(X)))
assert_equal(transformer.transform(10), transformer.transform(np.max(X)))
assert_equal(transformer.inverse_transform(-10),
transformer.inverse_transform(
np.min(transformer.references_)))
assert_equal(transformer.inverse_transform(10),
transformer.inverse_transform(
np.max(transformer.references_)))
def test_quantile_transform_and_inverse():
# iris dataset
X = iris.data
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that the input X has not been modified in place (a copy was returned)
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
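    # The arithmetic behind the expected third column: np.percentile(
    # [-0.1, 0.5, 1.1], [25, 75]) gives [0.2, 0.8] under linear interpolation,
    # so the median is 0.5, the IQR is 0.6, and (x - 0.5) / 0.6 maps
    # [0.5, -0.1, 1.1] to [0., -1., 1.].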
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
    # Test that partial_fit, run over many batches of size 1 and 50,
    # gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Test max_abs_ at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test the incremental n_samples_seen_ count until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that transforming with a larger feature value still works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
    # Raise an error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
| mit |
EuropeanSocialInnovationDatabase/ESID-main | TextMining/Classifiers/Trainers/ANN_Trainer_Actors_exp.py | 1 | 27012 | import nltk
from os import listdir
from os.path import isfile, join,isdir
import csv
import re
import sklearn.metrics
from keras.callbacks import EarlyStopping
from keras import Input
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import KFold
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten,Conv2D
from keras.preprocessing.text import Tokenizer
from keras.layers import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import gensim
import os
import numpy as np
import time
from keras import backend as K
def mcor(y_true, y_pred):
# matthews_correlation
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
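# Worked example for the batch-wise metrics in this module (values assume hard
# 0/1 predictions): for y_true = [1, 1, 0, 0] and y_pred = [1, 0, 0, 0] we get
# tp=1, tn=2, fp=0, fn=1, so mcor = (1*2 - 0*1) / sqrt(1*2*2*3) ~= 0.577, which
# matches sklearn.metrics.matthews_corrcoef on the same labels.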
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall))
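# Continuing the worked example above: precision = 1/(1+0) = 1.0 and
# recall = 1/(1+1) = 0.5, so f1 = 2 * (1.0 * 0.5) / (1.0 + 0.5) ~= 0.667;
# the K.epsilon() terms only guard against division by zero.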
class DataSet:
Annotators = []
def __init__(self):
self.Annotators = []
class Annotator:
files = []
documents = []
Name = ""
def __init__(self):
self.files = []
self.documents = []
self.Name = ""
class Document:
Lines = []
DocumentName = ""
DatabaseID = ""
Annotations = []
Text = ""
isSpam = False
Project_Mark_Objective_1A = 0
Project_Mark_Objective_1B = 0
Project_Mark_Objective_1C = 0
Project_Mark_Actors_2A = 0
Project_Mark_Actors_2B = 0
Project_Mark_Actors_2C = 0
Project_Mark_Outputs_3A = 0
Project_Mark_Innovativeness_3A = 0
isProjectObjectiveSatisfied = False
isProjectActorSatisfied = False
isProjectOutputSatisfied = False
isProjectInnovativenessSatisfied = False
isProjectObjectiveSatisfied_predicted = False
isProjectActorSatisfied_predicted = False
isProjectOutputSatisfied_predicted = False
isProjectInnovativenessSatisfied_predicted = False
def __init__(self):
self.Text = ""
self.Lines = []
self.DocumentName = ""
self.DatabaseID = ""
self.Annotations = []
self.isSpam = False
self.Project_Mark_Objective_1A = 0
self.Project_Mark_Objective_1B = 0
self.Project_Mark_Objective_1C = 0
self.Project_Mark_Actors_2A = 0
self.Project_Mark_Actors_2B = 0
self.Project_Mark_Actors_2C = 0
self.Project_Mark_Outputs_3A = 0
self.Project_Mark_Innovativeness_3A = 0
self.Project_Mark_Innovativeness_3A = 0
self.isProjectObjectiveSatisfied = False
self.isProjectActorSatisfied = False
self.isProjectOutputSatisfied = False
self.isProjectInnovativenessSatisfied = False
self.isProjectObjectiveSatisfied_predicted = False
self.isProjectActorSatisfied_predicted = False
self.isProjectOutputSatisfied_predicted = False
self.isProjectInnovativenessSatisfied_predicted = False
class Line:
StartSpan = 0
EndSpan = 0
Text = ""
Sentences = []
Tokens = []
Annotations = []
def __init__(self):
self.StartSpan = 0
self.EndSpan = 0
self.Text = ""
self.Sentences = []
self.Tokens = []
self.Annotations = []
class Sentence:
SentenceText = ""
StartSpan = -1
EndSpan = -1
Annotations = []
def __init__(self):
self.SentenceText = ""
self.StartSpan = -1
self.EndSpan = -1
self.Annotations = []
class Annotation:
FromFile = ""
FromAnnotator = ""
AnnotationText = ""
StartSpan = -1
EndSpan = -1
HighLevelClass = ""
LowLevelClass = ""
def build_tensor(data):
for d in data:
print d
if __name__ == '__main__':
os.environ['PYTHONHASHSEED'] = '4'
np.random.seed(523)
max_words = 400000
batch_size = 32
epochs =50
GLOVE_DIR = "../../../Helpers/BratDataProcessing/Glove_dir"
MAX_SEQUENCE_LENGTH = 20000
EMBEDDING_DIM = 300
data_folder = "../../../Helpers/FullDataset_Alina"
ds = DataSet()
total_num_spam = 0
sentences = []
total_num_files = 0
#job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
for ann in annotators:
folder = data_folder+"/"+ann
Annot = Annotator()
Annot.Name = ann
ds.Annotators.append(Annot)
onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
for file in onlyfiles:
Annot.files.append(data_folder+"/"+ann+'/'+file)
doc = Document()
total_num_files = total_num_files + 1
doc.Lines = []
#doc.Annotations = []
doc.DocumentName= file
Annot.documents.append(doc)
if(file.startswith('a') or file.startswith('t')):
continue
print file
doc.DatabaseID = file.split("_")[1].split(".")[0]
fl = open(data_folder+"/"+ann+'/'+file,'r')
content = fl.read()
doc.Text = content
lines = content.split('\n')
line_index = 0
for line in lines:
l = Line()
l.StartSpan = line_index
l.EndSpan = line_index+len(line)
l.Text = line
line_index = line_index+len(line)+1
sentences.append(line)
doc.Lines.append(l)
an = open(data_folder+"/"+ann+'/'+file.replace(".txt",".ann"),'r')
annotations = an.readlines()
for a in annotations:
                a = re.sub(r'\d+;\d+','',a).replace('  ',' ')  # collapse the double space left after removing discontinuous span offsets
split_ann = a.split('\t')
if (split_ann[0].startswith("T")):
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
low_level_ann = sp_split_ann[0]
if low_level_ann=="ProjectMark":
continue
span_start = sp_split_ann[1]
span_end = sp_split_ann[2]
ann_text = split_ann[2]
Ann = Annotation()
Ann.AnnotationText = ann_text
Ann.StartSpan = int(span_start)
Ann.EndSpan = int(span_end)
Ann.FromAnnotator = Annot.Name
Ann.FromFile = file
Ann.LowLevelClass = low_level_ann
if(low_level_ann == "SL_Outputs_3a"):
Ann.HighLevelClass = "Outputs"
if (low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
Ann.HighLevelClass = "Objectives"
if (low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
Ann.HighLevelClass = "Actors"
if (low_level_ann == "SL_Innovativeness_4a"):
Ann.HighLevelClass = "Innovativeness"
doc.Annotations.append(Ann)
for line in doc.Lines:
if line.StartSpan<=Ann.StartSpan and line.EndSpan>=Ann.EndSpan:
line.Annotations.append(Ann)
else:
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
mark_name = sp_split_ann[0]
if (len(sp_split_ann)<=2):
continue
mark = sp_split_ann[2].replace('\n','')
if(mark_name=="DL_Outputs_3a"):
doc.Project_Mark_Outputs_3A = int(mark)
if int(mark)>=2:
doc.isProjectOutputSatisfied = True
if (mark_name == "DL_Objective_1a"):
doc.Project_Mark_Objective_1A = int(mark)
if int(mark)>=2:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1b"):
doc.Project_Mark_Objective_1B = int(mark)
if int(mark)>=2:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1c"):
doc.Project_Mark_Objective_1C = int(mark)
if int(mark)>=2:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Innovativeness_4a"):
doc.Project_Mark_Innovativeness_3A = int(mark)
if int(mark)>=2:
doc.isProjectInnovativenessSatisfied = True
if (mark_name == "DL_Actors_2a"):
doc.Project_Mark_Actors_2A = int(mark)
if int(mark)>=2:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2b"):
doc.Project_Mark_Actors_2B = int(mark)
if int(mark)>=2:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2c"):
doc.Project_Mark_Actors_2C = int(mark)
if int(mark)>=2:
doc.isProjectActorSatisfied = True
if(doc.Project_Mark_Objective_1A==0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C==0 and doc.Project_Mark_Actors_2A==0
and doc.Project_Mark_Actors_2B==0 and doc.Project_Mark_Actors_2B==0 and doc.Project_Mark_Actors_2C==0 and doc.Project_Mark_Outputs_3A == 0
and doc.Project_Mark_Innovativeness_3A==0):
doc.isSpam = True
total_num_spam = total_num_spam + 1
with open('annotations.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for ann in ds.Annotators:
for doc in ann.documents:
for annot in doc.Annotations:
spamwriter.writerow([annot.FromFile,annot.FromAnnotator,annot.AnnotationText,annot.LowLevelClass,annot.HighLevelClass,annot.StartSpan,annot.EndSpan])
i = 0
j = i+1
kappa_files = 0
done_documents = []
num_overlap_spam = 0
num_spam = 0
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
while i<len(ds.Annotators)-1:
while j<len(ds.Annotators):
annotator1 = ds.Annotators[i]
annotator2 = ds.Annotators[j]
for doc1 in annotator1.documents:
for doc2 in annotator2.documents:
if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
done_documents.append(doc1.DocumentName)
line_num = 0
ann1_objective = [0] * len(doc1.Lines)
ann2_objective = [0] * len(doc2.Lines)
ann1_output = [0] * len(doc1.Lines)
ann2_output = [0] * len(doc2.Lines)
ann1_actor = [0] * len(doc1.Lines)
ann2_actor = [0] * len(doc2.Lines)
ann1_innovativeness = [0] * len(doc1.Lines)
ann2_innovativeness = [0] * len(doc2.Lines)
while line_num<len(doc1.Lines):
if len(doc1.Lines[line_num].Annotations)>0:
for a in doc1.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann1_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann1_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann1_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann1_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
for a1 in doc2.Lines[line_num].Annotations:
if a1.HighLevelClass == a.HighLevelClass:
if a1.HighLevelClass == "Objectives":
match_objectives = match_objectives + 1
if a1.HighLevelClass == "Outputs":
match_outputs = match_outputs + 1
if a1.HighLevelClass == "Actors":
match_actors = match_actors + 1
if a1.HighLevelClass == "Innovativeness":
match_innovativeness = match_innovativeness + 1
if len(doc2.Lines[line_num].Annotations)>0:
for a in doc2.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann2_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann2_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann2_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann2_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
line_num = line_num + 1
ann1_annotations_outputs.extend(ann1_output)
ann2_annotations_outputs.extend(ann2_output)
ann1_annotations_objectives.extend(ann1_objective)
ann2_annotations_objectives.extend(ann2_objective)
ann1_annotations_actors.extend(ann1_actor)
ann2_annotations_actors.extend(ann2_actor)
ann1_annotations_innovativeness.extend(ann1_innovativeness)
ann2_annotations_innovativeness.extend(ann2_innovativeness)
kappa_outputs = sklearn.metrics.cohen_kappa_score(ann1_output,ann2_output)
kappa_objectives = sklearn.metrics.cohen_kappa_score(ann1_objective, ann2_objective)
kappa_actors = sklearn.metrics.cohen_kappa_score(ann1_actor, ann2_actor)
kappa_innovativeness = sklearn.metrics.cohen_kappa_score(ann1_innovativeness, ann2_innovativeness)
print "Statistics for document:"+doc1.DocumentName
print "Annotators "+annotator1.Name+" and "+annotator2.Name
print "Spam by "+annotator1.Name+":"+str(doc1.isSpam)
print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
if(doc1.isSpam == doc2.isSpam):
num_overlap_spam = num_overlap_spam+1
if doc1.isSpam:
num_spam = num_spam + 1
if doc2.isSpam:
num_spam = num_spam + 1
print "Cohen Kappa for class Objectives: "+str(kappa_objectives)
print "Cohen Kappa for class Actors: " + str(kappa_actors)
print "Cohen Kappa for class Outputs: " + str(kappa_outputs)
print "Cohen Kappa for class Innovativeness: " + str(kappa_innovativeness)
print "------------------------------------------------------------------"
kappa_files = kappa_files +1
j = j+1
i = i+1
j = i+1
print annotators
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
for doc in ann.documents:
doc_array.append([doc.Text,doc.isProjectObjectiveSatisfied,doc.isProjectActorSatisfied,doc.isProjectOutputSatisfied,doc.isProjectInnovativenessSatisfied])
objectives.append(doc.isProjectObjectiveSatisfied)
actors.append(doc.isProjectActorSatisfied)
outputs.append(doc.isProjectOutputSatisfied)
innovativeness.append(doc.isProjectInnovativenessSatisfied)
text_array.append(doc.Text)
if doc.isProjectActorSatisfied==True:
text_array.append(doc.Text)
actors.append(True)
text_array.append(doc.Text)
actors.append(True)
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(text_array)
sequences = tokenizer.texts_to_sequences(text_array)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(actors))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(0.1 * data.shape[0])
# x_train = data
# y_train = labels
total_precision = 0.0
total_recall = 0.0
total_fscore = 0.0
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.300d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
Total_TP = 0
Total_FP = 0
Total_FN = 0
x_train = data[0:9*nb_validation_samples]
y_train = labels[0:9*nb_validation_samples]
x_val = data[9*nb_validation_samples:]
y_val = labels[9*nb_validation_samples:]
early_stopping = EarlyStopping(monitor='binary_crossentropy', patience=5)
model = None
model = Sequential()
model.add(embedding_layer)
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(20))
# model.add(Dropout(0.2))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
#model.add(Dropout(0.2))
model.add(Flatten())
# model.add(Dense(200,activation='relu'))
# model.add(Dense(200, activation='relu'))
model.add(Dense(2,activation = 'softmax'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy',precision,recall, f1])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1,
callbacks=[early_stopping]
)
score = model.evaluate(x_val, y_val,
batch_size=batch_size, verbose=1)
score1 = score[0]
acc1 = score[1]
print('Test score:', score[0])
print('Test accuracy:', score[1])
predictions = model.predict(x_val,batch_size,1)
TP = y_val*predictions
TP_sum = 0
FP_sum = 0
FN_sum = 0
i = 0
for pred in predictions:
print "Prediction: "+str(pred)
print "Y valuation: "+str(y_val[i])
if pred[1] > 0.5 and y_val[i][1] == 1:
TP_sum = TP_sum + 1
if pred[1] > 0.5 and y_val[i][1]==0:
FP_sum = FP_sum + 1
if pred[1] < 0.5 and y_val[i][1]==1:
FN_sum = FN_sum + 1
i = i+1
number_samples = len(predictions)
print "Number of samples:"+str(number_samples)
print "True positives:"+str(TP_sum)
print "False positives:" + str(FP_sum)
print "False negatives:" + str(FN_sum)
Total_TP = Total_TP + TP_sum
Total_FP = Total_FP + FP_sum
Total_FN = Total_FN + FN_sum
if TP_sum == 0:
TP_sum = TP_sum + 1
FP_sum = FP_sum + 1
FN_sum = FN_sum + 1
precision_s = float(TP_sum)/float(TP_sum+FP_sum)
recall_s = float(TP_sum) / float(TP_sum + FN_sum)
F_score_s = 2.0*precision_s*recall_s/(precision_s+recall_s)
print "Precision: "+str(precision_s)
print "Recall: "+str(recall_s)
print "F1-score: "+str(F_score_s)
total_precision = total_precision + precision_s
total_recall = total_recall + recall_s
total_fscore = total_fscore + F_score_s
X = [""""""]
Y = [1, 0]
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(X)
sequences = tokenizer.texts_to_sequences(X)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
predictions = model.predict(x_val, batch_size, 1)
print predictions
# x_train = data
# y_train = labels
#
# model = None
# model = Sequential()
# model.add(embedding_layer)
# model.add(Conv1D(128,5,activation='relu'))
# model.add(MaxPooling1D(20))
# # model.add(Dropout(0.2))
# # model.add(Conv1D(64, 5, activation='relu'))
# # model.add(MaxPooling1D(20))
# # model.add(Dropout(0.2))
# model.add(Flatten())
# #model.add(Dense(200,activation = 'relu'))
#
# model.add(Dense(2))
# model.add(Activation('softmax'))
#
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accuracy',mcor,precision,recall, f1])
#
# history = model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# verbose=1,
# validation_split=0.1,
# #callbacks=[early_stopping]
# )
# model_json = model.to_json()
# with open("../Models/model_actors.json", "w") as json_file:
# json_file.write(model_json)
# # serialize weights to HDF5
# model.save_weights("../Models/model_actors.h5")
# print("Saved model to disk")
print "Overall results"
prec = total_precision
print "True positives: "+str(Total_TP)
print "False positives: "+str(Total_FP)
print "False negatives: "+str(Total_FN)
print "Precision:"+str(prec)
rec = total_recall
print "Recall:"+str(rec)
f1s = total_fscore
print "F1-score:"+str(f1s)
| gpl-3.0 |
MBARIMike/stoqs | stoqs/loaders/CANON/realtime/Contour.py | 3 | 40306 | #!/usr/bin/env python
__author__ = 'D.Cline'
__license__ = 'GPL v3'
__contact__ = 'dcline at mbari.org'
'''
Creates still and animated contour and dot plots from MBARI LRAUV data
D Cline
MBARI 25 September 2015
'''
import os
import sys
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE']='config.settings.local'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) # settings.py is one dir up
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../"))
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from django.contrib.gis.geos import LineString, Point
from django.contrib.gis.db.models import Extent
import numpy as np
import time
import pytz
import logging
import signal
import ephem
import bisect
import tempfile
import shutil
from django.contrib.gis.geos import MultiPoint
from django.db.models import Max, Min
from django.conf import settings
from collections import defaultdict
from datetime import datetime, timedelta, tzinfo
from matplotlib.ticker import FormatStrFormatter
#from matplotlib.mlab import griddata
from scipy.interpolate import griddata
from mpl_toolkits.basemap import Basemap
from stoqs.models import Activity, ActivityParameter, ParameterResource, Platform, MeasuredParameter, Measurement, Parameter
from utils.utils import percentile
from matplotlib.transforms import Bbox, TransformedBbox
from matplotlib import dates
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnectorPatch
# Set up global variables for logging output to STDOUT
logger = logging.getLogger('monitorTethysHotSpotLogger')
fh = logging.StreamHandler()
f = logging.Formatter("%(levelname)s %(asctime)sZ %(filename)s %(funcName)s():%(lineno)d %(message)s")
fh.setFormatter(f)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
class NoPPDataException(Exception):
pass
class Contour(object):
'''
Create plots for visualizing data from LRAUV vehicles
'''
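    # A minimal construction sketch; the argument values below are illustrative
    # only, since database, platform and parameter names depend on the local
    # STOQS deployment:
    #   c = Contour(start_datetime, end_datetime, 'stoqs_canon', ['tethys'],
    #               ['sea_water_temperature,sea_water_salinity'], 'Tethys realtime',
    #               '/tmp/tethys.png', True, 'sea_water_temperature', None)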
def __init__(self, start_datetime, end_datetime, database, platformName, plotGroup, title, outFilename, autoscale, plotDotParmName, booleanPlotGroup, animate=False, zoom=6, overlap=3):
self.start_datetime = start_datetime
self.end_datetime = end_datetime
self.platformName = platformName
self.plotGroup = plotGroup
self.plotGroupValid = []
self.title = title
self.animate = animate
self.outFilename = outFilename
self.database = database
self.autoscale = autoscale
self.plotDotParmName = plotDotParmName
self.booleanPlotGroup = booleanPlotGroup
self.zoom = zoom
self.overlap = overlap
self.dirpath = []
def getActivityExtent(self,start_datetime, end_datetime):
'''
Get spatial temporal extent for a platform.
'''
qs = Activity.objects.using(self.database).filter(platform__name__in=self.platformName)
qs = qs.filter(startdate__gte=start_datetime)
qs = qs.filter(enddate__lte=end_datetime)
seaQS = qs.aggregate(Min('startdate'), Max('enddate'))
self.activityStartTime = seaQS['startdate__min']
self.activityEndTime = seaQS['enddate__max']
dataExtent = qs.aggregate(Extent('maptrack'))
return dataExtent
def getAxisInfo(self, parm):
'''
Return appropriate min and max values and units for a parameter name
'''
# Get the 1 & 99 percentiles of the data for setting limits on the scatter plot
apQS = ActivityParameter.objects.using(self.database).filter(activity__platform__name=self.platformName)
pQS = apQS.filter(parameter__name=parm).aggregate(Min('p010'), Max('p990'))
pmin, pmax = (pQS['p010__min'], pQS['p990__max'])
# Get units for each parameter
prQS = ParameterResource.objects.using(self.database).filter(resource__name='units').values_list('resource__value')
try:
units = prQS.filter(parameter__name=parm)[0][0]
except IndexError:
            raise Exception("Unable to get units for parameter name {} from platform {}".format(parm, self.platformName))
return pmin, pmax, units
def getTimeSeriesData(self, start_datetime, end_datetime):
'''
Return time series of a list of Parameters from a Platform
'''
data_dict = defaultdict(lambda: {'datetime': [], 'lon': [], 'lat': [], 'depth': [], 'datavalue':[], 'units':'', 'p010':'', 'p990':''})
start_dt= []
end_dt = []
if not self.plotGroup :
raise Exception('Must specify list plotGroup')
for pln in self.platformName:
for g in self.plotGroup:
parameters = [x.strip() for x in g.split(',')]
parameters_valid = []
try:
for pname in parameters:
apQS = ActivityParameter.objects.using(self.database)
apQS = apQS.filter(activity__platform__name=pln)
apQS = apQS.filter(parameter__name=pname)
pQS = apQS.aggregate(Min('p010'), Max('p990'))
data_dict[pln+pname]['p010'] = pQS['p010__min']
data_dict[pln+pname]['p990'] = pQS['p990__max']
units=apQS.values('parameter__units')
data_dict[pln+pname]['units'] = units[0]['parameter__units']
qs = MeasuredParameter.objects.using(self.database)
qs = qs.filter(measurement__instantpoint__timevalue__gte=start_datetime)
qs = qs.filter(measurement__instantpoint__timevalue__lte=end_datetime)
qs = qs.filter(parameter__name=pname)
qs = qs.filter(measurement__instantpoint__activity__platform__name=pln)
sdt_count = qs.values_list('measurement__instantpoint__simpledepthtime__depth').count()
qs = qs.values('measurement__instantpoint__timevalue', 'measurement__depth', 'measurement__geom', 'datavalue'
).order_by('measurement__instantpoint__timevalue')
data_dict[pln+pname]['sdt_count'] = sdt_count
# only plot data with more than one point
if len(qs) > 0:
for rs in qs:
geom = rs['measurement__geom']
lat = geom.y
lon = geom.x
data_dict[pln+pname]['lat'].insert(0, lat)
data_dict[pln+pname]['lon'].insert(0, lon)
data_dict[pln+pname]['datetime'].insert(0, rs['measurement__instantpoint__timevalue'])
data_dict[pln+pname]['depth'].insert(0, rs['measurement__depth'])
data_dict[pln+pname]['datavalue'].insert(0, rs['datavalue'])
# for salinity, throw out anything less than 20 and do the percentiles manually
if pname.find('salinity') != -1 :
numpvar = np.array(data_dict[pln+pname]['datavalue'])
numpvar_filtered = numpvar[numpvar>20.0]
numpvar_filtered.sort()
listvar = list(numpvar_filtered)
p010 = percentile(listvar, 0.010)
p990 = percentile(listvar, 0.990)
data_dict[pln+pname]['p010'] = p010
data_dict[pln+pname]['p990'] = p990
# dates are in reverse order - newest first
start_dt.append(data_dict[pln+pname]['datetime'][-1])
end_dt.append(data_dict[pln+pname]['datetime'][0])
logger.debug('Loaded data for parameter {}'.format(pname))
parameters_valid.append(pname)
except Exception:
logger.error('{} not available in database for the dates {} {}'.format(pname, start_datetime, end_datetime))
continue
if len(parameters_valid) > 0:
self.plotGroupValid.append(','.join(parameters_valid))
# get the ranges of the data
if start_dt and end_dt:
data_start_dt = sorted(start_dt)[0]
data_end_dt = sorted(end_dt)[-1]
else:
#otherwise default to requested dates
data_start_dt = start_datetime
data_end_dt = end_datetime
if self.plotDotParmName not in self.plotGroupValid:
            # if the dot plot parameter name is not in the valid list of parameters found,
            # switch it to something else, preferring a chlorophyll parameter when available
matching = [s for s in self.plotGroupValid if "chl" in s]
if len(matching) > 0:
self.plotDotParmName = matching[0]
else:
self.plotDotParmName = self.plotGroupValid[0]
return data_dict, data_start_dt, data_end_dt
def getMeasuredPPData(self, start_datetime, end_datetime, platform, parm):
points = []
data = []
activity_names = []
maptracks = []
try:
qs = MeasuredParameter.objects.using(self.database)
qs = qs.filter(measurement__instantpoint__timevalue__gte=start_datetime)
qs = qs.filter(measurement__instantpoint__timevalue__lte=end_datetime)
qs = qs.filter(parameter__name=parm)
qs = qs.filter(measurement__instantpoint__activity__platform__name=platform)
qs = qs.values('measurement__instantpoint__timevalue', 'measurement__geom', 'parameter', 'datavalue',
'measurement__instantpoint__activity__maptrack', 'measurement__instantpoint__activity__name'
).order_by('measurement__instantpoint__timevalue')
for rs in qs:
geom = rs['measurement__geom']
lon = geom.x
lat = geom.y
pt = Point(float(lon),float(lat))
points.append(pt)
value = rs['datavalue']
data.append(float(value))
geom = rs['measurement__instantpoint__activity__maptrack']
activity_name = rs['measurement__instantpoint__activity__name']
# only keep maptracks from new activities
if not any(activity_name in s for s in activity_names):
activity_names.append(activity_name)
maptracks.append(geom)
except Exception:
            logger.error('{} not available in database for the dates {} {}'.format(parm, start_datetime, end_datetime))
return data, points, maptracks
def loadData(self, start_datetime, end_datetime):
try:
self.data, data_start, data_end = self.getTimeSeriesData(start_datetime, end_datetime)
return data_start, data_end
except Exception as e:
logger.warning(e)
raise e
return start_datetime, end_datetime
class DateFormatter(mpl.ticker.Formatter):
def __init__(self, scale_factor=1):
self.scale_factor = scale_factor
def __call__(self, x, pos=None):
d = time.gmtime(x*self.scale_factor)
utc = datetime(*d[:6])
local_tz = pytz.timezone('America/Los_Angeles')
utc_tz = pytz.timezone('UTC')
utc = utc.replace(tzinfo=utc_tz)
pst = utc.astimezone(local_tz)
return pst.strftime('%Y-%m-%d %H:%M')
def readCLT(self, fileName):
'''
Read the color lookup table from disk and return a python list of rgb tuples.
'''
cltList = []
for rgb in open(fileName, 'r'):
(r, g, b) = rgb.split(' ')[1:]
cltList.append([float(r), float(g), float(b)])
return cltList
def shadeNight(self,ax,xdates,miny,maxy):
'''
Shades plots during local nighttime hours
'''
utc_zone = pytz.utc
if len(xdates) < 50:
logger.debug("skipping day/night shading - too few points")
return
datetimes = []
for xdt in xdates:
dt = datetime.fromtimestamp(xdt)
datetimes.append(dt.replace(tzinfo=utc_zone))
loc = ephem.Observer()
loc.lat = '36.7087' # Monterey Bay region
loc.lon = '-121.0000'
loc.elev = 0
sun = ephem.Sun(loc)
mint=min(datetimes)
maxt=max(datetimes)
numdays = (maxt - mint).days
d = [mint + timedelta(days=dt2) for dt2 in range(numdays+1)]
d.sort()
sunrise = [dates.date2num(loc.next_rising(sun,start=x).datetime()) for x in d]
sunset = [dates.date2num(loc.next_setting(sun,start=x).datetime()) for x in d]
result = []
for st in datetimes:
result.append(bisect.bisect(sunrise, dates.date2num(st)) != bisect.bisect(sunset, dates.date2num(st)))
if self.scale_factor:
scale_xdates = [x/self.scale_factor for x in xdates]
else:
scale_xdates = xdates
ax.fill_between(scale_xdates, miny, maxy, where=result, facecolor='#C8C8C8', edgecolor='none', alpha=0.3)
def createPlot(self, start_datetime, end_datetime):
if len(self.data) == 0:
logger.debug('no data found to plot')
raise Exception('no data found to plot')
# GridSpecs for plots
outer_gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[1,3])
# tighten up space between plots
outer_gs.update(left=0.10, right=0.90, hspace=0.05)
map_gs = gridspec.GridSpecFromSubplotSpec(nrows=1, ncols=1, subplot_spec=outer_gs[0])
lower_gs = gridspec.GridSpecFromSubplotSpec(nrows=len(self.plotGroupValid), ncols=1, subplot_spec=outer_gs[1])
clt = self.readCLT(os.path.join(settings.STATICFILES_DIRS[0], 'colormaps', 'jetplus.txt'))
self.cm_jetplus = mpl.colors.ListedColormap(np.array(clt))
# start a new figure - size is in inches
fig = plt.figure(figsize=(8, 10))
fig.suptitle(self.title+'\n'+self.subtitle1+'\n'+self.subtitle2, fontsize=8)
pn = self.platformName[0]
# bound the depth to cover max of all parameter group depths
        # and flag removal of the scatter plot if there are more than 2000 points in any parameter
maxy = 0
for group in self.plotGroupValid:
parm = [x.strip() for x in group.split(',')]
for name in parm:
y = max(self.data[pn+name]['depth'])
sz = len(self.data[pn+name]['datavalue'])
if y > maxy:
maxy = y
# pad the depth by 20 meters to make room for parameter name to be displayed at bottom
rangey = [0.0, int(maxy) + 20]
i = 0
# add contour plots for each parameter group
for group in self.plotGroupValid:
parm = [x.strip() for x in group.split(',')]
plot_step = sum([self.data[pn+p]['units'].count('bool') for p in parm]) # count the number of boolean plots in the groups
plot_scatter_contour = len(parm) - plot_step # otherwise all other plots are scatter plots
plot_scatter = 0
# this parameter only makes sense to plot as a scatter plot
if 'vertical_temperature_homogeneity_index' in self.plotGroupValid:
plot_scatter = 1
plot_scatter_contour -= 1
#plot_dense = sum([val for val in len(self.data[pn+name]['datavalue']) > 2000]) # if more than 2000 points, skip the scatter plot
# choose the right type of gridspec to display the data
if plot_scatter_contour:
# one row for scatter and one for contour
plot_gs = gridspec.GridSpecFromSubplotSpec(nrows=len(parm)*2, ncols=2, subplot_spec=lower_gs[i], width_ratios=[30,1], wspace=0.05)
else:
# one row for single step/scatter/contour plots
plot_gs = gridspec.GridSpecFromSubplotSpec(nrows=len(parm), ncols=2, subplot_spec=lower_gs[i], width_ratios=[30,1], wspace=0.05)
j = 0
i += 1
for name in parm:
title = name
x = [time.mktime(xe.timetuple()) for xe in self.data[pn+name]['datetime']]
y = self.data[pn+name]['depth']
z = self.data[pn+name]['datavalue']
sdt_count = self.data[pn+name]['sdt_count']
units = '(' + self.data[pn+name]['units'] + ')'
if len(z):
if self.autoscale:
rangez = [self.data[pn+name]['p010'],self.data[pn+name]['p990']]
else:
rangez = [min(z), max(z)]
else:
rangez = [0, 0]
if name.find('chlorophyll') != -1 :
if not self.autoscale:
rangez = [0.0, 10.0]
if name.find('salinity') != -1 :
if not self.autoscale:
rangez = [33.3, 34.9]
units = ''
if name.find('temperature') != -1 :
if not self.autoscale:
rangez = [10.0, 14.0]
units = ' ($^\circ$C)'
logger.debug('getting subplot ax0')
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j])
ax0_plot = plt.Subplot(fig, gs[:])
fig.add_subplot(ax0_plot)
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j+1])
ax0_colorbar = plt.Subplot(fig, gs[:])
fig.add_subplot(ax0_colorbar)
if plot_scatter_contour:
logger.debug('getting subplot ax1')
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j + 2])
ax1_plot = plt.Subplot(fig, gs[:])
fig.add_subplot(ax1_plot)
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j + 3])
ax1_colorbar = plt.Subplot(fig, gs[:])
fig.add_subplot(ax1_colorbar)
# if no data found add in some fake data and plot a placeholder time/depth plot
if not x:
tmin = time.mktime(start_datetime.timetuple())
tmax = time.mktime(end_datetime.timetuple())
x.append(tmin)
x.append(tmax)
y.append(np.NaN)
y.append(np.NaN)
z.append(np.NaN)
z.append(np.NaN)
if plot_scatter_contour:
cs0, _, scale_factor = self.createContourPlot(title + pn,ax0_plot,x,y,z,rangey,rangez,start_datetime,end_datetime,sdt_count)
cs1 = self.createScatterPlot(title + pn,ax1_plot,x,y,z,rangey,rangez,start_datetime,end_datetime)
elif plot_step:
cs0 = self.createStepPlot(title + pn,title,ax0_plot,x,z,rangez,start_datetime,end_datetime)
elif plot_scatter:
cs0 = self.createScatterPlot(title + pn,ax0_plot,x,y,z,rangey,rangez,start_datetime,end_datetime)
else:
cs0, _, scale_factor = self.createContourPlot(title + pn,ax0_plot,x,y,z,rangey,rangez,start_datetime,end_datetime,sdt_count)
if plot_scatter_contour:
ax1_plot.text(0.95,0.02, name, verticalalignment='bottom',
horizontalalignment='right',transform=ax1_plot.transAxes,color='black',fontsize=8)
# Don't show on the upper contour plot
ax0_plot.xaxis.set_ticks([])
# Rotate date labels and format bottom
x_fmt = self.DateFormatter(1)
ax1_plot.xaxis.set_major_formatter(x_fmt)
for label in ax1_plot.xaxis.get_ticklabels():
label.set_rotation(10)
else:
ax0_plot.text(0.95,0.02, name, verticalalignment='bottom',
horizontalalignment='right',transform=ax0_plot.transAxes,color='black',fontsize=8)
# Rotate date labels and format bottom
x_fmt = self.DateFormatter(1)
ax0_plot.xaxis.set_major_formatter(x_fmt)
for label in ax0_plot.xaxis.get_ticklabels():
label.set_rotation(10)
self.shadeNight(ax0_plot,sorted(x),rangey[0], rangey[1])
                if plot_scatter_contour:
self.shadeNight(ax1_plot,sorted(x),rangey[0], rangey[1])
logger.debug('plotting colorbars')
                if plot_scatter_contour:
cbFormatter = FormatStrFormatter('%.2f')
cb = plt.colorbar(cs0, cax=ax0_colorbar, ticks=[min(rangez), max(rangez)], format=cbFormatter, orientation='vertical')
cb.set_label(units,fontsize=8)#,labelpad=5)
cb.ax.xaxis.set_ticks_position('top')
for t in cb.ax.yaxis.get_ticklabels():
t.set_fontsize(8)
cb = plt.colorbar(cs1, cax=ax1_colorbar, ticks=[min(rangez), max(rangez)], format=cbFormatter, orientation='vertical')
cb.set_label(units,fontsize=8)#,labelpad=5)
cb.ax.xaxis.set_ticks_position('top')
for t in cb.ax.yaxis.get_ticklabels():
t.set_fontsize(8)
else:
if plot_step:
ax0_colorbar.xaxis.set_major_locator(plt.NullLocator())
ax0_colorbar.yaxis.set_ticks_position('right')
for t in ax0_colorbar.yaxis.get_ticklabels():
t.set_fontsize(8)
else:
cbFormatter = FormatStrFormatter('%.2f')
cb = plt.colorbar(cs0, cax=ax0_colorbar, ticks=[min(rangez), max(rangez)], format=cbFormatter, orientation='vertical')
cb.set_label(units,fontsize=8)#,labelpad=5)
cb.ax.xaxis.set_ticks_position('top')
for t in cb.ax.yaxis.get_ticklabels():
t.set_fontsize(8)
                if plot_scatter_contour:
j+=4
else:
j+=2
# plot tracks
ax = plt.Subplot(fig, map_gs[:])
fig.add_subplot(ax, aspect='equal')
z = []
logger.debug('getting measured data')
z, points, maptracks = self.getMeasuredPPData(start_datetime, end_datetime, pn, self.plotDotParmName)
# get the percentile ranges for this to autoscale
pointsnp = np.array(points)
lon = pointsnp[:,0]
lat = pointsnp[:,1]
ltmin = self.extent['maptrack__extent'][1]
ltmax = self.extent['maptrack__extent'][3]
lnmin = self.extent['maptrack__extent'][0]
lnmax = self.extent['maptrack__extent'][2]
lndiff = abs(lnmax - lnmin)
ltdiff = abs(ltmax - ltmin)
logger.debug("lon diff {} lat diff {}".format(lndiff, ltdiff))
mindeg = .02
paddeg = .01
if lndiff < mindeg :
lnmin -= mindeg
lnmax += mindeg
if ltdiff < mindeg:
ltmin -= mindeg
ltmax += mindeg
e = (lnmin - paddeg, ltmin - paddeg, lnmax + paddeg, ltmax + paddeg)
        logger.debug('Extent ({},{},{},{})'.format(e[0], e[1], e[2], e[3]))
# retry up to 5 times to get the basemap
for i in range(0, 5):
logger.debug('Getting basemap')
mp = Basemap(llcrnrlon=e[0], llcrnrlat=e[1], urcrnrlon=e[2], urcrnrlat=e[3], projection='cyl', resolution='l', ax=ax)
try:
# Works, but coarse resolution
##mp.wmsimage('http://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?', layers=['GEBCO_08_Grid'])
mp.arcgisimage(server='http://services.arcgisonline.com/ArcGIS', service='Ocean_Basemap')
mp.drawparallels(np.linspace(e[1],e[3],num=3), labels=[True,False,False,False], fontsize=8, linewidth=0)
mp.drawmeridians(np.linspace(e[0],e[2],num=3), labels=[False,False,False,True], fontsize=8, linewidth=0)
            except Exception:
                logger.error('Could not download ocean basemap')
                mp = None
if mp is not None :
break
if mp is None :
            logger.debug('Error - cannot fetch basemap')
return
try:
logger.debug('plotting tracks')
if self.animate:
try:
track = LineString(points).simplify(tolerance=.001)
if track is not None:
ln,lt = list(zip(*track))
mp.plot(ln,lt,'-',c='k',alpha=0.5,linewidth=2, zorder=1)
except TypeError as e:
logger.warning("{}\nCannot plot map track path to None".format(e))
else:
for track in maptracks:
if track is not None:
ln,lt = list(zip(*track))
mp.plot(ln,lt,'-',c='k',alpha=0.5,linewidth=2, zorder=1)
            # if we have a valid series, then plot the dots
if self.plotDotParmName and len(z) > 0:
if len(z) > 2000:
sz = len(z)
stride = int(sz/200)
z_stride = z[0:sz:stride]
lon_stride = lon[0:sz:stride]
lat_stride = lat[0:sz:stride]
mp.scatter(lon_stride,lat_stride,c=z_stride,marker='.',lw=0,alpha=1.0,cmap=self.cm_jetplus,label=self.plotDotParmName,zorder=2)
if stride > 1:
ax.text(0.70,0.1, ('{} (every {} points)'.format(self.plotDotParmName, stride)), verticalalignment='bottom',
horizontalalignment='center',transform=ax.transAxes,color='black',fontsize=8)
else:
ax.text(0.70,0.1, ('{} (every point)'.format(self.plotDotParmName)), verticalalignment='bottom',
horizontalalignment='center',transform=ax.transAxes,color='black',fontsize=8)
else:
mp.scatter(lon,lat,c=z,marker='.',lw=0,alpha=1.0,cmap=self.cm_jetplus,label=self.plotDotParmName,zorder=2)
ax.text(0.70,0.1, ('{} (every point)'.format(self.plotDotParmName)), verticalalignment='bottom',
horizontalalignment='center',transform=ax.transAxes,color='black',fontsize=8)
if self.booleanPlotGroup:
# plot the binary markers
markers = ['o','x','d','D','8','1','2','3','4']
i = 1
for g in self.booleanPlotGroup:
parm = [z2.strip() for z2 in g.split(',')]
for name in parm:
if name in self.plotGroupValid:
                            logger.debug('Plotting boolean plot group parameter {}'.format(name))
z, points, maptracks = self.getMeasuredPPData(start_datetime, end_datetime, self.platformName[0], name)
pointsnp = np.array(points)
lon = pointsnp[:,0]
lat = pointsnp[:,1]
# scale up the size of point
s = [20*val for val in z]
if len(z) > 0:
mp.scatter(lon,lat,s=s,marker=markers[i],c='black',label=name,zorder=3)
i = i + 1
# plot the legend outside the plot in the upper left corner
l = ax.legend(loc='upper left', bbox_to_anchor=(1,1), prop={'size':8}, scatterpoints=1)# only plot legend symbol once
l.set_zorder(4) # put the legend on top
except Exception as e:
logger.warning(e)
if self.animate:
# append frames output as pngs with an indexed frame number before the gif extension
fname = '{}/frame_{:02}.png'.format(self.dirpath, self.frame)
else:
fname = self.outFilename
logger.debug('Saving figure {}'.format(fname))
fig.savefig(fname,dpi=120)#,transparent=True)
plt.close()
self.frame += 1
logger.debug('Done with contourPlot')
# Register an handler for the timeout
def handler(self, signum, frame):
logger.debug("Exceeded maximum time allowed for gridding!")
raise Exception("end of time")
def gridData(self, x, y, z, xi, yi):
try:
logger.debug('Gridding')
if (len(z) == 0):
                raise ValueError('No data returned to grid')
logger.debug('Gridding')
zi = griddata((x, y), np.array(z), (xi[None,:], yi[:,None]), method='nearest')
logger.debug('Done gridding')
except KeyError as e:
logger.warning('Got KeyError. Could not grid the data')
zi = None
raise(e)
return zi
def gridDataRbf(self, tmin, tmax, dmin, dmax, x, y, z):
from scipy.interpolate import Rbf
xi=[]
try:
xi, yi = np.mgrid[tmin:tmax:1000j, dmin:dmax:100j]
# use RBF
rbf = Rbf(x, y, z, epsilon=2)
zi = rbf(xi, yi)
except Exception as e:
logger.warning('Could not grid the data' + str(e))
zi = None
return xi,yi,zi
def createContourPlot(self,title,ax,x,y,z,rangey,rangez,startTime,endTime,sdt_count):
tmin = time.mktime(startTime.timetuple())
tmax = time.mktime(endTime.timetuple())
tgrid_max = 1000 # Reasonable maximum width for time-depth-flot plot is about 1000 pixels
dgrid_max = 200 # Height of time-depth-flot plot area is 200 pixels
dinc = 0.5 # Average vertical resolution of AUV Dorado
nlevels = 255 # Number of color filled contour levels
zmin = rangez[0]
zmax = rangez[1]
dmin = rangey[0]
dmax = rangey[1]
scale_factor = 1
# 2 points define a line, take half the number of simpledepthtime points
sdt_count = int(max(sdt_count, 2) / 2)
if sdt_count > tgrid_max:
sdt_count = tgrid_max
xi = np.linspace(tmin, tmax, sdt_count)
#print 'xi = %s' % xi
# Make depth spacing dinc m, limit to time-depth-flot resolution (dgrid_max)
y_count = int((dmax - dmin) / dinc )
if y_count > dgrid_max:
y_count = dgrid_max
yi = np.linspace(dmin, dmax, y_count)
#print 'yi = %s' %yi
try:
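            # Rescale the time axis so its span is numerically comparable to the
            # depth span; x values are divided by this factor before gridding so the
            # nearest-neighbour interpolation is not dominated by the much larger
            # time range.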
scale_factor = float(tmax -tmin) / (dmax - dmin)
except ZeroDivisionError as e:
logger.warning('Not setting scale_factor. Scatter plots will still work.')
contour_flag = False
scale_factor = 1
else:
logger.warning('self.scale_factor = {}'.format(scale_factor))
xi = xi / scale_factor
xg = [xe/scale_factor for xe in x]
contour_flag = True
zi = []
cs = None
# Register the signal function handler
# TODO: factor this out into the main thread
# signal.signal(signal.SIGALRM, self.handler)
# Define a timeout of 90 seconds for gridding functions
# signal.alarm(90)
if not self.data:
logger.warning('no data found to plot')
#signal.alarm(0)
raise Exception('no data')
if contour_flag:
try:
logger.warning('Gridding data with sdt_count = {}, and y_count = {}'.format(sdt_count, y_count))
zi = self.gridData(xg, y, z, xi, yi)
#signal.alarm(0)
except KeyError:
logger.warning('Got KeyError. Could not grid the data')
contour_flag = False
scale_factor = 1
try:
# use RBF
logger.warning('Trying radial basis function')
xi,yi,zi = self.gridDataRbf(tmin, tmax, dmin, dmax, xg, y, z)
contour_flag = True
#signal.alarm(0)
except Exception as e:
logger.warning('Could not grid the data' + str(e))
except Exception as e:
logger.warning('Could not grid the data' + str(e))
contour_flag = False
try:
# use RBF
logger.warning('Trying radial basis function')
xi,yi,zi = self.gridDataRbf(tmin, tmax, dmin, dmax, xg, y, z)
contour_flag = True
#signal.alarm(0)
except Exception as e:
logger.warning('Could not grid the data' + str(e))
try:
if scale_factor > 1 and contour_flag:
ax.set_xlim(tmin / scale_factor, tmax / scale_factor)
else:
ax.set_xlim(tmin, tmax)
self.scale_factor = scale_factor
ax.set_ylim([dmax,dmin])
ax.set_ylabel('depth (m)',fontsize=8)
ax.tick_params(axis='both',which='major',labelsize=8)
ax.tick_params(axis='both',which='minor',labelsize=8)
if contour_flag:
logger.debug('Contouring the data')
cs = ax.contourf(xi, yi, zi, levels=np.linspace(zmin,zmax, nlevels), cmap=self.cm_jetplus, extend='both')
# this will show the points where the contouring occurs
#ax.scatter(x,y,marker='.',s=2,c='k',lw=0)
else:
logger.debug('Plotting the data')
cs = ax.scatter(x,y,c=z,s=20,marker='.',vmin=zmin,vmax=zmax,lw=0,alpha=1.0,cmap=self.cm_jetplus)
# limit the number of ticks
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
except Exception as e:
logger.error(e)
try:
logger.debug('Plotting the data')
cs = ax.scatter(x,y,c=z,s=20,marker='.',vmin=zmin,vmax=zmax,lw=0,alpha=1.0,cmap=self.cm_jetplus)
except Exception as e:
logger.error(e)
return cs, zi, scale_factor
def createScatterPlot(self,title,ax,x,y,z,rangey,rangez,startTime,endTime):
tmin = time.mktime(startTime.timetuple())
tmax = time.mktime(endTime.timetuple())
zmin = rangez[0]
zmax = rangez[1]
dmin = rangey[0]
dmax = rangey[1]
try:
ax.set_xlim(tmin, tmax)
self.scale_factor = 1
ax.set_ylim([dmax,dmin])
ax.set_ylabel('depth (m)',fontsize=8)
ax.tick_params(axis='both',which='major',labelsize=8)
ax.tick_params(axis='both',which='minor',labelsize=8)
logger.debug('Plotting the data')
cs = ax.scatter(x,y,c=z,s=20,marker='.',vmin=zmin,vmax=zmax,lw=0,alpha=1.0,cmap=self.cm_jetplus)
# limit the number of ticks
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
except Exception as e:
logger.error(e)
return cs
def createStepPlot(self,title,label,ax,x,y,rangey,startTime,endTime):
tmin = time.mktime(startTime.timetuple())
tmax = time.mktime(endTime.timetuple())
dmin = rangey[1]
dmax = rangey[0]
try:
ax.set_xlim(tmin, tmax)
self.scale_factor = 1
ax.set_ylim([dmax,dmin])
ax.set_ylabel('{} (bool)'.format(label),fontsize=8)
ax.tick_params(axis='both',which='major',labelsize=8)
ax.tick_params(axis='both',which='minor',labelsize=8)
logger.debug('Plotting the step data')
labels = []
for val in y:
if not val:
labels.append('False')
else:
labels.append('True')
cs = ax.step(x,y,lw=1,alpha=0.8,c='black',label=labels)
# limit the number of ticks
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
except Exception as e:
logger.error(e)
return cs
def run(self):
self.frame = 0
logger.debug("Getting activity extent")
self.extent = self.getActivityExtent(self.start_datetime, self.end_datetime)
logger.debug('Loading data')
data_start, data_end = self.loadData(self.start_datetime, self.end_datetime)
if not self.data:
logger.debug('No valid data to plot')
return
# need to fix the scale over all the plots if animating
if self.animate:
self.autoscale = True
if data_start.tzinfo is None:
data_start = data_start.replace(tzinfo=pytz.UTC)
if data_end.tzinfo is None:
data_end = data_end.replace(tzinfo=pytz.UTC)
if self.animate:
self.dirpath = tempfile.mkdtemp()
zoom_window = timedelta(hours=self.zoom)
overlap_window = timedelta(hours=self.overlap)
end_datetime = data_start + zoom_window
start_datetime = data_start
try:
# Loop through sections of the data with temporal constraints based on the window and step command line parameters
while end_datetime <= data_end :
data_end_local = end_datetime.astimezone(pytz.timezone('America/Los_Angeles'))
data_start_local = start_datetime.astimezone(pytz.timezone('America/Los_Angeles'))
logger.debug('Plotting data for animation')
self.subtitle1 = '{} to {} PDT'.format(data_start_local.strftime('%Y-%m-%d %H:%M'), data_end_local.strftime('%Y-%m-%d %H:%M'))
self.subtitle2 = '{} to {} UTC'.format(start_datetime.strftime('%Y-%m-%d %H:%M'), end_datetime.strftime('%Y-%m-%d %H:%M'))
self.createPlot(start_datetime, end_datetime)
start_datetime = end_datetime - overlap_window
end_datetime = start_datetime + zoom_window
if not os.listdir(self.dirpath):
raise Exception('No plots generated')
cmd = "convert -loop 1 -delay 250 {}/frame*.png {}".format(self.dirpath,self.outFilename)
logger.debug(cmd)
os.system(cmd)
except Exception as e:
logger.error(e)
finally:
print('Done!')
shutil.rmtree(self.dirpath)
else :
try:
data_end_local = data_end.astimezone(pytz.timezone('America/Los_Angeles'))
data_start_local = data_start.astimezone(pytz.timezone('America/Los_Angeles'))
logger.debug('Plotting data')
self.subtitle1 = '{} to {} PDT'.format(data_start_local.strftime('%Y-%m-%d %H:%M'), data_end_local.strftime('%Y-%m-%d %H:%M'))
self.subtitle2 = '{} to {} UTC'.format(data_start.strftime('%Y-%m-%d %H:%M'), data_end.strftime('%Y-%m-%d %H:%M'))
self.createPlot(data_start, data_end)
except Exception as e:
logger.error(e)
raise(e)
| gpl-3.0 |
kaichogami/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 32 | 8066 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
jennyzhang0215/incubator-mxnet | example/deep-embedded-clustering/dec.py | 20 | 7847 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
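    """Best-map clustering accuracy: build the contingency matrix between predicted
    and true labels, find the optimal one-to-one label mapping with the Hungarian
    algorithm, and return (accuracy, contingency_matrix)."""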
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], int(Y[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
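            # Soft assignment of each embedded point z_i to cluster centre mu_j with a
            # Student's t kernel: q_ij is proportional to
            # (1 + ||z_i - mu_j||**2 / alpha) ** (-(alpha + 1) / 2), normalized over j.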
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
sep = X.shape[0]*9//10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| apache-2.0 |
colinbrislawn/scikit-bio | skbio/io/format/tests/test_ordination.py | 8 | 11787 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
import io
from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
from skbio import OrdinationResults
from skbio.io import OrdinationFormatError
from skbio.io.format.ordination import (
_ordination_to_ordination_results, _ordination_results_to_ordination,
_ordination_sniffer)
from skbio.util import get_data_path, assert_ordination_results_equal
class OrdinationTestData(TestCase):
def setUp(self):
self.valid_fps = map(
get_data_path,
['ordination_L&L_CA_data_scores', 'ordination_example3_scores',
'ordination_PCoA_sample_data_3_scores',
'ordination_example2_scores'])
# Store filepath, regex for matching the error message that should be
# raised when reading the file, and whether the file should be matched
# by the sniffer (True) or not (False).
self.invalid_fps = map(lambda e: (get_data_path(e[0]), e[1], e[2]), [
('empty', 'end of file.*Eigvals header', False),
('whitespace_only', 'Eigvals header not found', False),
('ordination_error1', 'Eigvals header not found', False),
('ordination_error2',
'Proportion explained header not found', False),
('ordination_error3', 'Species header not found', True),
('ordination_error4', 'Site header not found', True),
('ordination_error5', 'Biplot header not found', True),
('ordination_error6', 'Site constraints header not found', True),
('ordination_error7', 'empty line', False),
('ordination_error8', '9.*Proportion explained.*8', True),
('ordination_error9', '2 values.*1 in row 1', True),
('ordination_error10', '2 values.*1 in row 1', True),
('ordination_error11', 'Site constraints ids and site ids', True),
('ordination_error12', '9.*Eigvals.*8', True),
('ordination_error13', '9.*Proportion explained.*8', True),
('ordination_error14', 'Site is 0: 9 x 0', True),
('ordination_error15', '9 values.*8 in row 1', True),
('ordination_error16', 'Biplot is 0: 3 x 0', True),
('ordination_error17', '3 values.*2 in row 1', True),
('ordination_error18',
'proportion explained.*eigvals: 8 != 9', True),
('ordination_error19',
'coordinates.*species.*eigvals: 1 != 2', True),
('ordination_error20', 'coordinates.*site.*eigvals: 1 != 2', True),
('ordination_error21', 'one eigval', False),
('ordination_error22', 'end of file.*blank line', False),
('ordination_error23', 'end of file.*Proportion explained section',
True),
('ordination_error24', 'end of file.*row 2.*Species section', True)
])
class OrdinationResultsReaderWriterTests(OrdinationTestData):
def setUp(self):
super(OrdinationResultsReaderWriterTests, self).setUp()
# define in-memory results, one for each of the valid files in
# self.valid_fps
# CA results
axes_ids = ['CA1', 'CA2']
species_ids = ['Species1', 'Species2', 'Species3']
site_ids = ['Site1', 'Site2', 'Site3']
eigvals = pd.Series([0.0961330159181, 0.0409418140138], axes_ids)
species = pd.DataFrame([[0.408869425742, 0.0695518116298],
[-0.1153860437, -0.299767683538],
[-0.309967102571, 0.187391917117]],
index=species_ids, columns=axes_ids)
site = pd.DataFrame([[-0.848956053187, 0.882764759014],
[-0.220458650578, -1.34482000302],
[1.66697179591, 0.470324389808]],
index=site_ids, columns=axes_ids)
biplot = None
site_constraints = None
prop_explained = None
ca_scores = OrdinationResults(
'CA', 'Correspondence Analysis', eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
# CCA results
axes_ids = ['CCA%d' % i for i in range(1, 10)]
species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5', 'Species6', 'Species7',
'Species8']
site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
'Site6', 'Site7', 'Site8', 'Site9']
eigvals = pd.Series([0.366135830393, 0.186887643052, 0.0788466514249,
0.082287840501, 0.0351348475787, 0.0233265839374,
0.0099048981912, 0.00122461669234,
0.000417454724117], axes_ids)
species = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_CCA_species')),
index=species_ids, columns=axes_ids)
site = pd.DataFrame(
np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site')),
index=site_ids, columns=axes_ids)
biplot = pd.DataFrame(
[[-0.169746767979, 0.63069090084, 0.760769036049],
[-0.994016563505, 0.0609533148724, -0.0449369418179],
[0.184352565909, -0.974867543612, 0.0309865007541]],
columns=axes_ids[:3])
site_constraints = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_CCA_site_constraints')),
index=site_ids, columns=axes_ids)
prop_explained = None
cca_scores = OrdinationResults('CCA',
'Canonical Correspondence Analysis',
eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
# PCoA results
axes_ids = ['PC%d' % i for i in range(1, 10)]
species_ids = None
site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
'PC.355', 'PC.607', 'PC.634']
eigvals = pd.Series([0.512367260461, 0.300719094427, 0.267912066004,
0.208988681078, 0.19169895326, 0.16054234528,
0.15017695712, 0.122457748167, 0.0], axes_ids)
species = None
site = pd.DataFrame(
np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site')),
index=site_ids, columns=axes_ids)
biplot = None
site_constraints = None
prop_explained = pd.Series([0.267573832777, 0.15704469605,
0.139911863774, 0.109140272454,
0.100111048503, 0.0838401161912,
0.0784269939011, 0.0639511763509, 0.0],
axes_ids)
pcoa_scores = OrdinationResults('PCoA',
'Principal Coordinate Analysis',
eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
# RDA results
axes_ids = ['RDA%d' % i for i in range(1, 8)]
species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5']
site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
'Site6', 'Site7', 'Site8', 'Site9']
eigvals = pd.Series([25.8979540892, 14.9825779819, 8.93784077262,
6.13995623072, 1.68070536498, 0.57735026919,
0.275983624351], axes_ids)
species = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_RDA_species')),
index=species_ids, columns=axes_ids)
site = pd.DataFrame(
np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site')),
index=site_ids, columns=axes_ids)
biplot = pd.DataFrame(
[[0.422650019179, -0.559142585857, -0.713250678211],
[0.988495963777, 0.150787422017, -0.0117848614073],
[-0.556516618887, 0.817599992718, 0.147714267459],
[-0.404079676685, -0.9058434809, -0.127150316558]],
columns=axes_ids[:3])
site_constraints = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_RDA_site_constraints')),
index=site_ids, columns=axes_ids)
prop_explained = None
rda_scores = OrdinationResults(
'RDA', 'Redundancy Analysis', eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
self.ordination_results_objs = [ca_scores, cca_scores, pcoa_scores,
rda_scores]
def test_read_valid_files(self):
for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
obs = _ordination_to_ordination_results(fp)
assert_ordination_results_equal(
obs, obj, ignore_method_names=True,
ignore_axis_labels=True, ignore_biplot_scores_labels=True)
def test_read_invalid_files(self):
for invalid_fp, error_msg_regexp, _ in self.invalid_fps:
with six.assertRaisesRegex(self, OrdinationFormatError,
error_msg_regexp):
_ordination_to_ordination_results(invalid_fp)
def test_write(self):
for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
fh = io.StringIO()
_ordination_results_to_ordination(obj, fh)
obs = fh.getvalue()
fh.close()
with io.open(fp) as fh:
exp = fh.read()
npt.assert_equal(obs, exp)
def test_roundtrip_read_write(self):
for fp in self.valid_fps:
# Read.
obj1 = _ordination_to_ordination_results(fp)
# Write.
fh = io.StringIO()
_ordination_results_to_ordination(obj1, fh)
fh.seek(0)
# Read.
obj2 = _ordination_to_ordination_results(fh)
fh.close()
assert_ordination_results_equal(obj1, obj2)
class SnifferTests(OrdinationTestData):
def setUp(self):
super(SnifferTests, self).setUp()
def test_matches_and_nonmatches(self):
# Sniffer should match all valid files, and will match some invalid
# ones too because it doesn't exhaustively check the entire file.
for fp in self.valid_fps:
self.assertEqual(_ordination_sniffer(fp), (True, {}))
for fp, _, expected_sniffer_match in self.invalid_fps:
self.assertEqual(_ordination_sniffer(fp),
(expected_sniffer_match, {}))
if __name__ == '__main__':
main()
| bsd-3-clause |
sujithvm/internationality-journals | src/aminer_community.py | 3 | 2600 | print "[INFO] Reading aminer_cites.json"
# nodes belonging to each publication
nodes = {}
# self cited edges
edge_list_1 = []
# non self cited edges
edge_list_2 = []
# publication edges
edge_list_3 = []
import json
with open('../output/aminer_cites.json') as data_file:
data = json.load(data_file)
for publication in data :
papers = data[publication]
for paper in papers :
# add edge to publication
src = paper
edge_list_3.append((publication, src))
# add node to respective publication
if publication not in nodes :
nodes[publication] = []
nodes[publication].append(paper)
cites = data[publication][paper]
for cite in cites :
src = paper
dest = cite['index']
# add node to respective publication
cite_pub = cite['publication']
if cite_pub not in nodes :
nodes[cite_pub] = []
nodes[cite_pub].append(dest)
# add edges
edge = (src, dest)
# self cited edge
if cite['self'] == True : edge_list_1.append(edge)
# non self cited edge
else : edge_list_2.append(edge)
# add edge to publication
edge_list_3.append((cite_pub, dest))
# remove all duplicates
edge_list_3 = list(set(edge_list_3))
# remove all duplicates
for pub in nodes :
nodes[pub] = list(set(nodes[pub]))
print "[INFO] Done reading"
print "[INFO] Generating graph"
import networkx as nx
import matplotlib.pyplot as plt
# make a new graph
G = nx.Graph()
all_edges = []
all_edges.extend(edge_list_1)
all_edges.extend(edge_list_2)
all_edges.extend(edge_list_3)
G.add_edges_from(all_edges)
cite_dict = {}
edge_dict = {}
import community
# first compute the best partition
partition=community.best_partition(G)
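# Louvain modularity optimization; 'partition' maps each node id to its community id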
#drawing the graph based on number of links
size = float(len(set(partition.values())))
pos = nx.spring_layout(G)
count = 0.
for com in set(partition.values()) :
count = count + 1.
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20,
node_color = str(count / size))
#nx.draw_networkx_edges(G,pos, alpha=0.5)
nx.draw(G,pos,node_size=15,alpha=1,node_color="blue", with_labels=False) # alpha = transparency, labels = names
#plt.savefig("aminer_smallest.png",dpi=1000)
#nx.draw_networkx_edges(G,pos, alpha=0.5)
plt.show()
print "[INFO] Done generating graph" | mit |
dancingdan/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 13 | 5823 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import uuid
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
from tensorflow.python.util.tf_export import estimator_export
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_unique_target_key(features, target_column_name):
"""Returns a key that does not exist in the input DataFrame `features`.
Args:
features: DataFrame
target_column_name: Name of the target column as a `str`
Returns:
A unique key that can be used to insert the target into
features.
"""
if target_column_name in features:
target_column_name += '_' + str(uuid.uuid4())
return target_column_name
@estimator_export('estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object or `DataFrame`. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have predicted and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`. This parameter
is not used when `y` is a `DataFrame`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
ValueError: if 'shuffle' is not provided or a bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise ValueError('shuffle must be provided and explicitly set as boolean '
'(it is recommended to set it as True for training); '
'got {}'.format(shuffle))
if not isinstance(target_column, six.string_types):
raise TypeError('target_column must be a string type')
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
if isinstance(y, pd.DataFrame):
y_columns = [(column, _get_unique_target_key(x, column))
for column in list(y)]
target_column = [v for _, v in y_columns]
x[target_column] = y
else:
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
if isinstance(target_column, list):
keys = [k for k, _ in y_columns]
values = [features.pop(column) for column in target_column]
target = {k: v for k, v in zip(keys, values)}
else:
target = features.pop(target_column)
return features, target
return features
return input_fn
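# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for demonstration; not part of the original
# module). The DataFrame columns and the `estimator` object below are
# assumptions made purely for illustration; any tf.estimator.Estimator whose
# feature columns match these feature names would do.
#
#   import pandas as pd
#   x = pd.DataFrame({'age': [25, 32, 47, 51],
#                     'income': [49.0, 62.5, 81.0, 73.2]})
#   y = pd.Series([0, 1, 1, 0])
#   train_input_fn = pandas_input_fn(
#       x=x, y=y, batch_size=2, num_epochs=None, shuffle=True)
#   estimator.train(input_fn=train_input_fn, steps=100)
# ---------------------------------------------------------------------------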
| apache-2.0 |
abhitopia/tensorflow | tensorflow/examples/learn/iris.py | 35 | 1654 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
mgarrett57/DENCLUE | denclue.py | 1 | 8997 | """
denclue.py
@author: mgarrett
"""
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
import networkx as nx
def _hill_climb(x_t, X, W=None, h=0.1, eps=1e-7):
"""
This function climbs the 'hill' of the kernel density function
and finds the 'peak', which represents the density attractor
"""
error = 99.
prob = 0.
x_l1 = np.copy(x_t)
#Sum of the last three steps is used to establish radius
#of neighborhood around attractor. Authors suggested two
#steps works well, but I found three is more robust to
#noisey datasets.
radius_new = 0.
radius_old = 0.
radius_twiceold = 0.
iters = 0.
while True:
radius_thriceold = radius_twiceold
radius_twiceold = radius_old
radius_old = radius_new
x_l0 = np.copy(x_l1)
x_l1, density = _step(x_l0, X, W=W, h=h)
error = density - prob
prob = density
radius_new = np.linalg.norm(x_l1-x_l0)
radius = radius_thriceold + radius_twiceold + radius_old + radius_new
iters += 1
if iters>3 and error < eps:
break
return [x_l1, prob, radius]
def _step(x_l0, X, W=None, h=0.1):
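    """One hill-climbing update: move to the kernel-weighted mean of the data,
    x_{l+1} = sum_j K(x_l, X_j) W_j X_j / sum_j K(x_l, X_j) W_j,
    and also return the (weighted) kernel density estimate at the current point."""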
n = X.shape[0]
d = X.shape[1]
superweight = 0. #superweight is the kernel X weight for each item
x_l1 = np.zeros((1,d))
if W is None:
W = np.ones((n,1))
else:
W = W
for j in range(n):
kernel = kernelize(x_l0, X[j], h, d)
kernel = kernel * W[j]/(h**d)
superweight = superweight + kernel
x_l1 = x_l1 + (kernel * X[j])
x_l1 = x_l1/superweight
density = superweight/np.sum(W)
return [x_l1, density]
def kernelize(x, y, h, degree):
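    # Isotropic Gaussian kernel: exp(-||x - y||^2 / (2 h^2)) / (2*pi)^(degree/2).
    # The 1/h^degree bandwidth normalization is applied by the caller (_step).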
kernel = np.exp(-(np.linalg.norm(x-y)/h)**2./2.)/((2.*np.pi)**(degree/2))
return kernel
class DENCLUE(BaseEstimator, ClusterMixin):
"""Perform DENCLUE clustering from vector array.
Parameters
----------
h : float, optional
The smoothing parameter for the gaussian kernel. This is a hyper-
parameter, and the optimal value depends on data. Default is the
np.std(X)/5.
eps : float, optional
Convergence threshold parameter for density attractors
min_density : float, optional
The minimum kernel density required for a cluster attractor to be
        considered a cluster and not noise. Cluster info will still be kept
        but the label for the corresponding instances will be -1 for noise.
        Since what constitutes a high enough kernel density depends on the
nature of the data, it's often best to fit the model first and
explore the results before deciding on the min_density, which can be
set later with the 'set_minimum_density' method.
Default is 0.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. In this version, I've only tested 'euclidean' at this
moment.
Attributes
-------
cluster_info_ : dictionary [n_clusters]
Contains relevant information of all clusters (i.e. density attractors)
Information is retained even if the attractor is lower than the
minimum density required to be labelled a cluster.
labels_ : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
References
----------
Hinneburg A., Gabriel HH. "DENCLUE 2.0: Fast Clustering Based on Kernel
Density Estimation". In: R. Berthold M., Shawe-Taylor J., Lavrač N. (eds)
Advances in Intelligent Data Analysis VII. IDA 2007
"""
def __init__(self, h=None, eps=1e-8, min_density=0., metric='euclidean'):
self.h = h
self.eps = eps
self.min_density = min_density
self.metric = metric
def fit(self, X, y=None, sample_weight=None):
if not self.eps > 0.0:
raise ValueError("eps must be positive.")
self.n_samples = X.shape[0]
self.n_features = X.shape[1]
density_attractors = np.zeros((self.n_samples,self.n_features))
radii = np.zeros((self.n_samples,1))
density = np.zeros((self.n_samples,1))
#create default values
if self.h is None:
self.h = np.std(X)/5
if sample_weight is None:
sample_weight = np.ones((self.n_samples,1))
else:
sample_weight = sample_weight
#initialize all labels to noise
labels = -np.ones(X.shape[0])
#climb each hill
for i in range(self.n_samples):
density_attractors[i], density[i], radii[i] = _hill_climb(X[i], X, W=sample_weight,
h=self.h, eps=self.eps)
#initialize cluster graph to finalize clusters. Networkx graph is
#used to verify clusters, which are connected components of the
#graph. Edges are defined as density attractors being in the same
#neighborhood as defined by our radii for each attractor.
cluster_info = {}
num_clusters = 0
cluster_info[num_clusters]={'instances': [0],
'centroid': np.atleast_2d(density_attractors[0])}
g_clusters = nx.Graph()
for j1 in range(self.n_samples):
g_clusters.add_node(j1, attr_dict={'attractor':density_attractors[j1], 'radius':radii[j1],
'density':density[j1]})
#populate cluster graph
for j1 in range(self.n_samples):
for j2 in (x for x in range(self.n_samples) if x != j1):
if g_clusters.has_edge(j1,j2):
continue
diff = np.linalg.norm(g_clusters.node[j1]['attractor']-g_clusters.node[j2]['attractor'])
                if diff <= (g_clusters.node[j1]['radius']+g_clusters.node[j2]['radius']):
g_clusters.add_edge(j1, j2)
#connected components represent a cluster
clusters = list(nx.connected_component_subgraphs(g_clusters))
num_clusters = 0
#loop through all connected components
for clust in clusters:
#get maximum density of attractors and location
max_instance = max(clust, key=lambda x: clust.node[x]['density'])
max_density = clust.node[max_instance]['density']
max_centroid = clust.node[max_instance]['attractor']
            #In Hinneburg, Gabriel (2007), for attractors in a component that
            #are not fully connected (i.e. not all attractors are within each
            #other's neighborhood), they recommend re-running the hill climb
            #with lower eps. From testing, this seems unnecessary for all but
#special edge cases. Therefore, completeness info is put into
#cluster info dict, but not used to re-run hill climb.
complete = False
c_size = len(clust.nodes())
if clust.number_of_edges() == (c_size*(c_size-1))/2.:
complete = True
#populate cluster_info dict
cluster_info[num_clusters] = {'instances': clust.nodes(),
'size': c_size,
'centroid': max_centroid,
'density': max_density,
'complete': complete}
#if the cluster density is not higher than the minimum,
#instances are kept classified as noise
if max_density >= self.min_density:
labels[clust.nodes()]=num_clusters
num_clusters += 1
self.clust_info_ = cluster_info
self.labels_ = labels
return self
def get_density(self, x, X, y=None, sample_weight=None):
superweight=0.
n_samples = X.shape[0]
n_features = X.shape[1]
if sample_weight is None:
sample_weight = np.ones((n_samples,1))
else:
sample_weight = sample_weight
for y in range(n_samples):
kernel = kernelize(x, X[y], h=self.h, degree=n_features)
kernel = kernel * sample_weight[y]/(self.h**n_features)
superweight = superweight + kernel
density = superweight/np.sum(sample_weight)
return density
def set_minimum_density(self, min_density):
self.min_density = min_density
labels_copy = np.copy(self.labels_)
for k in self.clust_info_.keys():
if self.clust_info_[k]['density']<min_density:
labels_copy[self.clust_info_[k]['instances']]= -1
else:
labels_copy[self.clust_info_[k]['instances']]= k
self.labels_ = labels_copy
return self
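# Minimal usage sketch (illustrative): fit DENCLUE on a toy blob dataset.
# Assumes scikit-learn's make_blobs is available and that the older
# networkx API used in fit() above is installed; the bandwidth and density
# thresholds below are arbitrary example values.
if __name__ == "__main__":
    from sklearn.datasets import make_blobs

    X_demo, _ = make_blobs(n_samples=200, centers=3, random_state=0)
    demo = DENCLUE(h=0.3, eps=1e-8, min_density=0.01)
    demo.fit(X_demo)
    print(demo.labels_)             # -1 marks instances labelled as noise
    demo.set_minimum_density(0.05)  # relabel with a stricter density threshold
    print(demo.labels_)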
| mit |
koldunovn/geopandas | geopandas/tools/util.py | 11 | 1552 | import pandas as pd
import geopandas as gpd
from shapely.geometry import (
Point,
LineString,
Polygon,
MultiPoint,
MultiLineString,
MultiPolygon
)
from shapely.geometry.base import BaseGeometry
_multi_type_map = {
'Point': MultiPoint,
'LineString': MultiLineString,
'Polygon': MultiPolygon
}
def collect(x, multi=False):
"""
Collect single part geometries into their Multi* counterpart
Parameters
----------
x : an iterable or Series of Shapely geometries, a GeoSeries, or
a single Shapely geometry
multi : boolean, default False
if True, force returned geometries to be Multi* even if they
only have one component.
"""
if isinstance(x, BaseGeometry):
x = [x]
elif isinstance(x, pd.Series):
x = list(x)
# We cannot create GeometryCollection here so all types
# must be the same. If there is more than one element,
# they cannot be Multi*, i.e., can't pass in combination of
# Point and MultiPoint... or even just MultiPoint
t = x[0].type
if not all(g.type == t for g in x):
        raise ValueError('Geometry type must be homogeneous')
if len(x) > 1 and t.startswith('Multi'):
raise ValueError(
'Cannot collect {0}. Must have single geometries'.format(t))
if len(x) == 1 and (t.startswith('Multi') or not multi):
# If there's only one single part geom and we're not forcing to
# multi, then just return it
return x[0]
return _multi_type_map[t](x)
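# Minimal usage sketch (illustrative): collect plain Shapely points; the
# coordinates are arbitrary example values.
if __name__ == "__main__":
    pts = [Point(0, 0), Point(1, 1)]
    print(collect(pts))                      # MULTIPOINT (0 0, 1 1)
    print(collect(Point(2, 2)))              # single geometry passes through
    print(collect(Point(2, 2), multi=True))  # forced to a MultiPoint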
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDTree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
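# Minimal usage sketch (illustrative): embed a toy swiss roll in 2-D.
# Assumes the module is run as part of the package (e.g. via
# ``python -m sklearn.manifold.isomap``); the neighbour and component
# counts are arbitrary example values.
if __name__ == "__main__":
    from sklearn.datasets import make_swiss_roll

    X_demo, _ = make_swiss_roll(n_samples=500, random_state=0)
    iso = Isomap(n_neighbors=10, n_components=2)
    X_embedded = iso.fit_transform(X_demo)
    print(X_embedded.shape)             # (500, 2)
    print(iso.reconstruction_error())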
| bsd-3-clause |
djgagne/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
anntzer/scikit-learn | examples/calibration/plot_compare_calibration.py | 27 | 4999 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]_: "Methods such as bagging and random forests that average predictions
from a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic
sigmoid shape, indicating that the classifier could trust its "intuition"
more and return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
  the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]_), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier()
# #############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
shuangshuangwang/spark | python/pyspark/sql/pandas/map_ops.py | 23 | 3806 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark.rdd import PythonEvalType
class PandasMapOpsMixin(object):
"""
    Mix-in for pandas map operations. Currently, only :class:`DataFrame`
can use this class.
"""
def mapInPandas(self, func, schema):
"""
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a pandas DataFrame, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pandas.DataFrame`\\s and return
another iterator of `pandas.DataFrame`\\s. All columns are passed
together as an iterator of `pandas.DataFrame`\\s to the function and the
returned iterator of `pandas.DataFrame`\\s are combined as a :class:`DataFrame`.
Each `pandas.DataFrame` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes an iterator of `pandas.DataFrame`\\s, and
outputs an iterator of `pandas.DataFrame`\\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for pdf in iterator:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is experimental
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import DataFrame
from pyspark.sql.pandas.functions import pandas_udf
assert isinstance(self, DataFrame)
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
udf_column = udf(*[self[col] for col in self.columns])
jdf = self._jdf.mapInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.map_ops
globs = pyspark.sql.pandas.map_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.map_ops tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.map_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
jreback/pandas | asv_bench/benchmarks/array.py | 8 | 1273 | import numpy as np
import pandas as pd
class BooleanArray:
def setup(self):
self.values_bool = np.array([True, False, True, False])
self.values_float = np.array([1.0, 0.0, 1.0, 0.0])
self.values_integer = np.array([1, 0, 1, 0])
self.values_integer_like = [1, 0, 1, 0]
self.data = np.array([True, False, True, False])
self.mask = np.array([False, False, True, False])
def time_constructor(self):
pd.arrays.BooleanArray(self.data, self.mask)
def time_from_bool_array(self):
pd.array(self.values_bool, dtype="boolean")
def time_from_integer_array(self):
pd.array(self.values_integer, dtype="boolean")
def time_from_integer_like(self):
pd.array(self.values_integer_like, dtype="boolean")
def time_from_float_array(self):
pd.array(self.values_float, dtype="boolean")
class IntegerArray:
def setup(self):
self.values_integer = np.array([1, 0, 1, 0])
self.data = np.array([1, 2, 3, 4], dtype="int64")
self.mask = np.array([False, False, True, False])
def time_constructor(self):
pd.arrays.IntegerArray(self.data, self.mask)
def time_from_integer_array(self):
pd.array(self.values_integer, dtype="Int64")
| bsd-3-clause |
kaffeebrauer/Lean | ToolBox/Visualizer/QuantConnect.Visualizer.py | 7 | 7534 | """
Usage:
QuantConnect.Visualizer.py DATAFILE [--assembly assembly_path] [--output output_folder] [--size height,width]
Arguments:
DATAFILE Absolute or relative path to a zipped data file to plot.
Optionally the zip entry file can be declared by using '#' as separator.
Options:
-h --help show this.
-a --assembly assembly_path path to the folder with the assemblies dll/exe [default: ../.].
-o --output output_folder path to the output folder, each new plot will be saved there with a random name [default: ./output_folder].
-s, --size height,width plot size in pixels [default: 800,400].
Examples:
QuantConnect.Visualizer.py ../relative/path/to/file.zip
QuantConnect.Visualizer.py absolute/path/to/file.zip#zipEntry.csv
QuantConnect.Visualizer.py absolute/path/to/file.zip -o path/to/image.png -s 1024,800
"""
import json
import os
import sys
import uuid
from clr import AddReference
from pathlib import Path
from numpy import NaN
import matplotlib as mpl
mpl.use('Agg')
from docopt import docopt
from matplotlib.dates import DateFormatter
class Visualizer:
"""
Python wrapper for the Lean ToolBox.Visualizer.
This class is instantiated with the dictionary docopt generates from the CLI arguments.
    It contains the methods to set up and load the C# assemblies into Python. The QuantConnect.ToolBox assembly folder
can be declared in the module's CLI.
"""
def __init__(self, arguments):
self.arguments = arguments
zipped_data_file = Path(self.arguments['DATAFILE'].split('#')[0])
if not zipped_data_file.exists():
raise FileNotFoundError(f'File {zipped_data_file.resolve().absolute()} does not exist')
self.palette = ['#f5ae29', '#657584', '#b1b9c3', '#222222']
# Loads the Toolbox to access Visualizer
self.setup_and_load_toolbox()
# Sets up the Composer
from QuantConnect.Data.Auxiliary import LocalDiskMapFileProvider
from QuantConnect.Util import Composer
from QuantConnect.Interfaces import IMapFileProvider
localDiskMapFileProvider = LocalDiskMapFileProvider()
Composer.Instance.AddPart[IMapFileProvider](localDiskMapFileProvider)
        # Initialize LeanDataReader and PandasConverter
from QuantConnect.ToolBox import LeanDataReader
from QuantConnect.Python import PandasConverter
self.lean_data_reader = LeanDataReader(self.arguments['DATAFILE'])
self.pandas_converter = PandasConverter()
# Generate random name for the plot.
self.plot_filename = self.generate_plot_filename()
def setup_and_load_toolbox(self):
"""
Checks if the path given in the CLI (or its defaults values) contains the needed assemblies.
:return: void.
        :raise: KeyError: if the needed assembly dlls are not available.
"""
# Check Lean assemblies are present in the composer-dll-directory key provided.
assemblies_folder_info = (Path(self.arguments['--assembly']))
toolbox_assembly = assemblies_folder_info.joinpath('QuantConnect.ToolBox.exe')
common_assembly = assemblies_folder_info.joinpath('QuantConnect.Common.dll')
if not (toolbox_assembly.exists() and common_assembly.exists()):
raise KeyError("Please set up the '--assembly' option with the path to Lean assemblies.\n" +
f"Absolute path provided: {assemblies_folder_info.resolve().absolute()}")
AddReference(str(toolbox_assembly.resolve().absolute()))
AddReference(str(common_assembly.resolve().absolute()))
os.chdir(str(assemblies_folder_info.resolve().absolute()))
return
def generate_plot_filename(self):
"""
Generates a random name for the output plot image file in the default folder defined in the CLI.
:return: an absolute path to the output plot image file.
"""
default_output_folder = (Path(self.arguments['--output']))
if not default_output_folder.exists():
os.makedirs(str(default_output_folder.resolve().absolute()))
file_name = f'{str(uuid.uuid4())[:8]}.png'
file_path = default_output_folder.joinpath(file_name)
return str(file_path.resolve().absolute())
def get_data(self):
"""
        Makes use of Lean's ToolBox LeanDataReader plus the PandasConverter to parse the data as a pandas.DataFrame
from a given zip file and an optional internal filename for option and futures.
:return: a pandas.DataFrame with the data from the file.
"""
from QuantConnect.Data import BaseData
df = self.pandas_converter.GetDataFrame[BaseData](self.lean_data_reader.Parse())
if df.empty:
raise Exception("Data frame is empty")
symbol = df.index.levels[0][0]
return df.loc[symbol]
def filter_data(self, df):
"""
Applies the filters defined in the CLI arguments to the parsed data.
Not fully implemented yet, it only selects the close columns.
        :param df: pandas.DataFrame with all the data from the selected file.
:return: a filtered pandas.DataFrame.
TODO: implement column and time filters.
"""
if 'tick' in self.arguments['DATAFILE']:
cols_to_plot = [col for col in df.columns if 'price' in col]
else:
cols_to_plot = [col for col in df.columns if 'close' in col]
if 'openinterest' in self.arguments['DATAFILE']:
cols_to_plot = ['openinterest']
cols_to_plot = cols_to_plot[:2] if len(cols_to_plot) == 3 else cols_to_plot
df = df.loc[:, cols_to_plot]
return df
def plot_and_save_image(self, data):
"""
Plots the data and saves the plot as a png image.
:param data: a pandas.DataFrame with the data to plot.
:return: void
"""
is_future_tick = ('future' in self.arguments['DATAFILE'] and 'tick' in self.arguments['DATAFILE']
and 'quote' in self.arguments['DATAFILE'])
if is_future_tick:
data = data.replace(0, NaN)
plot = data.plot(grid=True, color=self.palette)
is_low_resolution_data = 'hour' in self.arguments['DATAFILE'] or 'daily' in self.arguments['DATAFILE']
if not is_low_resolution_data:
plot.xaxis.set_major_formatter(DateFormatter("%H:%M"))
plot.set_xlabel(self.lean_data_reader.GetDataTimeZone().Id)
is_forex = 'forex' in self.arguments['DATAFILE']
is_open_interest = 'openinterest' in self.arguments['DATAFILE']
if is_forex:
plot.set_ylabel('exchange rate')
elif is_open_interest:
plot.set_ylabel('open contracts')
else:
plot.set_ylabel('price (USD)')
fig = plot.get_figure()
size_px = [int(p) for p in self.arguments['--size'].split(',')]
fig.set_size_inches(size_px[0] / fig.dpi, size_px[1] / fig.dpi)
fig.savefig(self.plot_filename, transparent=True, dpi=fig.dpi)
return
if __name__ == "__main__":
arguments = docopt(__doc__)
visualizer = Visualizer(arguments)
# Gets the pandas.DataFrame from the data file
df = visualizer.get_data()
# Selects the columns you want to plot
df = visualizer.filter_data(df)
# Save the image
visualizer.plot_and_save_image(df)
print(visualizer.plot_filename)
sys.exit(0)
| apache-2.0 |
cryptobanana/sdnctrlsim | sim/controller.py | 2 | 20229 | #!/usr/bin/env python
#
# Dan Levin <[email protected]>
# Brandon Heller <[email protected]>
import logging
from random import choice
import sys
import matplotlib.pyplot as plt
import networkx as nx
from resource_allocator import ResourceAllocator
logger = logging.getLogger(__name__)
class Controller(ResourceAllocator):
"""
Generic controller -- does not implement control logic:
"""
def __init__(self, sw=[], srv=[], graph=None, name=""):
"""
sw: list of switch names governed by this controller
srv: list of servers known by this controller
        to which requests may be dispatched
graph: A copy of the simulation graph is given to each controller
instance at the time of simulation initialization
name: string representation, should be unique in a simulation
        mylinks: a list of links in the self.graph which are governed by
this controller, inferred from switches
active_flows: used to track the (timeout, path) of all active flows
"""
self.switches = sw
self.servers = srv
self.graph = graph
self.name = name
self.active_flows = []
# Inferred from graph
self.localservers = []
self.mylinks = []
def __str__(self):
return "Controller %s of: %s" % (self.name, str(self.switches))
def set_name(self, name):
self.name = name
def set_graph(self, graph):
        self.graph = graph
def get_switches(self):
return self.switches
def handle_request(self):
raise NotImplementedError("Controller does not implement __name__")
def sync_toward(self, ctrl=None):
raise NotImplementedError("Controller does not implement __name__")
class LinkBalancerCtrl(Controller):
"""
Control logic for link balancer: Tracks link capacities of associated
switches, and decides how to map requests such to minimize the maximum link
utilization over all visible links
"""
def __init__(self, *args, **kwargs):
"""Reuse __init__ of our superclass"""
super(LinkBalancerCtrl, self).__init__(*args, **kwargs)
def learn_local_servers(self):
"""
Learn the servers of the sim graph that are within my domain
        Requires that the controller be initialized by the simulation
"""
assert len(self.mylinks) > 0
assert len(self.switches) > 0
assert self.graph != None
localservers = []
for srv in self.servers:
neighbor_sw = self.graph.neighbors(srv)
if len(neighbor_sw) != 1:
raise NotImplementedError("Single server links only")
else:
neighbor_sw = neighbor_sw[0]
if (neighbor_sw in self.switches):
localservers.append(srv)
# remove duplicates
self.localservers = list(set(localservers))
def learn_my_links(self):
"""
Learn the links of a graph that are directly observable by me
e.g. which are directly connected to my switches
Optionally, learn my links from a graph that is not my own
"""
assert (self.graph != None)
links = self.graph.edges()
mylinks = []
for link in links:
u, v = link[:2]
if (v in self.switches or u in self.switches):
self.graph[u][v]['mylink'] = True
mylinks.append((u, v))
# remove duplicates
self.mylinks = list(set(mylinks))
def update_my_state(self, simgraph):
"""
This action is akin to when a controller polls the switchport counters
of its switches: The controller will update the 'used' values each
link in the simulation graph which it governs
"""
for link in self.mylinks:
u, v = link
if not (self.graph[u][v]['used'] == simgraph[u][v]['used']):
self.graph[u][v]['used'] = simgraph[u][v]['used']
def sync_toward(self, dstctrl, specificedges=None, timestep=None):
"""
Share the utilization state of links goverend by this controller with
another controller in a "push" fashion
Optionally specify only specific links (edges) to share with the other dstctrl
In the corner case, where a link crosses a domain, its state is owned
by both controllers and not modified during sync. When two controllers
share ownership of a link and hold different state for it, the
controllers can not resolve their different views throgh sync. In the
simulation, this scenario will never emerge as long as controllers
learn their link state (learn_my_state) from the simulation graph
before handling requests.
"""
if (specificedges):
mylinks = specificedges
else:
mylinks = self.mylinks
for link in mylinks:
u, v = link
# A controller should only accept state updates to links that do
# not belong to its own domain.
if not (dstctrl.graph[u][v].get('mylink')):
dstctrl.graph[u][v]['used'] = self.graph[u][v]['used']
dstctrl.graph[u][v]['timestamp'] = timestep
logging.debug("%s syncs toward %s" % (self.name, dstctrl.name))
def get_srv_paths(self, sw, graph=None, local=False):
"""
Return a list of all paths from available servers to the entry
switch which can respond. We make the assumption here that the path list
(routing) is known and static
If local , Return only paths to servers within this controller's domain
"""
if graph == None:
graph = self.graph
paths = []
if local:
avail_srvs = self.localservers
else:
avail_srvs = self.servers
assert graph != None
assert len(sw) > 0
assert len(avail_srvs)> 0
for server in avail_srvs:
paths.append(nx.shortest_path(graph, server, sw))
return paths
def compute_path_metric(self, sw, path, util, time_now):
"""
        Return a pathmetric rating the utilization of the path. The pathmetric
        is a real number in [0,1] which is the max (worst) of all linkmetrics
        for all links in the path
"""
pathmetric = 1
linkmetrics = []
links = zip(path[:-1], path[1:])
# calculate available capacity for each link in path
for link in links:
u, v = link
#DESIGN CHOICE: Should we 1) always include extra-domain state, 2)
#only include extra-domain state when not stale (timestamp), 3) always exclude
#extra-domain state when calculating the path metric? Here we do (1)
used = self.graph[u][v]['used'] + util
capacity = self.graph[u][v]['capacity']
linkmetric = float(used) / capacity
# If the controller estimates it would oversubscribe this link
if linkmetric > 1:
logging.info("[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]", str(time_now), linkmetric, str(sw))
break
else:
linkmetrics.append(linkmetric)
# We define pathmetric to be the worst link metric in path
if len(linkmetrics) > 0:
pathmetric = max(linkmetrics)
funname = sys._getframe().f_code.co_name
logging.debug("[%s] [%s] [%s] [%s]", funname, str(time_now), str(self),
str((path, linkmetrics)))
return (pathmetric, len(links))
def find_best_path(self, paths, sw, util, duration, time_now):
bestpath = None
bestpathmetric = None # [0,1] lower -> better path
bestpathlen = None # lower -> better path
for path in paths:
pathmetric, pathlen = self.compute_path_metric(sw, path, util, time_now)
#DESIGN CHOICE: We pick the path with the best pathmetric.
# If multiple path metrics tie, we pick the path with the shortest
# length
if (bestpathmetric == None):
bestpath = path
bestpathmetric = pathmetric
bestpathlen = pathlen
elif (pathmetric < bestpathmetric):
bestpath = path
bestpathmetric = pathmetric
bestpathlen = pathlen
elif (pathmetric == bestpathmetric and pathlen < bestpathlen):
bestpath = path
bestpathmetric = pathmetric
bestpathlen = pathlen
if (bestpath == None):
return None
funname = sys._getframe().f_code.co_name
logging.debug("[%s] [%s] [%s] [%s] [%s] [%s]",
funname, str(time_now), str(self), str(bestpath),
str(bestpathlen), str(bestpathmetric))
return (bestpath, bestpathmetric)
def handle_request(self, sw, util, duration, time_now):
"""
Given a request that utilizes some bandwidth for a duration, map
that request to an available path such that max link bandwidth util is
minimized
sw: switch at which request arrives
util: link utilization to be consumed by this flow
duration: time over which flow consumes resources
@return the chosen best path as a list of consecutive link pairs
((c1,sw1), (sw1,sw2),...,(sw_n, srv_x))
"""
#logging.debug(str(self.graph.edges(data=True)))
#1 Get available paths from servers to switch
paths = self.get_srv_paths(sw, self.graph)
#2 choose the path which mins the max link utilization for all links
# along the path
bestpath, bestpm = self.find_best_path(paths, sw, util, duration, time_now)
if len(bestpath) > 0:
self.allocate_resources(bestpath, util, time_now, duration)
else:
logging.warn("[%s] No best path found at switch [%s]", str(time_now), str(sw))
return bestpath
class GreedyLinkBalancerCtrl(LinkBalancerCtrl):
"""
A Greedy variant of the LinkBalancerCtrl which assigns all flows only to
servers in its own domain (local) until doing so would require the pathmetric to
exceed the greedylimit. Only when it is impossible to assign a flow to a
local server without the pathmetric exceeding the greedylimit, is the
controller allowed to send it to a server out of the domain.
greedylimit: A value between [0,1]. A greedylimit of 1 means keep all flows
in our domain until doing so would oversubscribe a link.
"""
def __init__(self, greedylimit, *args, **kwargs):
super(GreedyLinkBalancerCtrl, self).__init__(*args, **kwargs)
self.greedylimit = greedylimit
def handle_request(self, sw, util, duration, time_now):
#Find a best path to a server in our domain
paths = self.get_srv_paths(sw, self.graph, local=True)
bestpath, bestpm = self.find_best_path(paths, sw, util, duration, time_now)
if (bestpm > self.greedylimit):
oldbestpath = bestpath
oldbestpm = bestpm
#If the best path in our domain violates our greedy limit, find a
# best path to a server outside our domain
if (bestpath == None or bestpm > self.greedylimit):
paths = self.get_srv_paths(sw, self.graph)
bestpath, bestpm = self.find_best_path(paths, sw, util, duration, time_now)
#DESIGN CHOICE: If the bestpm has a worse pathmetric
# than the oldbestpm, should we return oldbestpath instead?
if len(bestpath) > 0:
self.allocate_resources(bestpath, util, time_now, duration)
else:
logging.warn("[%s] No best path found at switch [%s]", str(time_now), str(sw))
logging.debug(str(bestpath))
return bestpath
class SeparateStateLinkBalancerCtrl(LinkBalancerCtrl):
"""
    This controller keeps extra-domain link state obtained through sync events
    separate from the extra-domain state it infers by tracking its own
    contributed load.
alpha: Scaling factor for redistributing the load across links between sync
events
"""
def __init__(self, alpha, *args, **kwargs):
super(SeparateStateLinkBalancerCtrl, self).__init__(*args, **kwargs)
self.alpha = alpha
def sync_toward(self, dstctrl, specificedges=None, timestep=None):
"""
        Share the utilization state of links governed by this controller with
        another controller in a "push" fashion. Optionally specify only specific
        links (edges) to share with the other dstctrl.
"""
if (specificedges):
mylinks = specificedges
else:
mylinks = self.mylinks
for link in mylinks:
u, v = link
# A controller should only accept state updates to links that do
# not belong to its own domain.
if not (dstctrl.graph[u][v].get('mylink')):
dstctrl.graph[u][v]['sync_learned'] = self.graph[u][v]['used']
dstctrl.graph[u][v]['timestamp'] = timestep
logging.debug("%s syncs toward %s" % (self.name, dstctrl.name))
def compute_path_metric(self, sw, path, util, time_now, local_contrib):
"""
        Return a pathmetric rating the utilization of the path. The pathmetric
        is a real number in [0,1] which is the max (worst) of all linkmetrics
        for all links in the path
"""
pathmetric = 1
linkmetrics = []
links = zip(path[:-1], path[1:])
# calculate available capacity for each link in path
for link in links:
u, v = link
# Use the last-learned-via-sync value for a link
if (not local_contrib) and 'sync_learned' in self.graph[u][v]:
used1 = self.graph[u][v]['sync_learned'] + util
used2 = self.graph[u][v]['used'] + util
# ['used'] is a strict lower bound for ['sync_learned']
if used1 > used2:
used = used1
logging.debug("CS [%s] using sync_learned value 1 [%f]", str(self.name), used1)
else:
used = used2
logging.debug("CS [%s] using sync_learned value 2 [%f]", str(self.name), used2)
else:
logging.debug("CS [%s] using tracking value", str(self.name))
used = self.graph[u][v]['used'] + util
capacity = self.graph[u][v]['capacity']
linkmetric = float(used) / capacity
# If the controller estimates it would oversubscribe this link
if linkmetric > 1:
logging.info("[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]", str(time_now), linkmetric, str(sw))
break
else:
linkmetrics.append(linkmetric)
# We define pathmetric to be the worst link metric in path
if len(linkmetrics) > 0:
pathmetric = max(linkmetrics)
funname = sys._getframe().f_code.co_name
logging.debug("[%s] [%s] [%s] [%s]", funname, str(time_now), str(self),
str((path, linkmetrics)))
return (pathmetric, len(links))
def calculate_what_to_shift(self, paths, sw):
"""
        Calculate the current ratio of max(sync_learned, my contributed)
        utilization across the two paths; corresponds to figure 1 in the drawing.
"""
pathmetrics = {}
for path in paths:
metric, length = self.compute_path_metric(sw, path, 0, 0, local_contrib=False)
assert metric >= 0
pathmetrics[metric] = path
metrics = pathmetrics.keys()
logging.debug("SS CWTS PATH METRICS:, %s", str(pathmetrics))
balanced_metric = sum(metrics)/len(metrics)
if max(metrics) == 0:
logging.debug("SS CWTS MAX METRIC is 0")
shift_by = 0
shift_from_path = None
else:
logging.debug("SS max(metrics) is %s", str(max(metrics)))
logging.debug("SS balanced metrics is %s", str(balanced_metric))
shift_by = (max(metrics) - balanced_metric)/max(metrics)
shift_from_path = pathmetrics[max(metrics)]
logging.debug("SS CWTS SHIFT FROM: %s", str(shift_from_path))
logging.debug("SS CWTS SHIFT BY: %s", str(shift_by))
return(shift_from_path, shift_by)
def find_best_path(self, paths, sw, util, duration, time_now):
"""
        Calculate the current ratio of my contributed utilization across the two
        paths and pick which path to return; corresponds to figure 1 in the drawing.
"""
bestpath = None
bestpathmetric = None # [0,1] lower means better path
bestpathlen = None # lower -> better path
candidatepaths = []
assert len(paths) == 2
path_to_shift, shift_by = self.calculate_what_to_shift(paths, sw)
pathmetrics = {}
paths_by_length = {}
metrics = []
metricpaths = {}
for path in paths:
metric, length = self.compute_path_metric(sw, path, 0, 0, local_contrib=True)
paths_by_length[length] = path
metrics.append(metric)
assert metric >= 0
pathmetrics[" ".join(path)] = metric
metricpaths[metric] = path
logging.debug("SS FBP PATH METRICS:, %s", str(metricpaths))
if path_to_shift == None:
# return shortest path
logging.debug("SS FBP Returning LOCAL: %s", str((paths_by_length[min(paths_by_length.keys())],0)))
return (paths_by_length[min(paths_by_length.keys())], 0)
path_to_shift_metric = pathmetrics.pop(" ".join(path_to_shift))
path_to_receive_metric = pathmetrics.pop(pathmetrics.keys()[0])
logging.debug("SS FBP Path to Recv: %s", str(metricpaths[path_to_receive_metric]))
if (path_to_receive_metric == 0):
logging.debug("SS FBP EARLY Returning : %s", str((metricpaths[min(metrics)], 0)))
return (metricpaths[min(metrics)], 0)
else:
current_ratio = path_to_shift_metric * 1.0 / path_to_receive_metric
logging.debug("SS FBP CURRENT RATIO: %s", str(current_ratio))
goal_path_to_shift_metric = path_to_shift_metric * (1 - (shift_by * self.alpha))
goal_path_to_receive_metric = path_to_receive_metric + (path_to_shift_metric * (shift_by * self.alpha))
if (goal_path_to_receive_metric == 0):
# large number for practical purposes
goal_ratio = 100000
else:
goal_ratio = goal_path_to_shift_metric * 1.0 / goal_path_to_receive_metric
logging.debug("SS FBP GOAL RATIO: %s", str(goal_ratio))
# FINALLY DECIDE WHICH PATH TO RETURN BASED ON GOAL-Current RATIO
if goal_ratio - current_ratio < 0:
# return path with lower utiliztion
logging.debug("SS FBP LOWER Returning : %s", str((metricpaths[min(metrics)], 0)))
return (metricpaths[min(metrics)], 0)
if goal_ratio - current_ratio > 0:
# return path with higher utilization
logging.debug("SS FBP HIGHER Returning : %s", str((metricpaths[max(metrics)], 0)))
return (metricpaths[max(metrics)], 0)
if goal_ratio - current_ratio == 0:
# return shortest path
logging.debug("SS FBP Returning LOCAL: %s",
str((paths_by_length[min(paths_by_length.keys())], 0)))
return (paths_by_length[min(paths_by_length.keys())], 0)
class RandomChoiceCtrl(LinkBalancerCtrl):
"""
This controller picks a path at random
"""
def __init__(self, *args, **kwargs):
super(RandomChoiceCtrl, self).__init__(*args, **kwargs)
def handle_request(self, sw, util, duration, time_now):
#Find a best path to a server in our domain
paths = self.get_srv_paths(sw, self.graph)
return choice(paths)
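# Minimal usage sketch (illustrative): wire a LinkBalancerCtrl to a toy
# two-switch topology by hand. In the real simulator this initialization is
# done by the simulation itself; the capacities, utilization and request
# values below are arbitrary, and the networkx 1.x-era API this module
# relies on (e.g. list-returning neighbors()) is assumed.
if __name__ == "__main__":
    g = nx.Graph()
    g.add_edge('srv1', 'sw1', capacity=100, used=0.0)
    g.add_edge('srv2', 'sw2', capacity=100, used=0.0)
    g.add_edge('sw1', 'sw2', capacity=50, used=0.0)
    ctrl = LinkBalancerCtrl(sw=['sw1', 'sw2'], srv=['srv1', 'srv2'],
                            graph=g, name='c0')
    ctrl.learn_my_links()
    ctrl.learn_local_servers()
    # Map a flow of utilization 10 arriving at sw1 lasting 5 time units
    print(ctrl.handle_request('sw1', 10, 5, time_now=0))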
| bsd-3-clause |
eg-zhang/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
sanderroobol/spacetime | lib/spacetime/plot.py | 2 | 9435 | # This file is part of Spacetime.
#
# Copyright 2010-2014 Leiden University.
# Written by Sander Roobol.
#
# Spacetime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Spacetime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import numpy
import matplotlib, matplotlib.figure, matplotlib.dates, matplotlib.gridspec
from . import util
class AbsoluteGridSpec(matplotlib.gridspec.GridSpecBase):
def __init__(self, nrows, ncols,
margins=(.75, .75, .75, .75),
spacing=(.75, .75),
ratios=(None, None)):
"""gridspec in absolute units
all sizes in inches
* margins: top, right, bottom, left
* spacing: horizontal, vertical
* ratios: like GridSpec's ratios
"""
self._margins = margins
self._spacing = spacing
matplotlib.gridspec.GridSpecBase.__init__(self, nrows, ncols,
width_ratios=ratios[0],
height_ratios=ratios[1])
@staticmethod
def _divide_into_cells(size, n, margin_low, margin_high, spacing, ratios):
total = size - margin_low - margin_high - (n-1)*spacing
cell = total / n
if ratios is None:
cells = [cell] * n
else:
tr = sum(ratios)
cells = [total*r/tr for r in ratios]
seps = [0] + ([spacing] * (n-1))
cells = numpy.add.accumulate(numpy.ravel(zip(seps, cells)))
lowers = [(margin_low + cells[2*i])/size for i in range(n)]
uppers = [(margin_low + cells[2*i+1])/size for i in range(n)]
return lowers, uppers
def get_grid_positions(self, fig):
width, height = fig.get_size_inches()
mtop, mright, mbottom, mleft = self._margins
hspace, vspace = self._spacing
if self._row_height_ratios:
# plot numbering should start at top, but y-coordinates start at bottom
height_ratios = self._row_height_ratios[::-1]
else:
height_ratios = None
figBottoms, figTops = self._divide_into_cells(height, self._nrows, mbottom, mtop, vspace, height_ratios)
figLefts, figRights = self._divide_into_cells(width, self._ncols, mleft, mright, hspace, self._col_width_ratios)
return figBottoms[::-1], figTops[::-1], figLefts, figRights
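def _demo_absolute_gridspec():
    # Illustrative sketch (not called anywhere in this module): lay out three
    # stacked axes with fixed margins and spacing in inches; figure size,
    # margins and ratios are arbitrary example values, and the Python 2-era
    # matplotlib environment this module targets is assumed.
    fig = matplotlib.figure.Figure(figsize=(8, 6))
    gs = AbsoluteGridSpec(3, 1,
                          margins=(.5, .5, .5, .5),  # top, right, bottom, left
                          spacing=(.25, .25),
                          ratios=(None, (1, 2, 1)))
    axes = [fig.add_subplot(gs[i, 0]) for i in range(3)]
    return fig, axes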
class Marker(object):
def __init__(self, left, right=None):
self.callbacks = []
self._set_params(left, right)
def add_callback(self, callback):
self.callbacks.append(callback)
def clear(self):
for callback in self.callbacks:
callback()
def draw(self):
for s in self.plot.subplots:
s.draw_marker(self)
def _set_params(self, left, right=None):
self.clear()
self.left = left
self.right = right
def move(self, left, right=None):
self._set_params(left, right)
self.draw()
def interval(self):
return self.right is not None
class Markers(object):
def __init__(self, parent):
self.parent = parent
self.clear()
def add(self, *args, **kwargs):
marker = Marker(*args, **kwargs)
marker.plot = self.parent
self.markers.append(marker)
marker.draw()
return marker
def redraw(self):
for marker in self.markers:
marker.clear()
marker.draw()
def clear(self):
self.markers = []
def remove(self, marker):
self.markers.remove(marker)
marker.clear()
def __iter__(self):
return iter(self.markers)
AutoDateFormatterScaling = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60. * 60.): '%H:%M:%S',
1. / (24. * 60. * 60. * 2): '%H:%M:%S.%f',
}
class Plot(object):
shared_xlim_callback_ext = None
shared_xmin = 0.
shared_xmax = 1.
shared_xauto = True
subplots = []
master_axes = None
rezero = False
rezero_unit = 1.
rezero_offset = 0.
def __init__(self, figure):
self.figure = figure
self.markers = Markers(self)
self.clear()
@classmethod
def newpyplotfigure(klass, size=(14,8)):
import matplotlib.pyplot
return klass(matplotlib.pyplot.figure(figsize=size))
@classmethod
def newmatplotlibfigure(klass):
return klass(matplotlib.figure.Figure())
@classmethod
def autopyplot(klass, *subplots, **kwargs):
import matplotlib.pyplot
plot = klass.newpyplotfigure(**kwargs)
for p in subplots:
plot.add_subplot(p)
plot.setup()
plot.draw()
matplotlib.pyplot.show()
return plot
def relocate(self, figure):
self.clear()
self.figure = figure
def clear(self):
for p in self.subplots:
p.clear(quick=True)
self.figure.clear()
self.markers.clear()
self.subplots = []
self.independent_axes = []
self.shared_axes = []
self.twinx_axes = []
def add_subplot(self, subplot):
subplot.parent = self
self.subplots.append(subplot)
def setup(self):
req = []
for p in self.subplots:
req.extend((p, r) for r in p.get_axes_requirements())
total = len(req)
ret = []
shared = None
gridspec = AbsoluteGridSpec(
total, 1,
margins=(.2, .75, .75, .75),
spacing=(.2, .2),
ratios=(None, tuple(r.size for (p, r) in req))
)
for i, (p, r) in enumerate(req):
if r.independent_x:
axes = self.figure.add_subplot(gridspec[i, 0])
self.independent_axes.append(axes)
else:
if shared:
axes = self.figure.add_subplot(gridspec[i, 0], sharex=shared)
else:
shared = axes = self.figure.add_subplot(gridspec[i, 0])
self.shared_axes.append(axes)
self.setup_xaxis_labels(axes)
axes.autoscale(False)
if r.twinx:
twin = axes.twinx()
self.twinx_axes.append(twin)
if not r.independent_x:
self.setup_xaxis_labels(twin)
axes = (axes, twin)
twin.autoscale(False)
ret.append((p, axes))
for p, groups in itertools.groupby(ret, key=lambda x: x[0]):
p.set_axes(list(axes for (subplot, axes) in groups))
for p in self.subplots:
p.setup()
if self.shared_axes:
self.master_axes = self.shared_axes[-1]
for axes in self.shared_axes:
axes.callbacks.connect('xlim_changed', self.shared_xlim_callback)
else:
self.master_axes = None
def draw(self):
for p in self.subplots:
p.draw()
def setup_xaxis_labels(self, axes):
if not self.rezero:
axes.xaxis_date(tz=util.localtz)
# Timezone support is not working properly with xaxis_date(), so override manually
locator = matplotlib.dates.AutoDateLocator(tz=util.localtz)
axes.xaxis.set_major_locator(locator)
formatter = matplotlib.dates.AutoDateFormatter(locator, tz=util.localtz)
formatter.scaled = AutoDateFormatterScaling
axes.xaxis.set_major_formatter(formatter)
if hasattr(axes, 'is_last_row') and axes.is_last_row():
if not self.rezero:
for label in axes.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
else:
for label in axes.get_xticklabels():
label.set_visible(False)
def shared_xlim_callback(self, ax):
self.shared_xmin, self.shared_xmax = self.get_ax_limits(ax)
if self.shared_xlim_callback_ext:
self.shared_xlim_callback_ext(ax)
def set_shared_xlim_callback(self, func):
self.shared_xlim_callback_ext = func
def autoscale(self, subplot=None):
if subplot:
subplots = [subplot]
else:
subplots = self.subplots
# this silently assumes that a single subplot will not have multiple
# graphs with mixed shared/non-shared x-axis
shared_xlim_rescale = False
for subplot in subplots:
subplot.ylim_rescale()
try:
subplot.xlim_rescale()
except (AttributeError, util.SharedXError):
shared_xlim_rescale = True
if shared_xlim_rescale:
self.shared_xlim_rescale()
def set_shared_xlim(self, min, max, auto):
self.shared_xmin = min
self.shared_xmax = max
self.shared_xauto = auto
self.shared_xlim_rescale()
if self.master_axes:
return self.get_ax_limits(self.master_axes)
else:
return self.shared_xmin, self.shared_xmax
def shared_xlim_rescale(self):
if not self.master_axes:
return
if self.shared_xauto:
self.autoscale_shared_x()
else:
self.master_axes.set_xlim(self.correct_time(self.shared_xmin), self.correct_time(self.shared_xmax))
def autoscale_shared_x(self):
# NOTE: this is a workaround for matplotlib's internal autoscaling routines.
# it imitates axes.autoscale_view(), but only takes the dataLim into account when
# there are actually some lines or images in the graph
# see also Subplots.autoscale_x
dl = [ax.dataLim for ax in self.shared_axes + self.twinx_axes if ax.lines or ax.images or ax.patches]
if dl:
bb = matplotlib.transforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
XL = self.master_axes.xaxis.get_major_locator().view_limits(x0, x1)
self.master_axes.set_xlim(XL)
def set_rezero_opts(self, enable, unit, offset):
self.rezero = enable
self.rezero_unit = unit
self.rezero_offset = offset
def correct_time(self, value):
if self.rezero:
return (value - self.rezero_offset) * self.rezero_unit
else:
return value
def correct_time_inverse(self, value):
if self.rezero:
return value / self.rezero_unit + self.rezero_offset
else:
return value
def get_ax_limits(self, ax):
low, up = ax.get_xlim()
return self.correct_time_inverse(low), self.correct_time_inverse(up)
| gpl-2.0 |
khkaminska/bokeh | examples/charts/file/hover_span.py | 8 | 2890 | from bokeh.models import HoverTool
from bokeh.charts import Line, Scatter, vplot, hplot, show, output_file, defaults
import pandas as pd
from bokeh.sampledata.degrees import xyvalues
defaults.width = 500
defaults.height = 300
xyvalues = xyvalues[['Biology', 'Business', 'Computer Science', "Year"]]
xyvalues = pd.melt(xyvalues, id_vars=['Year'],
value_vars=['Biology', 'Business', 'Computer Science'],
value_name='Count', var_name='Degree')
TOOLS='box_zoom,box_select,hover,crosshair,resize,reset'
output_file("hover_span.html", title="line.py example")
vline = Line(xyvalues, y='Count', color='Degree', title="Lines VLine", ylabel='measures',
tools=TOOLS)
hline = Line(xyvalues, y='Count', color='Degree', title="Lines HLine",
ylabel='measures', tools=TOOLS)
int_vline = Line(xyvalues, y='Count', color='Degree', title="Lines VLine Interp",
ylabel='measures', tools=TOOLS)
int_hline = Line(xyvalues, y='Count', color='Degree', title="Lines HLine Interp",
ylabel='measures', tools=TOOLS)
scatter_point = Scatter(xyvalues, x='Year', y='Count', color='Degree',
title="Scatter mouse", ylabel='measures', legend=True,
tools=TOOLS)
scatter = Scatter(xyvalues, x='Year', y='Count', color='Degree',
title="Scatter V Line", ylabel='measures', legend=True, tools=TOOLS)
int_point_line = Line(xyvalues, x='Year', y='Count', color='Degree',
title="Lines Mouse Interp.", ylabel='measures', tools=TOOLS)
point_line = Line(xyvalues, x='Year', y='Count', color='Degree',
title="Lines Mouse", ylabel='measures', tools=TOOLS)
hhover = hline.select(dict(type=HoverTool))
hhover.mode = 'hline'
hhover.line_policy = 'next'
vhover = vline.select(dict(type=HoverTool))
vhover.mode = 'vline'
vhover.line_policy = 'nearest'
int_hhover = int_hline.select(dict(type=HoverTool))
int_hhover.mode = 'hline'
int_hhover.line_policy = 'interp'
int_vhover = int_vline.select(dict(type=HoverTool))
int_vhover.mode = 'vline'
int_vhover.line_policy = 'interp'
iphover = int_point_line.select(dict(type=HoverTool))
iphover.mode = 'mouse'
iphover.line_policy = 'interp'
tphover = point_line.select(dict(type=HoverTool))
tphover.mode = 'mouse'
shover = scatter.select(dict(type=HoverTool))
shover.mode = 'vline'
shoverp = scatter_point.select(dict(type=HoverTool))
shoverp.mode = 'mouse'
TOOLTIPS = [
("y", "$~y"),
("x", "$~x"),
]
int_vhover.tooltips = int_hhover.tooltips = TOOLTIPS
tphover.tooltips = iphover.tooltips = TOOLTIPS
shover.tooltips = shoverp.tooltips = TOOLTIPS
vhover.tooltips = hhover.tooltips = TOOLTIPS
show(
vplot(
hplot(hline, vline),
hplot(int_hline, int_vline),
hplot(int_point_line, point_line),
hplot(scatter_point, scatter),
)
)
| bsd-3-clause |
LohithBlaze/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
evgchz/scikit-learn | sklearn/utils/extmath.py | 14 | 20521 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state, deprecated
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
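# Illustrative check (not part of the original module; `X` is an ad hoc array):
#   X = np.random.rand(5, 3)
#   np.allclose(row_norms(X, squared=True), (X * X).sum(axis=1))   # -> True
#   np.allclose(row_norms(X), np.sqrt((X * X).sum(axis=1)))        # -> True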
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)) but more robust.
    It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
        This helps avoid extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
    Q: 2D array
        A (A.shape[0] x size) matrix with orthonormal columns, whose range
        approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
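# Hedged usage sketch (array names are illustrative): for an M of shape (100, 20),
#   U, s, V = randomized_svd(M, n_components=5)
# returns U of shape (100, 5), s of shape (5,) and V of shape (5, 20), so that
# np.dot(U * s, V) gives a rank-5 approximation of M.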
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : arrays
The output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`,
with matching inner dimensions so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
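# Illustrative property (ad hoc names, not part of the original module): given
#   u, s, v = linalg.svd(X, full_matrices=False)
#   uf, vf = svd_flip(u, v)
# the sign flips cancel in the reconstruction, so that
#   np.allclose(np.dot(u * s, v), np.dot(uf * s, vf))   # -> True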
@deprecated('to be removed in 0.17; use scipy.special.expit or log_logistic')
def logistic_sigmoid(X, log=False, out=None):
"""Logistic function, ``1 / (1 + e ** (-x))``, or its log."""
from .fixes import expit
fn = log_logistic if log else expit
return fn(X, out)
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
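# Illustrative property (X1 and X2 are ad hoc arrays): chaining the update over two
# batches reproduces the full-batch statistics (population variance, ddof=0):
#   mean, var, count = _batch_mean_variance_update(
#       X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
#   np.allclose(mean, np.vstack([X1, X2]).mean(axis=0))   # -> True
#   np.allclose(var, np.vstack([X1, X2]).var(axis=0))     # -> True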
| bsd-3-clause |
jrleeman/MetPy | metpy/plots/wx_symbols.py | 2 | 8517 | # Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Simplify using the weather symbol font.
See WMO manual 485 Vol 1 for more info on the symbols.
"""
import matplotlib.font_manager as fm
from pkg_resources import resource_filename
from ..package_tools import Exporter
exporter = Exporter(globals())
# Create a matplotlib font object pointing to our weather symbol font
wx_symbol_font = fm.FontProperties(fname=resource_filename('metpy.plots',
'fonts/wx_symbols.ttf'))
# Deal with the fact that Python 2 chr() can't handle unicode, but unichr is gone
# on python 3
try:
code_point = unichr
except NameError:
code_point = chr
class CodePointMapping(object):
"""Map integer values to font code points."""
def __init__(self, num, font_start, font_jumps=None, char_jumps=None):
"""Initialize the instance.
Parameters
----------
num : int
The number of values that will be mapped
font_start : int
The first code point in the font to use in the mapping
font_jumps : list[int, int], optional
Sequence of code point jumps in the font. These are places where the next
font code point does not correspond to a new input code. This is usually caused
by there being multiple symbols for a single code. Defaults to :data:`None`, which
indicates no jumps.
char_jumps : list[int, int], optional
Sequence of code jumps. These are places where the next code value does not
            have a valid code point in the font. This usually comes from places in the WMO
table where codes have no symbol. Defaults to :data:`None`, which indicates no
jumps.
"""
next_font_jump = self._safe_pop(font_jumps)
next_char_jump = self._safe_pop(char_jumps)
font_point = font_start
self.chrs = []
code = 0
while code < num:
if next_char_jump and code >= next_char_jump[0]:
jump_len = next_char_jump[1]
code += jump_len
self.chrs.extend([''] * jump_len)
next_char_jump = self._safe_pop(char_jumps)
else:
self.chrs.append(code_point(font_point))
if next_font_jump and code >= next_font_jump[0]:
font_point += next_font_jump[1]
next_font_jump = self._safe_pop(font_jumps)
code += 1
font_point += 1
@staticmethod
def _safe_pop(l):
"""Safely pop from a list.
Returns None if list empty.
"""
return l.pop(0) if l else None
def __call__(self, code):
"""Return the Unicode code point corresponding to `code`."""
return self.chrs[code]
def __len__(self):
"""Return the number of codes supported by this mapping."""
return len(self.chrs)
def alt_char(self, code, alt):
"""Get one of the alternate code points for a given value.
        In the WMO tables, some codes have multiple symbols. This allows getting that
        symbol rather than the main one.
Parameters
----------
code : int
The code for looking up the font code point
alt : int
The number of the alternate symbol
Returns
-------
int
The appropriate code point in the font
"""
return code_point(ord(self(code)) + alt)
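# Hedged usage sketch: with the mappings defined below, a WMO symbol can be drawn on
# a matplotlib Axes by rendering its code point with the symbol font, e.g.
#   ax.text(x, y, current_weather(63), fontproperties=wx_symbol_font)
# (the Axes `ax` and the coordinates are illustrative; 63 is one of the valid
# current-weather codes handled by the mapping).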
#
# Set up mapping objects for various groups of symbols. The integer values follow from
# the WMO.
#
with exporter:
#: Current weather
current_weather = CodePointMapping(100, 0xE9A2, [(7, 2), (93, 2), (94, 2), (95, 2),
(97, 2)], [(0, 4)])
#: Current weather from an automated station
current_weather_auto = CodePointMapping(100, 0xE94B, [(92, 2), (95, 2)],
[(6, 4), (13, 5), (19, 1), (36, 4), (49, 1),
(59, 1), (69, 1), (79, 1), (88, 1), (97, 2)])
#: Low clouds
low_clouds = CodePointMapping(10, 0xE933, [(7, 1)], [(0, 1)])
#: Mid-altitude clouds
mid_clouds = CodePointMapping(10, 0xE93D, char_jumps=[(0, 1)])
#: High clouds
high_clouds = CodePointMapping(10, 0xE946, char_jumps=[(0, 1)])
#: Sky cover symbols
sky_cover = CodePointMapping(12, 0xE90A)
#: Pressure tendency
pressure_tendency = CodePointMapping(10, 0xE900)
#####################################################################
# This dictionary is for mapping METAR present weather text codes
# to WMO codes for plotting wx symbols along with the station plots.
# Pages II-4-3 and II-4-4 of this document describes the difference between
# manned and automated stations:
# https://github.com/Unidata/MetPy/files/1151142/485_Vol_I_en.pdf
# It may become necessary to add automated station wx_codes in the future,
# but that will also require knowing the status of all stations.
wx_code_map = {'': 0, 'M': 0, 'TSNO': 0, 'VA': 4, 'FU': 4,
'HZ': 5, 'DU': 6, 'BLDU': 7, 'SA': 7,
'BLSA': 7, 'VCBLSA': 7, 'VCBLDU': 7, 'BLPY': 7,
'PO': 8, 'VCPO': 8, 'VCDS': 9, 'VCSS': 9,
'BR': 10, 'BCBR': 10, 'BC': 11, 'MIFG': 12,
'VCTS': 13, 'VIRGA': 14, 'VCSH': 16, 'TS': 17,
'THDR': 17, 'VCTSHZ': 17, 'TSFZFG': 17, 'TSBR': 17,
'TSDZ': 17, 'SQ': 18, 'FC': 19, '+FC': 19,
'DS': 31, 'SS': 31, 'DRSA': 31, 'DRDU': 31,
'TSUP': 32, '+DS': 34, '+SS': 34, '-BLSN': 36,
'BLSN': 36, '+BLSN': 36, 'VCBLSN': 36, 'DRSN': 38,
'+DRSN': 38, 'VCFG': 40, 'BCFG': 41, 'PRFG': 44,
'FG': 45, 'FZFG': 49, '-VCTSDZ': 51, '-DZ': 51,
'-DZBR': 51, 'VCTSDZ': 53, 'DZ': 53, '+VCTSDZ': 55,
'+DZ': 55, '-FZDZ': 56, '-FZDZSN': 56, 'FZDZ': 57,
'+FZDZ': 57, 'FZDZSN': 57, '-DZRA': 58, 'DZRA': 59,
'+DZRA': 59, '-VCTSRA': 61, '-RA': 61, '-RABR': 61,
'VCTSRA': 63, 'RA': 63, 'RABR': 63, 'RAFG': 63,
'+VCTSRA': 65, '+RA': 65, '-FZRA': 66, '-FZRASN': 66,
'-FZRABR': 66, '-FZRAPL': 66, '-FZRASNPL': 66, 'TSFZRAPL': 67,
'-TSFZRA': 67, 'FZRA': 67, '+FZRA': 67, 'FZRASN': 67,
'TSFZRA': 67, '-DZSN': 68, '-RASN': 68, '-SNRA': 68,
'-SNDZ': 68, 'RASN': 69, '+RASN': 69, 'SNRA': 69,
'DZSN': 69, 'SNDZ': 69, '+DZSN': 69, '+SNDZ': 69,
'-VCTSSN': 71, '-SN': 71, '-SNBR': 71, 'VCTSSN': 73,
'SN': 73, '+VCTSSN': 75, '+SN': 75, 'VCTSUP': 76,
'IN': 76, '-UP': 76, 'UP': 76, '+UP': 76,
'-SNSG': 77, 'SG': 77, '-SG': 77, 'IC': 78,
'-FZDZPL': 79, '-FZDZPLSN': 79, 'FZDZPL': 79, '-FZRAPLSN': 79,
'FZRAPL': 79, '+FZRAPL': 79, '-RAPL': 79, '-RASNPL': 79,
'-RAPLSN': 79, '+RAPL': 79, 'RAPL': 79, '-SNPL': 79,
'SNPL': 79, '-PL': 79, 'PL': 79, '-PLSN': 79,
'-PLRA': 79, 'PLRA': 79, '-PLDZ': 79, '+PL': 79,
'PLSN': 79, 'PLUP': 79, '+PLSN': 79, '-SH': 80,
'-SHRA': 80, 'SH': 81, 'SHRA': 81, '+SH': 81,
'+SHRA': 81, '-SHRASN': 83, '-SHSNRA': 83, '+SHRABR': 84,
'SHRASN': 84, '+SHRASN': 84, 'SHSNRA': 84, '+SHSNRA': 84,
'-SHSN': 85, 'SHSN': 86, '+SHSN': 86, '-GS': 87,
'-SHGS': 87, 'FZRAPLGS': 88, '-SNGS': 88, 'GSPLSN': 88,
'GSPL': 88, 'PLGSSN': 88, 'GS': 88, 'SHGS': 88,
'+GS': 88, '+SHGS': 88, '-GR': 89, '-SHGR': 89,
'-SNGR': 90, 'GR': 90, 'SHGR': 90, '+GR': 90,
'+SHGR': 90, '-TSRA': 95, 'TSRA': 95, 'TSSN': 95,
'TSPL': 95, '-TSDZ': 95, '-TSSN': 95, '-TSPL': 95,
'TSPLSN': 95, 'TSSNPL': 95, '-TSSNPL': 95, 'TSRAGS': 96,
'TSGS': 96, 'TSGR': 96, '+TSRA': 97, '+TSSN': 97,
'+TSPL': 97, '+TSPLSN': 97, 'TSSA': 98, 'TSDS': 98,
'TSDU': 98, '+TSGS': 99, '+TSGR': 99}
| bsd-3-clause |
huggingface/pytorch-transformers | examples/pytorch/benchmarking/plot_csv_file.py | 4 | 6407 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
"""
    Arguments controlling which benchmark csv file to plot and how to render the figure.
"""
csv_file: str = field(
metadata={"help": "The csv file to plot."},
)
plot_along_batch: bool = field(
default=False,
metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
)
is_time: bool = field(
default=False,
metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
)
no_log_scale: bool = field(
default=False,
metadata={"help": "Disable logarithmic scale when plotting"},
)
is_train: bool = field(
default=False,
metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
},
)
figure_png_file: Optional[str] = field(
default=None,
metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
)
short_model_names: Optional[List[str]] = list_field(
default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
)
def can_convert_to_int(string):
try:
int(string)
return True
except ValueError:
return False
def can_convert_to_float(string):
try:
float(string)
return True
except ValueError:
return False
class Plot:
def __init__(self, args):
self.args = args
self.result_dict = defaultdict(lambda: dict(bsz=[], seq_len=[], result={}))
with open(self.args.csv_file, newline="") as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
model_name = row["model"]
self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
if can_convert_to_int(row["result"]):
# value is not None
self.result_dict[model_name]["result"][
(int(row["batch_size"]), int(row["sequence_length"]))
] = int(row["result"])
elif can_convert_to_float(row["result"]):
# value is not None
self.result_dict[model_name]["result"][
(int(row["batch_size"]), int(row["sequence_length"]))
] = float(row["result"])
def plot(self):
fig, ax = plt.subplots()
title_str = "Time usage" if self.args.is_time else "Memory usage"
title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log")
ax.set_yscale("log")
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
batch_sizes = sorted(list(set(self.result_dict[model_name]["bsz"])))
sequence_lengths = sorted(list(set(self.result_dict[model_name]["seq_len"])))
results = self.result_dict[model_name]["result"]
(x_axis_array, inner_loop_array) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
label_model_name = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
y_axis_array = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
dtype=np.int,
)
else:
y_axis_array = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
dtype=np.float32,
)
(x_axis_label, inner_loop_label) = (
("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
)
x_axis_array = np.asarray(x_axis_array, np.int)[: len(y_axis_array)]
plt.scatter(
x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
)
plt.plot(x_axis_array, y_axis_array, "--")
title_str += f" {label_model_name} vs."
title_str = title_str[:-4]
y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
# plot
plt.title(title_str)
plt.xlabel(x_axis_label)
plt.ylabel(y_axis_label)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
def main():
parser = HfArgumentParser(PlotArguments)
plot_args = parser.parse_args_into_dataclasses()[0]
plot = Plot(args=plot_args)
plot.plot()
if __name__ == "__main__":
main()
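# Hedged invocation sketch: HfArgumentParser turns the PlotArguments fields above into
# CLI flags, so a typical call could look like (file names are placeholders):
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory.png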
| apache-2.0 |
madjelan/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
hadim/spindle_tracker | spindle_tracker/tracker/cost_function/abstract_cost_functions.py | 2 | 2395 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import pandas as pd
__all__ = []
class AbstractCostFunction(object):
"""Abstract class
Parameters
----------
context : dict
parameters : dict
"""
def __init__(self, context, parameters):
self.context = context
self.parameters = parameters
def get_block(self, *args, **kwargs):
"""This method will update the values in `self.mat`. It should be overwritten for any matrix
verification returned by self.build.
"""
self.mat = self._build(*args, **kwargs)
def _build(self):
"""This method needs to be overwritten by subclasses
"""
return None
def check_columns(self, objects, cols):
"""Check pandas.DataFrame column names.
Parameters
----------
objects : list of :class:`pandas.DataFrame` or :class:`pandas.DataFrame`
        cols : list of column names to check
"""
if isinstance(objects, pd.DataFrame):
objects = [objects]
cols_set = set(cols)
for obj in objects:
actual_cols_set = set(obj.columns.values)
if not cols_set.issubset(actual_cols_set):
raise ValueError("The passed dataframe doesn't"
" contain the required columns."
"Missing columns: {}".format(
cols_set.difference(actual_cols_set)))
def check_context(self, key, obj_type):
"""Check wether self.context contain a key on a specific type.
Parameters
----------
key : str
Key to find in self.context.
obj_type : class name
To check context value type.
Returns
-------
The desired key's value in context.
"""
message = "Context {} does not contain required key : {}"
if key not in self.context.keys():
raise ValueError(message.format(self.context, key))
message = "Context value {} does not have valid key type : {}"
if not isinstance(self.context[key], obj_type):
raise TypeError(message.format(self.context[key], obj_type))
return self.context[key]
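# Hedged subclassing sketch (class, key and variable names are illustrative):
#   class DistanceCost(AbstractCostFunction):
#       def _build(self):
#           max_speed = self.check_context('max_speed', float)
#           # ... compute a cost matrix from self.context and self.parameters ...
#           return cost_matrix
# get_block() then stores the result of _build() in self.mat.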
| bsd-3-clause |
rroart/stockstat | python/cli/day.py | 1 | 2573 | import pdutils as pdu
import myutils as my
import numpy as np
import pandas as pd
import const
class DAY:
def __init__(self, stockdata, days):
self.count = 0;
self.stockdata = stockdata
self.days = days
def title(self):
return "DAY";
def names(self):
return "day";
def dfextend(self, df, period, periodtext, sort, interpolate = True, rebase = False, deltadays = 3, reverse = False, interpolation = 'linear'):
#df2 = stockdata.stocklistperiod[period][j + days]
daylist = []
for mydf in df.itertuples():
el = next(x for x in self.stockdata.listid if x.id.iloc[0] == mydf.id)
#df3 = df2[mydf.id == df2.id]
#print("df3", len(df), df3)
el = el.sort_values(by='date', ascending = 1)
#el = listid[mydf.id]
#el = el[order(el.date),]
myc = pdu.getonedfvalue(el, period)
dateslen = len(self.stockdata.listdates)
myclen = len(myc)
mycoffset = dateslen - myclen
#mycoffset = 0
if mycoffset < 0:
print("neg", el.id, el.name)
#print("lens", len(myc), len(self.stockdata.listdates), self.stockdata.dates.startindex, mycoffset, mydf.id, mydf.name)
#mycorig = myc
myc = myc.iloc[0 : self.stockdata.dates.startindex + 1 - mycoffset]
if periodtext == "Price" or periodtext == "Index":
myc = my.fixzero2(myc)
if interpolate:
myc = my.fixna(myc, interpolation)
#print("myc", myc.values)
alen = len(myc)
#print("alen",alen)
if alen - 1 - self.days >= 0:
keys = myc.keys()
valnow = myc[keys[alen - 1]]
valwas = myc[keys[alen - 1 - self.days]]
else:
valnow = np.nan
valwas = np.nan
if mydf.id == '1151606' or mydf.id == '1155463':
print("hiddn", valnow, valwas, valnow / valwas, myc.values)
if not np.isnan(valnow) and not np.isnan(valwas):
daylist.append(valnow / valwas)
else:
daylist.append(None)
df['days'] = pd.Series(data = daylist, name = 'days', index = df.index)
if reverse:
df = df.sort_values(by='days', ascending = 0)
else:
df = df.sort_values(by='days', ascending = 1)
return df
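    # Note (added for clarity): the 'days' column holds the ratio
    # value_now / value_{self.days periods earlier} for each row, so sorting on it
    # ranks rows by their relative change over the requested window.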
def titles(self):
return [ "day" ]
def values(self, df, i):
#import sys
#print(df.columns, file=sys.stderr)
return [ df.days.iloc[i] ]
def formats(self):
return [ "{:.2f}" ]
| agpl-3.0 |
jacobmontiel/cim | code/cascade.py | 1 | 11551 | # -*- coding: utf-8 -*-
"""Cascade Imputation
This is the implementation of the Cascade Imputation algorithm described in:
Jacob Montie, Jesse Read, Albert Bifet and Talel Abdessalem.
“Scalable Model-based Cascaded Imputation of Missing Data”.
ECML PKDD conference. Submitted.
2017.
"""
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier,\
ExtraTreesRegressor, ExtraTreesClassifier
from sklearn import dummy
import utilities as utl
def cascade_imputation(data, classifier, num_attr, cat_attr, verbose=False, fp_log=None, target_col=""):
""" Cascade Imputation
:param data: DataFrame to be processed
:param classifier: Classifier/regressor to be used for imputation
:param num_attr: List of numerical attributes
:param cat_attr: List of categorical attributes
:param verbose: Flag to enable debug messages
:param fp_log: Pointer to log file
:param target_col: Name of the target column, "target" by default
:return: DataFrame with imputed values
"""
vprint = print if verbose else lambda *a, **k: None
# If the data set has no Index column, create it from the index
idx_col = "ID"
if target_col == "":
target_col = "target"
keep_ids = True # Flag to keep ids if they are part of the original data
if idx_col not in data.columns:
print("Adding idx_col")
data[idx_col] = data.index
keep_ids = False
if classifier is None:
classifier = "RF" # Default classifier/regressor
vprint("Using RF as default classifier/regressor")
N, D = data.shape
# Save original order of indices
original_order = data[idx_col]
# Convert 'object' columns
data = utl.convert_obj_to_num(data)
# Mark nulls positions
nulls = pd.isnull(data)
# plt.spy(nulls, aspect='auto')
# Weights [0,1]. 1: Instance has no missing values, 0: Instance has missing values for all attributes
weights = (D - (nulls == 1).astype(int).sum(axis=1)) / D
# Concatenate weights
attr_names = list(data.columns.values) + list(["weights"])
data = pd.concat([data, weights], axis=1)
data.columns = attr_names
D += 1
# Update nulls positions
nulls = pd.isnull(data)
for i in range(2):
# Run the cascade twice in case there are missing values in the first column
count_nan = nulls.sum().sum()
if count_nan > 0:
print(file=fp_log)
print("Cascade imputation run {}: {} missing values to fill".format(i+1, count_nan), file=fp_log)
# Sort columns per missing values count. Ascending order from left to right.
empty_cnt = []
for col in nulls:
count = np.count_nonzero(nulls[col])
empty_cnt.append([col, count])
sorted_attr = [row[0] for row in (sorted(empty_cnt, key=lambda tup: tup[1]))]
print(sorted_attr)
# Move ID, target and weights to the right end of the DataFrame
sorted_attr.remove(idx_col)
sorted_attr.remove(target_col)
sorted_attr.remove('weights')
sorted_attr.append(idx_col)
sorted_attr.append(target_col)
sorted_attr.append('weights')
data = data.reindex(columns=sorted_attr, fill_value=0)
# The main loop to traverse columns
for k in range(1, D - 3):
print("---------------------", file=fp_log)
attr = data.columns[k]
if pd.isnull(data[attr]).sum() > 0:
# Process this feature
print("Feature ({}): {}".format(k+1, attr), file=fp_log)
# Re-order instances, full instances in the top
row_idx = np.argsort(pd.isnull(data[attr]))
# Reset the index to facilitate tracking of train/test split, we will rely on the idx_col
# to reconstruct the original data sets
data = data.reindex(index=row_idx, fill_value=0).reset_index(drop=True)
n = N - np.count_nonzero(pd.isnull(data[attr]))
print("Instances to fill = " + str(N - n), file=fp_log)
# Impute missing data
###################################
print("Make split at column {}/{} and row {}/{}".format(k+1, D, n+1, N), file=fp_log)
K = k + 1 # number of attributes in total (including target column/class)
Xy = data.ix[:, range(K)].as_matrix() # make numpy matrix
X = Xy[0:n, 0:k] # input for training
y = Xy[0:n, k] # target for training
# Protect corner case where train set X has missing values
X_nulls = np.isnan(X)
X_nulls_cnt = X_nulls.sum().sum()
if X_nulls_cnt > 0:
print("WARNING: found ", X_nulls_cnt, " missing values in train set X, will drop instances")
print("WARNING: found ", X_nulls_cnt, " missing values in train set X, will drop instances",
file=fp_log)
nulls_row_idx = np.where(X_nulls.sum(axis=1) > 0)[0]
X = np.delete(X, nulls_row_idx, axis=0)
y = np.delete(y, nulls_row_idx, axis=0)
Xp = Xy[n:, 0:k] # input for prediction
# Protect corner case where prediction set X has missing values
X_nulls = np.isnan(Xp)
X_nulls_cnt = X_nulls.sum().sum()
if X_nulls_cnt > 0:
print("WARNING: found ", X_nulls_cnt, " missing values in train set Xp, will fill value with 0")
print("WARNING: found ", X_nulls_cnt, " missing values in train set Xp, will fill value with 0",
file=fp_log)
Xp[X_nulls] = 0
if X.shape[0] == 0:
print("Not enough samples for training, skipping feature", file=fp_log)
continue
else:
vprint("We want to build a model on training set", X.shape, "and use it on test examples", Xp.shape)
h = None
if data.columns[k] in set(num_attr):
# REGRESSION
vprint("{} data type , using a Regressor".format(data.dtypes[k]), file=fp_log)
if classifier == 'LR':
h = LinearRegression(n_jobs=-1)
elif classifier == 'DT':
h = DecisionTreeRegressor(max_depth=5)
elif classifier == 'RF':
h = RandomForestRegressor(max_depth=4,
n_estimators=100,
random_state=1,
n_jobs=-1)
elif classifier == 'ET':
h = ExtraTreesRegressor(n_estimators=100,
max_features="auto",
criterion='mse',
min_samples_split=4,
max_depth=35,
min_samples_leaf=2,
n_jobs=-1)
else:
vprint("No such specification: ", classifier)
exit(1)
elif data.columns[k] in set(cat_attr):
# CLASSIFICATION
vprint("{} data type, using a Classifier".format(data.dtypes[k]), file=fp_log)
if classifier == 'LR':
if len(np.unique(y.astype("int64"))) == 1:
vprint("Only 1 class in training set, will use majority class", file=fp_log)
h = dummy.DummyClassifier(strategy="most_frequent")
else:
h = LogisticRegression(n_jobs=-1)
elif classifier == 'DT':
h = DecisionTreeClassifier(max_depth=5)
elif classifier == 'RF':
h = RandomForestClassifier(max_depth=4,
n_estimators=100,
random_state=1,
n_jobs=-1)
elif classifier == 'ET':
h = ExtraTreesClassifier(n_estimators=100,
max_features="auto",
criterion='entropy',
min_samples_split=4,
max_depth=35,
min_samples_leaf=2,
n_jobs=-1)
else:
vprint("No such specification: ", classifier)
exit(1)
elif data.dtypes[k] == 'object':
vprint("not expecting this!", file=fp_log)
exit(1)
else:
vprint("Unexpected data type!", file=fp_log)
exit(1)
print(" Training...")
if data.columns[k] in set(num_attr):
h.fit(X, y)
elif data.columns[k] in set(cat_attr):
h.fit(X, y.astype("int64"))
print(" Predicting...")
yp = h.predict(Xp)
print(" Filling...")
# make ", yp.shape, "fit into", (N-n), "rows"
idx_range = range(n, N)
data.ix[idx_range, attr] = yp
else:
print("Nothing to do for: " + attr, file=fp_log)
# Update nulls positions
nulls = pd.isnull(data)
else:
print("Cascade imputation run {}: {} missing values to fill".format(i + 1, count_nan), file=fp_log)
# Reindexing (back to original order)
data = data.set_index(idx_col, drop=False)
data = data.reindex(original_order)
# Move ID and target columns to the front
mid = data[target_col]
data.drop(labels=[target_col], axis=1, inplace=True)
data.insert(0, target_col, mid)
mid = data[idx_col]
data.drop(labels=[idx_col], axis=1, inplace=True)
if keep_ids:
data.insert(0, idx_col, mid)
vprint("Finished")
return data
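# Hedged usage sketch (the DataFrame `df` and the attribute lists are illustrative):
#   filled = cascade_imputation(data=df, classifier="RF",
#                               num_attr=["age", "income"], cat_attr=["gender"],
#                               verbose=False, target_col="target")
# The returned DataFrame is reindexed back to the original row order, with model
# predictions in place of the missing values that could be imputed.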
| mit |
blaze/distributed | distributed/client.py | 1 | 161602 | import asyncio
import atexit
from collections import defaultdict
from collections.abc import Iterator
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import DoneAndNotDoneFutures
from contextlib import contextmanager, suppress
from contextvars import ContextVar
import copy
import errno
from functools import partial
import html
import inspect
import itertools
import json
import logging
from numbers import Number, Integral
import os
import sys
import uuid
import threading
import socket
from queue import Queue as pyQueue
import warnings
import weakref
import dask
from dask.base import tokenize, normalize_token, collections_to_dsk
from dask.core import flatten, get_dependencies
from dask.optimization import SubgraphCallable
from dask.compatibility import apply
from dask.utils import ensure_dict, format_bytes, funcname
from tlz import first, groupby, merge, valmap, keymap, partition_all
try:
from dask.delayed import single_key
except ImportError:
single_key = first
from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
from .batched import BatchedSend
from .utils_comm import (
WrappedKey,
unpack_remotedata,
pack_data,
subs_multiple,
scatter_to_workers,
gather_from_workers,
retry_operation,
)
from .cfexecutor import ClientExecutor
from .core import (
connect,
rpc,
clean_exception,
CommClosedError,
PooledRPCCall,
ConnectionPool,
)
from .metrics import time
from .protocol import to_serialize
from .protocol.pickle import dumps, loads
from .publish import Datasets
from .pubsub import PubSubClientExtension
from .security import Security
from .sizeof import sizeof
from .threadpoolexecutor import rejoin
from .worker import dumps_task, get_client, get_worker, secede
from .diagnostics.plugin import WorkerPlugin
from .utils import (
All,
sync,
tokey,
log_errors,
str_graph,
key_split,
thread_state,
no_default,
LoopRunner,
parse_timedelta,
shutting_down,
Any,
has_keyword,
format_dashboard_link,
TimeoutError,
CancelledError,
)
from . import versions as version_module
logger = logging.getLogger(__name__)
_global_clients = weakref.WeakValueDictionary()
_global_client_index = [0]
_current_client = ContextVar("_current_client", default=None)
DEFAULT_EXTENSIONS = [PubSubClientExtension]
def _get_global_client():
L = sorted(list(_global_clients), reverse=True)
for k in L:
c = _global_clients[k]
if c.status != "closed":
return c
else:
del _global_clients[k]
return None
def _set_global_client(c):
if c is not None:
_global_clients[_global_client_index[0]] = c
_global_client_index[0] += 1
def _del_global_client(c):
for k in list(_global_clients):
try:
if _global_clients[k] is c:
del _global_clients[k]
except KeyError:
pass
class Future(WrappedKey):
""" A remotely running computation
A Future is a local proxy to a result running on a remote worker. A user
manages future objects in the local Python process to determine what
happens in the larger cluster.
Parameters
----------
key: str, or tuple
Key of remote data to which this future refers
client: Client
Client that should own this future. Defaults to _get_global_client()
inform: bool
Do we inform the scheduler that we need an update on this future
Examples
--------
Futures typically emerge from Client computations
>>> my_future = client.submit(add, 1, 2) # doctest: +SKIP
We can track the progress and results of a future
>>> my_future # doctest: +SKIP
<Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>
We can get the result or the exception and traceback from the future
>>> my_future.result() # doctest: +SKIP
See Also
--------
Client: Creates futures
"""
_cb_executor = None
_cb_executor_pid = None
def __init__(self, key, client=None, inform=True, state=None):
self.key = key
self._cleared = False
tkey = tokey(key)
self.client = client or Client.current()
self.client._inc_ref(tkey)
self._generation = self.client.generation
if tkey in self.client.futures:
self._state = self.client.futures[tkey]
else:
self._state = self.client.futures[tkey] = FutureState()
if inform:
self.client._send_to_scheduler(
{
"op": "client-desires-keys",
"keys": [tokey(key)],
"client": self.client.id,
}
)
if state is not None:
try:
handler = self.client._state_handlers[state]
except KeyError:
pass
else:
handler(key=key)
@property
def executor(self):
return self.client
@property
def status(self):
return self._state.status
def done(self):
""" Is the computation complete? """
return self._state.done()
def result(self, timeout=None):
""" Wait until computation completes, gather result to local process.
If *timeout* seconds are elapsed before returning, a
``dask.distributed.TimeoutError`` is raised.
"""
if self.client.asynchronous:
return self.client.sync(self._result, callback_timeout=timeout)
# shorten error traceback
result = self.client.sync(self._result, callback_timeout=timeout, raiseit=False)
if self.status == "error":
typ, exc, tb = result
raise exc.with_traceback(tb)
elif self.status == "cancelled":
raise result
else:
return result
async def _result(self, raiseit=True):
await self._state.wait()
if self.status == "error":
exc = clean_exception(self._state.exception, self._state.traceback)
if raiseit:
typ, exc, tb = exc
raise exc.with_traceback(tb)
else:
return exc
elif self.status == "cancelled":
exception = CancelledError(self.key)
if raiseit:
raise exception
else:
return exception
else:
result = await self.client._gather([self])
return result[0]
async def _exception(self):
await self._state.wait()
if self.status == "error":
return self._state.exception
else:
return None
def exception(self, timeout=None, **kwargs):
""" Return the exception of a failed task
If *timeout* seconds are elapsed before returning, a
``dask.distributed.TimeoutError`` is raised.
See Also
--------
Future.traceback
"""
return self.client.sync(self._exception, callback_timeout=timeout, **kwargs)
def add_done_callback(self, fn):
""" Call callback on future when callback has finished
The callback ``fn`` should take the future as its only argument. This
will be called regardless of if the future completes successfully,
errs, or is cancelled
The callback is executed in a separate thread.
"""
cls = Future
if cls._cb_executor is None or cls._cb_executor_pid != os.getpid():
try:
cls._cb_executor = ThreadPoolExecutor(
1, thread_name_prefix="Dask-Callback-Thread"
)
except TypeError:
cls._cb_executor = ThreadPoolExecutor(1)
cls._cb_executor_pid = os.getpid()
def execute_callback(fut):
try:
fn(fut)
except BaseException:
logger.exception("Error in callback %s of %s:", fn, fut)
self.client.loop.add_callback(
done_callback, self, partial(cls._cb_executor.submit, execute_callback)
)
def cancel(self, **kwargs):
""" Cancel request to run this future
See Also
--------
Client.cancel
"""
return self.client.cancel([self], **kwargs)
def retry(self, **kwargs):
""" Retry this future if it has failed
See Also
--------
Client.retry
"""
return self.client.retry([self], **kwargs)
def cancelled(self):
""" Returns True if the future has been cancelled """
return self._state.status == "cancelled"
async def _traceback(self):
await self._state.wait()
if self.status == "error":
return self._state.traceback
else:
return None
def traceback(self, timeout=None, **kwargs):
""" Return the traceback of a failed task
This returns a traceback object. You can inspect this object using the
``traceback`` module. Alternatively if you call ``future.result()``
this traceback will accompany the raised exception.
If *timeout* seconds are elapsed before returning, a
``dask.distributed.TimeoutError`` is raised.
Examples
--------
>>> import traceback # doctest: +SKIP
>>> tb = future.traceback() # doctest: +SKIP
>>> traceback.format_tb(tb) # doctest: +SKIP
[...]
See Also
--------
Future.exception
"""
return self.client.sync(self._traceback, callback_timeout=timeout, **kwargs)
@property
def type(self):
return self._state.type
def release(self, _in_destructor=False):
# NOTE: this method can be called from different threads
# (see e.g. Client.get() or Future.__del__())
if not self._cleared and self.client.generation == self._generation:
self._cleared = True
try:
self.client.loop.add_callback(self.client._dec_ref, tokey(self.key))
except TypeError:
pass # Shutting down, add_callback may be None
def __getstate__(self):
return self.key, self.client.scheduler.address
def __setstate__(self, state):
key, address = state
try:
c = Client.current(allow_global=False)
except ValueError:
c = get_client(address)
Future.__init__(self, key, c)
c._send_to_scheduler(
{
"op": "update-graph",
"tasks": {},
"keys": [tokey(self.key)],
"client": c.id,
}
)
def __del__(self):
try:
self.release()
except RuntimeError: # closed event loop
pass
def __repr__(self):
if self.type:
try:
typ = self.type.__module__.split(".")[0] + "." + self.type.__name__
except AttributeError:
typ = str(self.type)
return "<Future: %s, type: %s, key: %s>" % (self.status, typ, self.key)
else:
return "<Future: %s, key: %s>" % (self.status, self.key)
def _repr_html_(self):
text = "<b>Future: %s</b> " % html.escape(key_split(self.key))
text += (
'<font color="gray">status: </font>'
'<font color="%(color)s">%(status)s</font>, '
) % {
"status": self.status,
"color": "red" if self.status == "error" else "black",
}
if self.type:
try:
typ = self.type.__module__.split(".")[0] + "." + self.type.__name__
except AttributeError:
typ = str(self.type)
text += '<font color="gray">type: </font>%s, ' % typ
text += '<font color="gray">key: </font>%s' % html.escape(str(self.key))
return text
def __await__(self):
return self.result().__await__()
class FutureState:
"""A Future's internal state.
This is shared between all Futures with the same key and client.
"""
__slots__ = ("_event", "status", "type", "exception", "traceback")
def __init__(self):
self._event = None
self.status = "pending"
self.type = None
def _get_event(self):
# Can't create Event eagerly in constructor as it can fetch
# its IOLoop from the wrong thread
# (https://github.com/tornadoweb/tornado/issues/2189)
event = self._event
if event is None:
event = self._event = asyncio.Event()
return event
def cancel(self):
self.status = "cancelled"
self.exception = CancelledError()
self._get_event().set()
def finish(self, type=None):
self.status = "finished"
self._get_event().set()
if type is not None:
self.type = type
def lose(self):
self.status = "lost"
self._get_event().clear()
def retry(self):
self.status = "pending"
self._get_event().clear()
def set_error(self, exception, traceback):
_, exception, traceback = clean_exception(exception, traceback)
self.status = "error"
self.exception = exception
self.traceback = traceback
self._get_event().set()
def done(self):
return self._event is not None and self._event.is_set()
def reset(self):
self.status = "pending"
if self._event is not None:
self._event.clear()
async def wait(self, timeout=None):
await asyncio.wait_for(self._get_event().wait(), timeout)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.status)
async def done_callback(future, callback):
""" Coroutine that waits on future, then calls callback """
while future.status == "pending":
await future._state.wait()
callback(future)
@partial(normalize_token.register, Future)
def normalize_future(f):
return [f.key, type(f)]
class AllExit(Exception):
"""Custom exception class to exit All(...) early.
"""
class Client:
""" Connect to and submit computation to a Dask cluster
The Client connects users to a Dask cluster. It provides an asynchronous
user interface around functions and futures. This class resembles
executors in ``concurrent.futures`` but also allows ``Future`` objects
within ``submit/map`` calls. When a Client is instantiated it takes over
all ``dask.compute`` and ``dask.persist`` calls by default.
It is also common to create a Client without specifying the scheduler
address, like ``Client()``. In this case the Client creates a
:class:`LocalCluster` in the background and connects to that. Any extra
keywords are passed from Client to LocalCluster in this case. See the
LocalCluster documentation for more information.
Parameters
----------
address: string, or Cluster
This can be the address of a ``Scheduler`` server like a string
``'127.0.0.1:8786'`` or a cluster object like ``LocalCluster()``
timeout: int
Timeout duration for initial connection to the scheduler
set_as_default: bool (True)
Claim this scheduler as the global dask scheduler
scheduler_file: string (optional)
Path to a file with scheduler information if available
security: Security or bool, optional
Optional security information. If creating a local cluster can also
pass in ``True``, in which case temporary self-signed credentials will
be created automatically.
asynchronous: bool (False by default)
Set to True if using this client within async/await functions or within
Tornado gen.coroutines. Otherwise this should remain False for normal
use.
name: string (optional)
Gives the client a name that will be included in logs generated on
the scheduler for matters relating to this client
direct_to_workers: bool (optional)
Whether or not to connect directly to the workers, or to ask
the scheduler to serve as intermediary.
heartbeat_interval: int
Time in milliseconds between heartbeats to scheduler
**kwargs:
If you do not pass a scheduler address, Client will create a
``LocalCluster`` object, passing any extra keyword arguments.
Examples
--------
Provide cluster's scheduler node address on initialization:
>>> client = Client('127.0.0.1:8786') # doctest: +SKIP
Use ``submit`` method to send individual computations to the cluster
>>> a = client.submit(add, 1, 2) # doctest: +SKIP
>>> b = client.submit(add, 10, 20) # doctest: +SKIP
Continue using submit or map on results to build up larger computations
>>> c = client.submit(add, a, b) # doctest: +SKIP
Gather results with the ``gather`` method.
>>> client.gather(c) # doctest: +SKIP
33
You can also call Client with no arguments in order to create your own
local cluster.
>>> client = Client() # makes your own local "cluster" # doctest: +SKIP
Extra keywords will be passed directly to LocalCluster
>>> client = Client(processes=False, threads_per_worker=1) # doctest: +SKIP
See Also
--------
distributed.scheduler.Scheduler: Internal scheduler
distributed.LocalCluster:
"""
_instances = weakref.WeakSet()
def __init__(
self,
address=None,
loop=None,
timeout=no_default,
set_as_default=True,
scheduler_file=None,
security=None,
asynchronous=False,
name=None,
heartbeat_interval=None,
serializers=None,
deserializers=None,
extensions=DEFAULT_EXTENSIONS,
direct_to_workers=None,
connection_limit=512,
**kwargs,
):
if timeout == no_default:
timeout = dask.config.get("distributed.comm.timeouts.connect")
if timeout is not None:
timeout = parse_timedelta(timeout, "s")
self._timeout = timeout
self.futures = dict()
self.refcount = defaultdict(lambda: 0)
self.coroutines = []
if name is None:
name = dask.config.get("client-name", None)
self.id = (
type(self).__name__
+ ("-" + name + "-" if name else "-")
+ str(uuid.uuid1(clock_seq=os.getpid()))
)
self.generation = 0
self.status = "newly-created"
self._pending_msg_buffer = []
self.extensions = {}
self.scheduler_file = scheduler_file
self._startup_kwargs = kwargs
self.cluster = None
self.scheduler = None
self._scheduler_identity = {}
# A reentrant-lock on the refcounts for futures associated with this
# client. Should be held by individual operations modifying refcounts,
# or any bulk operation that needs to ensure the set of futures doesn't
# change during operation.
self._refcount_lock = threading.RLock()
self.datasets = Datasets(self)
self._serializers = serializers
if deserializers is None:
deserializers = serializers
self._deserializers = deserializers
self.direct_to_workers = direct_to_workers
# Communication
self.scheduler_comm = None
if address is None:
address = dask.config.get("scheduler-address", None)
if address:
logger.info("Config value `scheduler-address` found: %s", address)
if address is not None and kwargs:
raise ValueError(
"Unexpected keyword arguments: {}".format(str(sorted(kwargs)))
)
if isinstance(address, (rpc, PooledRPCCall)):
self.scheduler = address
elif hasattr(address, "scheduler_address"):
# It's a LocalCluster or LocalCluster-compatible object
self.cluster = address
with suppress(AttributeError):
loop = address.loop
if security is None:
security = getattr(self.cluster, "security", None)
if security is None:
security = Security()
elif security is True:
security = Security.temporary()
self._startup_kwargs["security"] = security
elif not isinstance(security, Security):
raise TypeError("security must be a Security object")
self.security = security
if name == "worker":
self.connection_args = self.security.get_connection_args("worker")
else:
self.connection_args = self.security.get_connection_args("client")
self._connecting_to_scheduler = False
self._asynchronous = asynchronous
self._should_close_loop = not loop
self._loop_runner = LoopRunner(loop=loop, asynchronous=asynchronous)
self.io_loop = self.loop = self._loop_runner.loop
self._gather_keys = None
self._gather_future = None
if heartbeat_interval is None:
heartbeat_interval = dask.config.get("distributed.client.heartbeat")
heartbeat_interval = parse_timedelta(heartbeat_interval, default="ms")
scheduler_info_interval = parse_timedelta(
dask.config.get("distributed.client.scheduler-info-interval", default="ms")
)
self._periodic_callbacks = dict()
self._periodic_callbacks["scheduler-info"] = PeriodicCallback(
self._update_scheduler_info, scheduler_info_interval * 1000,
)
self._periodic_callbacks["heartbeat"] = PeriodicCallback(
self._heartbeat, heartbeat_interval * 1000
)
self._start_arg = address
if set_as_default:
self._set_config = dask.config.set(
scheduler="dask.distributed", shuffle="tasks"
)
self._stream_handlers = {
"key-in-memory": self._handle_key_in_memory,
"lost-data": self._handle_lost_data,
"cancelled-key": self._handle_cancelled_key,
"task-retried": self._handle_retried_key,
"task-erred": self._handle_task_erred,
"restart": self._handle_restart,
"error": self._handle_error,
}
self._state_handlers = {
"memory": self._handle_key_in_memory,
"lost": self._handle_lost_data,
"erred": self._handle_task_erred,
}
self.rpc = ConnectionPool(
limit=connection_limit,
serializers=serializers,
deserializers=deserializers,
deserialize=True,
connection_args=self.connection_args,
timeout=timeout,
server=self,
)
for ext in extensions:
ext(self)
self.start(timeout=timeout)
Client._instances.add(self)
from distributed.recreate_exceptions import ReplayExceptionClient
ReplayExceptionClient(self)
@contextmanager
def as_current(self):
"""Thread-local, Task-local context manager that causes the Client.current class
method to return self. Any Future objects deserialized inside this context
manager will be automatically attached to this Client.
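Examples
--------
A minimal illustrative sketch (assumes ``client`` is an existing Client):
>>> with client.as_current():  # doctest: +SKIP
...     assert Client.current() is client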
"""
# In Python 3.6, contextvars are thread-local but not Task-local.
# We can still detect a race condition though.
if sys.version_info < (3, 7) and _current_client.get() not in (self, None):
raise RuntimeError(
"Detected race condition where multiple asynchronous clients tried "
"entering the as_current() context manager at the same time. "
"Please upgrade to Python 3.7+."
)
tok = _current_client.set(self)
try:
yield
finally:
_current_client.reset(tok)
@classmethod
def current(cls, allow_global=True):
"""When running within the context of `as_client`, return the context-local
current client. Otherwise, return the latest initialised Client.
If no Client instances exist, raise ValueError.
If allow_global is set to False, raise ValueError if running outside of the
`as_current` context manager.
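Examples
--------
Hedged sketch (assumes ``client`` was created earlier in this session):
>>> Client.current() is client  # doctest: +SKIP
True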
"""
out = _current_client.get()
if out:
return out
if allow_global:
return default_client()
raise ValueError("Not running inside the `as_current` context manager")
@property
def asynchronous(self):
""" Are we running in the event loop?
This is true if the user signaled that we might be when creating the
client as in the following::
client = Client(asynchronous=True)
However, we override this expectation if we can definitively tell that
we are running from a thread that is not the event loop. This is
common when calling get_client() from within a worker task. Even
though the client was originally created in asynchronous mode we may
find ourselves in contexts when it is better to operate synchronously.
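Examples
--------
Illustrative sketch from within a coroutine (``await Client(...)`` is supported
because the client defines ``__await__``):
>>> client = await Client(asynchronous=True)  # doctest: +SKIP
>>> client.asynchronous  # doctest: +SKIP
True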
"""
return self._asynchronous and self.loop is IOLoop.current()
@property
def dashboard_link(self):
scheduler, info = self._get_scheduler_info()
try:
return self.cluster.dashboard_link
except AttributeError:
protocol, rest = scheduler.address.split("://")
port = info["services"]["dashboard"]
if protocol == "inproc":
host = "localhost"
else:
host = rest.split(":")[0]
return format_dashboard_link(host, port)
def sync(self, func, *args, asynchronous=None, callback_timeout=None, **kwargs):
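""" Call ``func`` in a way that matches this client's mode: return an awaitable
when operating asynchronously, otherwise block on the event loop until the call
completes and return its result """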
if (
asynchronous
or self.asynchronous
or getattr(thread_state, "asynchronous", False)
):
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
return future
else:
return sync(
self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
)
def _get_scheduler_info(self):
from .scheduler import Scheduler
if (
self.cluster
and hasattr(self.cluster, "scheduler")
and isinstance(self.cluster.scheduler, Scheduler)
):
info = self.cluster.scheduler.identity()
scheduler = self.cluster.scheduler
elif (
self._loop_runner.is_started()
and self.scheduler
and not (self.asynchronous and self.loop is IOLoop.current())
):
info = sync(self.loop, self.scheduler.identity)
scheduler = self.scheduler
else:
info = self._scheduler_identity
scheduler = self.scheduler
return scheduler, info
def __repr__(self):
# Note: avoid doing I/O here...
info = self._scheduler_identity
addr = info.get("address")
if addr:
workers = info.get("workers", {})
nworkers = len(workers)
nthreads = sum(w["nthreads"] for w in workers.values())
text = "<%s: %r processes=%d threads=%d" % (
self.__class__.__name__,
addr,
nworkers,
nthreads,
)
memory = [w["memory_limit"] for w in workers.values()]
if all(memory):
text += ", memory=" + format_bytes(sum(memory))
text += ">"
return text
elif self.scheduler is not None:
return "<%s: scheduler=%r>" % (
self.__class__.__name__,
self.scheduler.address,
)
else:
return "<%s: not connected>" % (self.__class__.__name__,)
def _repr_html_(self):
scheduler, info = self._get_scheduler_info()
text = (
'<h3 style="text-align: left;">Client</h3>\n'
'<ul style="text-align: left; list-style: none; margin: 0; padding: 0;">\n'
)
if scheduler is not None:
text += " <li><b>Scheduler: </b>%s</li>\n" % scheduler.address
else:
text += " <li><b>Scheduler: not connected</b></li>\n"
if info and "dashboard" in info["services"]:
text += (
" <li><b>Dashboard: </b><a href='%(web)s' target='_blank'>%(web)s</a></li>\n"
% {"web": self.dashboard_link}
)
text += "</ul>\n"
if info:
workers = list(info["workers"].values())
cores = sum(w["nthreads"] for w in workers)
if all(isinstance(w["memory_limit"], Number) for w in workers):
memory = sum(w["memory_limit"] for w in workers)
memory = format_bytes(memory)
else:
memory = ""
text2 = (
'<h3 style="text-align: left;">Cluster</h3>\n'
'<ul style="text-align: left; list-style:none; margin: 0; padding: 0;">\n'
" <li><b>Workers: </b>%d</li>\n"
" <li><b>Cores: </b>%d</li>\n"
" <li><b>Memory: </b>%s</li>\n"
"</ul>\n"
) % (len(workers), cores, memory)
return (
'<table style="border: 2px solid white;">\n'
"<tr>\n"
'<td style="vertical-align: top; border: 0px solid white">\n%s</td>\n'
'<td style="vertical-align: top; border: 0px solid white">\n%s</td>\n'
"</tr>\n</table>"
) % (text, text2)
else:
return text
def start(self, **kwargs):
""" Start scheduler running in separate thread """
if self.status != "newly-created":
return
self._loop_runner.start()
_set_global_client(self)
self.status = "connecting"
if self.asynchronous:
self._started = asyncio.ensure_future(self._start(**kwargs))
else:
sync(self.loop, self._start, **kwargs)
def __await__(self):
if hasattr(self, "_started"):
return self._started.__await__()
else:
async def _():
return self
return _().__await__()
def _send_to_scheduler_safe(self, msg):
if self.status in ("running", "closing"):
try:
self.scheduler_comm.send(msg)
except (CommClosedError, AttributeError):
if self.status == "running":
raise
elif self.status in ("connecting", "newly-created"):
self._pending_msg_buffer.append(msg)
def _send_to_scheduler(self, msg):
if self.status in ("running", "closing", "connecting", "newly-created"):
self.loop.add_callback(self._send_to_scheduler_safe, msg)
else:
raise Exception(
"Tried sending message after closing. Status: %s\n"
"Message: %s" % (self.status, msg)
)
async def _start(self, timeout=no_default, **kwargs):
await self.rpc.start()
if timeout == no_default:
timeout = self._timeout
if timeout is not None:
timeout = parse_timedelta(timeout, "s")
address = self._start_arg
if self.cluster is not None:
# Ensure the cluster is started (no-op if already running)
try:
await self.cluster
except Exception:
logger.info(
"Tried to start cluster and received an error. Proceeding.",
exc_info=True,
)
address = self.cluster.scheduler_address
elif self.scheduler_file is not None:
while not os.path.exists(self.scheduler_file):
await asyncio.sleep(0.01)
for i in range(10):
try:
with open(self.scheduler_file) as f:
cfg = json.load(f)
address = cfg["address"]
break
except (ValueError, KeyError): # JSON file not yet flushed
await asyncio.sleep(0.01)
elif self._start_arg is None:
from .deploy import LocalCluster
try:
self.cluster = await LocalCluster(
loop=self.loop,
asynchronous=self._asynchronous,
**self._startup_kwargs,
)
except (OSError, socket.error) as e:
if e.errno != errno.EADDRINUSE:
raise
# The default port was taken, use a random one
self.cluster = await LocalCluster(
scheduler_port=0,
loop=self.loop,
asynchronous=True,
**self._startup_kwargs,
)
# Wait for all workers to be ready
# XXX should be a LocalCluster method instead
while not self.cluster.workers or len(self.cluster.scheduler.workers) < len(
self.cluster.workers
):
await asyncio.sleep(0.01)
address = self.cluster.scheduler_address
self._gather_semaphore = asyncio.Semaphore(5)
if self.scheduler is None:
self.scheduler = self.rpc(address)
self.scheduler_comm = None
try:
await self._ensure_connected(timeout=timeout)
except OSError:
await self._close()
raise
for pc in self._periodic_callbacks.values():
pc.start()
self._handle_scheduler_coroutine = asyncio.ensure_future(self._handle_report())
self.coroutines.append(self._handle_scheduler_coroutine)
return self
async def _reconnect(self):
with log_errors():
assert self.scheduler_comm.comm.closed()
self.status = "connecting"
self.scheduler_comm = None
for st in self.futures.values():
st.cancel()
self.futures.clear()
timeout = self._timeout
deadline = self.loop.time() + timeout
while timeout > 0 and self.status == "connecting":
try:
await self._ensure_connected(timeout=timeout)
break
except EnvironmentError:
# Wait a bit before retrying
await asyncio.sleep(0.1)
timeout = deadline - self.loop.time()
else:
logger.error(
"Failed to reconnect to scheduler after %.2f "
"seconds, closing client",
self._timeout,
)
await self._close()
async def _ensure_connected(self, timeout=None):
if (
self.scheduler_comm
and not self.scheduler_comm.closed()
or self._connecting_to_scheduler
or self.scheduler is None
):
return
self._connecting_to_scheduler = True
try:
comm = await connect(
self.scheduler.address, timeout=timeout, **self.connection_args
)
comm.name = "Client->Scheduler"
if timeout is not None:
await asyncio.wait_for(self._update_scheduler_info(), timeout)
else:
await self._update_scheduler_info()
await comm.write(
{
"op": "register-client",
"client": self.id,
"reply": False,
"versions": version_module.get_versions(),
}
)
except Exception as e:
if self.status == "closed":
return
else:
raise
finally:
self._connecting_to_scheduler = False
if timeout is not None:
msg = await asyncio.wait_for(comm.read(), timeout)
else:
msg = await comm.read()
assert len(msg) == 1
assert msg[0]["op"] == "stream-start"
if msg[0].get("warning"):
warnings.warn(version_module.VersionMismatchWarning(msg[0]["warning"]))
bcomm = BatchedSend(interval="10ms", loop=self.loop)
bcomm.start(comm)
self.scheduler_comm = bcomm
_set_global_client(self)
self.status = "running"
for msg in self._pending_msg_buffer:
self._send_to_scheduler(msg)
del self._pending_msg_buffer[:]
logger.debug("Started scheduling coroutines. Synchronized")
async def _update_scheduler_info(self):
if self.status not in ("running", "connecting"):
return
try:
self._scheduler_identity = await self.scheduler.identity()
except EnvironmentError:
logger.debug("Not able to query scheduler for identity")
async def _wait_for_workers(self, n_workers=0):
info = await self.scheduler.identity()
while n_workers and len(info["workers"]) < n_workers:
await asyncio.sleep(0.1)
info = await self.scheduler.identity()
def wait_for_workers(self, n_workers=0):
"""Blocking call to wait for n workers before continuing"""
return self.sync(self._wait_for_workers, n_workers)
def _heartbeat(self):
if self.scheduler_comm:
self.scheduler_comm.send({"op": "heartbeat-client"})
def __enter__(self):
if not self._loop_runner.is_started():
self.start()
return self
async def __aenter__(self):
await self._started
return self
async def __aexit__(self, typ, value, traceback):
await self._close()
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
def _inc_ref(self, key):
with self._refcount_lock:
self.refcount[key] += 1
def _dec_ref(self, key):
with self._refcount_lock:
self.refcount[key] -= 1
if self.refcount[key] == 0:
del self.refcount[key]
self._release_key(key)
def _release_key(self, key):
""" Release key from distributed memory """
logger.debug("Release key %s", key)
st = self.futures.pop(key, None)
if st is not None:
st.cancel()
if self.status != "closed":
self._send_to_scheduler(
{"op": "client-releases-keys", "keys": [key], "client": self.id}
)
async def _handle_report(self):
""" Listen to scheduler """
with log_errors():
try:
while True:
if self.scheduler_comm is None:
break
try:
msgs = await self.scheduler_comm.comm.read()
except CommClosedError:
if self.status == "running":
logger.info("Client report stream closed to scheduler")
logger.info("Reconnecting...")
self.status = "connecting"
await self._reconnect()
continue
else:
break
if not isinstance(msgs, (list, tuple)):
msgs = (msgs,)
breakout = False
for msg in msgs:
logger.debug("Client receives message %s", msg)
if "status" in msg and "error" in msg["status"]:
typ, exc, tb = clean_exception(**msg)
raise exc.with_traceback(tb)
op = msg.pop("op")
if op == "close" or op == "stream-closed":
breakout = True
break
try:
handler = self._stream_handlers[op]
result = handler(**msg)
if inspect.isawaitable(result):
await result
except Exception as e:
logger.exception(e)
if breakout:
break
except CancelledError:
pass
def _handle_key_in_memory(self, key=None, type=None, workers=None):
state = self.futures.get(key)
if state is not None:
if type and not state.type: # Type exists and not yet set
try:
type = loads(type)
except Exception:
type = None
# Here, `type` may be a str if actual type failed
# serializing in Worker
else:
type = None
state.finish(type)
def _handle_lost_data(self, key=None):
state = self.futures.get(key)
if state is not None:
state.lose()
def _handle_cancelled_key(self, key=None):
state = self.futures.get(key)
if state is not None:
state.cancel()
def _handle_retried_key(self, key=None):
state = self.futures.get(key)
if state is not None:
state.retry()
def _handle_task_erred(self, key=None, exception=None, traceback=None):
state = self.futures.get(key)
if state is not None:
state.set_error(exception, traceback)
def _handle_restart(self):
logger.info("Receive restart signal from scheduler")
for state in self.futures.values():
state.cancel()
self.futures.clear()
with suppress(AttributeError):
self._restart_event.set()
def _handle_error(self, exception=None):
logger.warning("Scheduler exception:")
logger.exception(exception)
async def _close(self, fast=False):
""" Send close signal and wait until scheduler completes """
if self.status == "closed":
return
self.status = "closing"
for pc in self._periodic_callbacks.values():
pc.stop()
with log_errors():
_del_global_client(self)
self._scheduler_identity = {}
with suppress(AttributeError):
# clear the dask.config set keys
with self._set_config:
pass
if self.get == dask.config.get("get", None):
del dask.config.config["get"]
if (
self.scheduler_comm
and self.scheduler_comm.comm
and not self.scheduler_comm.comm.closed()
):
self._send_to_scheduler({"op": "close-client"})
self._send_to_scheduler({"op": "close-stream"})
# Give the scheduler 'stream-closed' message 100ms to come through
# This makes the shutdown slightly smoother and quieter
with suppress(AttributeError, asyncio.CancelledError, TimeoutError):
await asyncio.wait_for(
asyncio.shield(self._handle_scheduler_coroutine), 0.1
)
if (
self.scheduler_comm
and self.scheduler_comm.comm
and not self.scheduler_comm.comm.closed()
):
await self.scheduler_comm.close()
for key in list(self.futures):
self._release_key(key=key)
if self._start_arg is None:
with suppress(AttributeError):
await self.cluster.close()
await self.rpc.close()
self.status = "closed"
if _get_global_client() is self:
_set_global_client(None)
coroutines = set(self.coroutines)
for f in self.coroutines:
# cancel() works on asyncio futures (Tornado 5)
# but is a no-op on Tornado futures
with suppress(RuntimeError):
f.cancel()
if f.cancelled():
coroutines.remove(f)
del self.coroutines[:]
if not fast:
with suppress(TimeoutError, asyncio.CancelledError):
await asyncio.wait_for(asyncio.gather(*coroutines), 2)
with suppress(AttributeError):
await self.scheduler.close_rpc()
self.scheduler = None
self.status = "closed"
_shutdown = _close
def close(self, timeout=no_default):
""" Close this client
Clients will also close automatically when your Python session ends
If you started a client without arguments like ``Client()`` then this
will also close the local cluster that was started at the same time.
See Also
--------
Client.restart
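Examples
--------
Illustrative sketch; any LocalCluster started implicitly by ``Client()`` is
closed as well:
>>> client.close()  # doctest: +SKIP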
"""
if timeout == no_default:
timeout = self._timeout * 2
# XXX handling of self.status here is not thread-safe
if self.status == "closed":
if self.asynchronous:
future = asyncio.Future()
future.set_result(None)
return future
return
self.status = "closing"
for pc in self._periodic_callbacks.values():
pc.stop()
if self.asynchronous:
future = self._close()
if timeout:
future = asyncio.wait_for(future, timeout)
return future
if self._start_arg is None:
with suppress(AttributeError):
f = self.cluster.close()
if asyncio.iscoroutine(f):
async def _():
await f
self.sync(_)
sync(self.loop, self._close, fast=True)
assert self.status == "closed"
if self._should_close_loop and not shutting_down():
self._loop_runner.stop()
async def _shutdown(self):
logger.info("Shutting down scheduler from Client")
if self.cluster:
await self.cluster.close()
else:
with suppress(CommClosedError):
self.status = "closing"
await self.scheduler.terminate(close_workers=True)
def shutdown(self):
""" Shut down the connected scheduler and workers
Note, this may disrupt other clients that may be using the same
scheduler and workers.
See also
--------
Client.close: close only this client
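Examples
--------
Illustrative sketch; note that this stops the scheduler and workers, not just
this client:
>>> client.shutdown()  # doctest: +SKIP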
"""
return self.sync(self._shutdown)
def get_executor(self, **kwargs):
"""
Return a concurrent.futures Executor for submitting tasks on this Client
Parameters
----------
**kwargs:
Any submit()- or map()- compatible arguments, such as
`workers` or `resources`.
Returns
-------
An Executor object that's fully compatible with the concurrent.futures
API.
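Examples
--------
Hedged sketch (``inc`` is a placeholder function):
>>> e = client.get_executor()  # doctest: +SKIP
>>> future = e.submit(inc, 1)  # doctest: +SKIP
>>> future.result()  # doctest: +SKIP
2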
"""
return ClientExecutor(self, **kwargs)
def submit(
self,
func,
*args,
key=None,
workers=None,
resources=None,
retries=None,
priority=0,
fifo_timeout="100 ms",
allow_other_workers=False,
actor=False,
actors=False,
pure=None,
**kwargs,
):
""" Submit a function application to the scheduler
Parameters
----------
func: callable
*args:
**kwargs:
pure: bool (defaults to True)
Whether or not the function is pure. Set ``pure=False`` for
impure functions like ``np.random.random``.
workers: set, iterable of sets
A set of worker hostnames on which computations may be performed.
Leave empty to default to all workers (common case)
key: str
Unique identifier for the task. Defaults to function-name and hash
allow_other_workers: bool (defaults to False)
Used with `workers`. Indicates whether or not the computations
may be performed on workers that are not in the `workers` set(s).
retries: int (default to 0)
Number of allowed automatic retries if the task fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: str timedelta (default '100ms')
Allowed amount of time between calls to consider the same priority
resources: dict (defaults to {})
Defines the `resources` this job requires on the worker; e.g.
``{'GPU': 2}``. See :doc:`worker resources <resources>` for details
on defining resources.
actor: bool (default False)
Whether this task should exist on the worker as a stateful actor.
See :doc:`actors` for additional details.
actors: bool (default False)
Alias for `actor`
Examples
--------
>>> c = client.submit(add, a, b) # doctest: +SKIP
Returns
-------
Future
See Also
--------
Client.map: Submit on many arguments at once
"""
if not callable(func):
raise TypeError("First input to submit must be a callable function")
actor = actor or actors
if pure is None:
pure = not actor
if allow_other_workers not in (True, False, None):
raise TypeError("allow_other_workers= must be True or False")
if key is None:
if pure:
key = funcname(func) + "-" + tokenize(func, kwargs, *args)
else:
key = funcname(func) + "-" + str(uuid.uuid4())
skey = tokey(key)
with self._refcount_lock:
if skey in self.futures:
return Future(key, self, inform=False)
if allow_other_workers and workers is None:
raise ValueError("Only use allow_other_workers= if using workers=")
if isinstance(workers, (str, Number)):
workers = [workers]
if workers is not None:
restrictions = {skey: workers}
loose_restrictions = [skey] if allow_other_workers else []
else:
restrictions = {}
loose_restrictions = []
if kwargs:
dsk = {skey: (apply, func, list(args), kwargs)}
else:
dsk = {skey: (func,) + tuple(args)}
futures = self._graph_to_futures(
dsk,
[skey],
restrictions,
loose_restrictions,
priority={skey: 0},
user_priority=priority,
resources={skey: resources} if resources else None,
retries=retries,
fifo_timeout=fifo_timeout,
actors=actor,
)
logger.debug("Submit %s(...), %s", funcname(func), key)
return futures[skey]
def map(
self,
func,
*iterables,
key=None,
workers=None,
retries=None,
resources=None,
priority=0,
allow_other_workers=False,
fifo_timeout="100 ms",
actor=False,
actors=False,
pure=None,
batch_size=None,
**kwargs,
):
""" Map a function on a sequence of arguments
Arguments can be normal objects or Futures
Parameters
----------
func: callable
iterables: Iterables
List-like objects to map over. They should have the same length.
key: str, list
Prefix for task names if string. Explicit names if list.
pure: bool (defaults to True)
Whether or not the function is pure. Set ``pure=False`` for
impure functions like ``np.random.random``.
workers: set, iterable of sets
A set of worker hostnames on which computations may be performed.
Leave empty to default to all workers (common case)
allow_other_workers: bool (defaults to False)
Used with `workers`. Indicates whether or not the computations
may be performed on workers that are not in the `workers` set(s).
retries: int (default to 0)
Number of allowed automatic retries if a task fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: str timedelta (default '100ms')
Allowed amount of time between calls to consider the same priority
resources: dict (defaults to {})
Defines the `resources` each instance of this mapped task requires
on the worker; e.g. ``{'GPU': 2}``. See
:doc:`worker resources <resources>` for details on defining
resources.
actor: bool (default False)
Whether these tasks should exist on the worker as stateful actors.
See :doc:`actors` for additional details.
actors: bool (default False)
Alias for `actor`
batch_size : int, optional
Submit tasks to the scheduler in batches of (at most) ``batch_size``.
Larger batch sizes can be useful for very large ``iterables``,
as the cluster can start processing tasks while later ones are
submitted asynchronously.
**kwargs: dict
Extra keywords to send to the function.
Large values will be included explicitly in the task graph.
Examples
--------
>>> L = client.map(func, sequence) # doctest: +SKIP
Returns
-------
List of futures
See also
--------
Client.submit: Submit a single function
"""
if not callable(func):
raise TypeError("First input to map must be a callable function")
if all(isinstance(it, pyQueue) for it in iterables) or all(
isinstance(i, Iterator) for i in iterables
):
raise TypeError(
"Dask no longer supports mapping over Iterators or Queues."
"Consider using a normal for loop and Client.submit"
)
total_length = sum(len(x) for x in iterables)
if batch_size and batch_size > 1 and total_length > batch_size:
batches = list(
zip(*[partition_all(batch_size, iterable) for iterable in iterables])
)
return sum(
[
self.map(
func,
*batch,
key=key,
workers=workers,
retries=retries,
priority=priority,
allow_other_workers=allow_other_workers,
fifo_timeout=fifo_timeout,
resources=resources,
actor=actor,
actors=actors,
pure=pure,
**kwargs,
)
for batch in batches
],
[],
)
key = key or funcname(func)
actor = actor or actors
if pure is None:
pure = not actor
if allow_other_workers and workers is None:
raise ValueError("Only use allow_other_workers= if using workers=")
iterables = list(zip(*zip(*iterables)))
if isinstance(key, list):
keys = key
else:
if pure:
keys = [
key + "-" + tokenize(func, kwargs, *args)
for args in zip(*iterables)
]
else:
uid = str(uuid.uuid4())
keys = (
[
key + "-" + uid + "-" + str(i)
for i in range(min(map(len, iterables)))
]
if iterables
else []
)
if not kwargs:
dsk = {key: (func,) + args for key, args in zip(keys, zip(*iterables))}
else:
kwargs2 = {}
dsk = {}
for k, v in kwargs.items():
if sizeof(v) > 1e5:
vv = dask.delayed(v)
kwargs2[k] = vv._key
dsk.update(vv.dask)
else:
kwargs2[k] = v
dsk.update(
{
key: (apply, func, (tuple, list(args)), kwargs2)
for key, args in zip(keys, zip(*iterables))
}
)
if isinstance(workers, (str, Number)):
workers = [workers]
if isinstance(workers, (list, set)):
if workers and isinstance(first(workers), (list, set)):
if len(workers) != len(keys):
raise ValueError(
"You only provided %d worker restrictions"
" for a sequence of length %d" % (len(workers), len(keys))
)
restrictions = dict(zip(keys, workers))
else:
restrictions = {k: workers for k in keys}
elif workers is None:
restrictions = {}
else:
raise TypeError("Workers must be a list or set of workers or None")
if allow_other_workers not in (True, False, None):
raise TypeError("allow_other_workers= must be True or False")
if allow_other_workers is True:
loose_restrictions = set(keys)
else:
loose_restrictions = set()
internal_priority = dict(zip(keys, range(len(keys))))
if resources:
resources = {k: resources for k in keys}
else:
resources = None
futures = self._graph_to_futures(
dsk,
keys,
restrictions,
loose_restrictions,
priority=internal_priority,
resources=resources,
retries=retries,
user_priority=priority,
fifo_timeout=fifo_timeout,
actors=actor,
)
logger.debug("map(%s, ...)", funcname(func))
return [futures[tokey(k)] for k in keys]
async def _gather(self, futures, errors="raise", direct=None, local_worker=None):
unpacked, future_set = unpack_remotedata(futures, byte_keys=True)
keys = [tokey(future.key) for future in future_set]
bad_data = dict()
data = {}
if direct is None:
direct = self.direct_to_workers
if direct is None:
try:
w = get_worker()
except Exception:
direct = False
else:
if w.scheduler.address == self.scheduler.address:
direct = True
async def wait(k):
""" Want to stop the All(...) early if we find an error """
st = self.futures[k]
await st.wait()
if st.status != "finished" and errors == "raise":
raise AllExit()
while True:
logger.debug("Waiting on futures to clear before gather")
with suppress(AllExit):
await All(
[wait(key) for key in keys if key in self.futures],
quiet_exceptions=AllExit,
)
failed = ("error", "cancelled")
exceptions = set()
bad_keys = set()
for key in keys:
if key not in self.futures or self.futures[key].status in failed:
exceptions.add(key)
if errors == "raise":
try:
st = self.futures[key]
exception = st.exception
traceback = st.traceback
except (KeyError, AttributeError):
exc = CancelledError(key)
else:
raise exception.with_traceback(traceback)
raise exc
if errors == "skip":
bad_keys.add(key)
bad_data[key] = None
else:
raise ValueError("Bad value, `errors=%s`" % errors)
keys = [k for k in keys if k not in bad_keys and k not in data]
if local_worker: # look inside local worker
data.update(
{k: local_worker.data[k] for k in keys if k in local_worker.data}
)
keys = [k for k in keys if k not in data]
# We now do an actual remote communication with workers or scheduler
if self._gather_future: # attach onto another pending gather request
self._gather_keys |= set(keys)
response = await self._gather_future
else: # no one waiting, go ahead
self._gather_keys = set(keys)
future = asyncio.ensure_future(
self._gather_remote(direct, local_worker)
)
if self._gather_keys is None:
self._gather_future = None
else:
self._gather_future = future
response = await future
if response["status"] == "error":
log = logger.warning if errors == "raise" else logger.debug
log(
"Couldn't gather %s keys, rescheduling %s",
len(response["keys"]),
response["keys"],
)
for key in response["keys"]:
self._send_to_scheduler({"op": "report-key", "key": key})
for key in response["keys"]:
try:
self.futures[key].reset()
except KeyError: # TODO: verify that this is safe
pass
else:
break
if bad_data and errors == "skip" and isinstance(unpacked, list):
unpacked = [f for f in unpacked if f not in bad_data]
data.update(response["data"])
result = pack_data(unpacked, merge(data, bad_data))
return result
async def _gather_remote(self, direct, local_worker):
""" Perform gather with workers or scheduler
This method exists to limit and batch many concurrent gathers into a
few. It controls access using an asyncio semaphore, and picks up keys
from other requests made recently.
"""
async with self._gather_semaphore:
keys = list(self._gather_keys)
self._gather_keys = None # clear state, these keys are being sent off
self._gather_future = None
if direct or local_worker: # gather directly from workers
who_has = await retry_operation(self.scheduler.who_has, keys=keys)
data2, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, close=False
)
response = {"status": "OK", "data": data2}
if missing_keys:
keys2 = [key for key in keys if key not in data2]
response = await retry_operation(self.scheduler.gather, keys=keys2)
if response["status"] == "OK":
response["data"].update(data2)
else: # ask scheduler to gather data for us
response = await retry_operation(self.scheduler.gather, keys=keys)
return response
def gather(self, futures, errors="raise", direct=None, asynchronous=None):
""" Gather futures from distributed memory
Accepts a future, a nested container of futures, or an iterator.
The return type will match the input type.
Parameters
----------
futures: Collection of futures
This can be a possibly nested collection of Future objects.
Collections can be lists, sets, or dictionaries
errors: string
Either 'raise' or 'skip' if we should raise if a future has erred
or skip its inclusion in the output collection
direct: boolean
Whether or not to connect directly to the workers, or to ask
the scheduler to serve as intermediary. This can also be set when
creating the Client.
Returns
-------
results: a collection of the same type as the input, but now with
gathered results rather than futures
Examples
--------
>>> from operator import add # doctest: +SKIP
>>> c = Client('127.0.0.1:8787') # doctest: +SKIP
>>> x = c.submit(add, 1, 2) # doctest: +SKIP
>>> c.gather(x) # doctest: +SKIP
3
>>> c.gather([x, [x], x]) # support lists and dicts # doctest: +SKIP
[3, [3], 3]
See Also
--------
Client.scatter: Send data out to cluster
"""
if isinstance(futures, pyQueue):
raise TypeError(
"Dask no longer supports gathering over Iterators and Queues. "
"Consider using a normal for loop and Client.submit/gather"
)
elif isinstance(futures, Iterator):
return (self.gather(f, errors=errors, direct=direct) for f in futures)
else:
if hasattr(thread_state, "execution_state"): # within worker task
local_worker = thread_state.execution_state["worker"]
else:
local_worker = None
return self.sync(
self._gather,
futures,
errors=errors,
direct=direct,
local_worker=local_worker,
asynchronous=asynchronous,
)
async def _scatter(
self,
data,
workers=None,
broadcast=False,
direct=None,
local_worker=None,
timeout=no_default,
hash=True,
):
if timeout == no_default:
timeout = self._timeout
if isinstance(workers, (str, Number)):
workers = [workers]
if isinstance(data, dict) and not all(
isinstance(k, (bytes, str)) for k in data
):
d = await self._scatter(keymap(tokey, data), workers, broadcast)
return {k: d[tokey(k)] for k in data}
if isinstance(data, type(range(0))):
data = list(data)
input_type = type(data)
names = False
unpack = False
if isinstance(data, Iterator):
data = list(data)
if isinstance(data, (set, frozenset)):
data = list(data)
if not isinstance(data, (dict, list, tuple, set, frozenset)):
unpack = True
data = [data]
if isinstance(data, (list, tuple)):
if hash:
names = [type(x).__name__ + "-" + tokenize(x) for x in data]
else:
names = [type(x).__name__ + "-" + uuid.uuid4().hex for x in data]
data = dict(zip(names, data))
assert isinstance(data, dict)
types = valmap(type, data)
if direct is None:
direct = self.direct_to_workers
if direct is None:
try:
w = get_worker()
except Exception:
direct = False
else:
if w.scheduler.address == self.scheduler.address:
direct = True
if local_worker: # running within task
local_worker.update_data(data=data, report=False)
await self.scheduler.update_data(
who_has={key: [local_worker.address] for key in data},
nbytes=valmap(sizeof, data),
client=self.id,
)
else:
data2 = valmap(to_serialize, data)
if direct:
nthreads = None
start = time()
while not nthreads:
if nthreads is not None:
await asyncio.sleep(0.1)
if time() > start + timeout:
raise TimeoutError("No valid workers found")
nthreads = await self.scheduler.ncores(workers=workers)
if not nthreads:
raise ValueError("No valid workers")
_, who_has, nbytes = await scatter_to_workers(
nthreads, data2, report=False, rpc=self.rpc
)
await self.scheduler.update_data(
who_has=who_has, nbytes=nbytes, client=self.id
)
else:
await self.scheduler.scatter(
data=data2,
workers=workers,
client=self.id,
broadcast=broadcast,
timeout=timeout,
)
out = {k: Future(k, self, inform=False) for k in data}
for key, typ in types.items():
self.futures[key].finish(type=typ)
if direct and broadcast:
n = None if broadcast is True else broadcast
await self._replicate(list(out.values()), workers=workers, n=n)
if issubclass(input_type, (list, tuple, set, frozenset)):
out = input_type(out[k] for k in names)
if unpack:
assert len(out) == 1
out = list(out.values())[0]
return out
def scatter(
self,
data,
workers=None,
broadcast=False,
direct=None,
hash=True,
timeout=no_default,
asynchronous=None,
):
""" Scatter data into distributed memory
This moves data from the local client process into the workers of the
distributed scheduler. Note that it is often better to submit jobs to
your workers to have them load the data rather than loading data
locally and then scattering it out to them.
Parameters
----------
data: list, dict, or object
Data to scatter out to workers. Output type matches input type.
workers: list of tuples (optional)
Optionally constrain locations of data.
Specify workers as hostname/port pairs, e.g. ``('127.0.0.1', 8787)``.
broadcast: bool (defaults to False)
Whether to send each data element to all workers.
By default we round-robin based on number of cores.
direct: bool (defaults to automatically check)
Whether or not to connect directly to the workers, or to ask
the scheduler to serve as intermediary. This can also be set when
creating the Client.
hash: bool (optional)
Whether or not to hash data to determine key.
If False then this uses a random key
Returns
-------
List, dict, or single future, matching the type of the input.
Examples
--------
>>> c = Client('127.0.0.1:8787') # doctest: +SKIP
>>> c.scatter(1) # doctest: +SKIP
<Future: status: finished, key: c0a8a20f903a4915b94db8de3ea63195>
>>> c.scatter([1, 2, 3]) # doctest: +SKIP
[<Future: status: finished, key: c0a8a20f903a4915b94db8de3ea63195>,
<Future: status: finished, key: 58e78e1b34eb49a68c65b54815d1b158>,
<Future: status: finished, key: d3395e15f605bc35ab1bac6341a285e2>]
>>> c.scatter({'x': 1, 'y': 2, 'z': 3}) # doctest: +SKIP
{'x': <Future: status: finished, key: x>,
'y': <Future: status: finished, key: y>,
'z': <Future: status: finished, key: z>}
Constrain location of data to subset of workers
>>> c.scatter([1, 2, 3], workers=[('hostname', 8788)]) # doctest: +SKIP
Broadcast data to all workers
>>> [future] = c.scatter([element], broadcast=True) # doctest: +SKIP
Send scattered data to parallelized function using client futures
interface
>>> data = c.scatter(data, broadcast=True) # doctest: +SKIP
>>> res = [c.submit(func, data, i) for i in range(100)] # doctest: +SKIP
See Also
--------
Client.gather: Gather data back to local process
"""
if timeout == no_default:
timeout = self._timeout
if isinstance(data, pyQueue) or isinstance(data, Iterator):
raise TypeError(
"Dask no longer supports mapping over Iterators or Queues."
"Consider using a normal for loop and Client.submit"
)
if hasattr(thread_state, "execution_state"): # within worker task
local_worker = thread_state.execution_state["worker"]
else:
local_worker = None
return self.sync(
self._scatter,
data,
workers=workers,
broadcast=broadcast,
direct=direct,
local_worker=local_worker,
timeout=timeout,
asynchronous=asynchronous,
hash=hash,
)
async def _cancel(self, futures, force=False):
keys = list({tokey(f.key) for f in futures_of(futures)})
await self.scheduler.cancel(keys=keys, client=self.id, force=force)
for k in keys:
st = self.futures.pop(k, None)
if st is not None:
st.cancel()
def cancel(self, futures, asynchronous=None, force=False):
"""
Cancel running futures
This stops future tasks from being scheduled if they have not yet run
and deletes them if they have already run. After calling, this result
and all dependent results will no longer be accessible
Parameters
----------
futures: list of Futures
force: boolean (False)
Cancel this future even if other clients desire it
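Examples
--------
Hedged sketch (``slow_function`` is a placeholder):
>>> futures = client.map(slow_function, range(10)) # doctest: +SKIP
>>> client.cancel(futures) # doctest: +SKIP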
"""
return self.sync(self._cancel, futures, asynchronous=asynchronous, force=force)
async def _retry(self, futures):
keys = list({tokey(f.key) for f in futures_of(futures)})
response = await self.scheduler.retry(keys=keys, client=self.id)
for key in response:
st = self.futures[key]
st.retry()
def retry(self, futures, asynchronous=None):
"""
Retry failed futures
Parameters
----------
futures: list of Futures
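Examples
--------
Hedged sketch (assumes some of ``futures`` ended in the 'error' state):
>>> client.retry(futures) # doctest: +SKIP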
"""
return self.sync(self._retry, futures, asynchronous=asynchronous)
async def _publish_dataset(self, *args, name=None, **kwargs):
with log_errors():
coroutines = []
def add_coro(name, data):
keys = [tokey(f.key) for f in futures_of(data)]
coroutines.append(
self.scheduler.publish_put(
keys=keys, name=name, data=to_serialize(data), client=self.id
)
)
if name:
if len(args) == 0:
raise ValueError(
"If name is provided, expecting call signature like"
" publish_dataset(df, name='ds')"
)
# in case this is a singleton, collapse it
elif len(args) == 1:
args = args[0]
add_coro(name, args)
for name, data in kwargs.items():
add_coro(name, data)
await asyncio.gather(*coroutines)
def publish_dataset(self, *args, **kwargs):
"""
Publish named datasets to scheduler
This stores a named reference to a dask collection or list of futures
on the scheduler. These references are available to other Clients
which can download the collection or futures with ``get_dataset``.
Datasets are not immediately computed. You may wish to call
``Client.persist`` prior to publishing a dataset.
Parameters
----------
args : list of objects to publish as name
name : optional name of the dataset to publish
kwargs: dict
named collections to publish on the scheduler
Examples
--------
Publishing client:
>>> df = dd.read_csv('s3://...') # doctest: +SKIP
>>> df = c.persist(df) # doctest: +SKIP
>>> c.publish_dataset(my_dataset=df) # doctest: +SKIP
Alternative invocation
>>> c.publish_dataset(df, name='my_dataset') # doctest: +SKIP
Receiving client:
>>> c.list_datasets() # doctest: +SKIP
['my_dataset']
>>> df2 = c.get_dataset('my_dataset') # doctest: +SKIP
Returns
-------
None
See Also
--------
Client.list_datasets
Client.get_dataset
Client.unpublish_dataset
Client.persist
"""
return self.sync(self._publish_dataset, *args, **kwargs)
def unpublish_dataset(self, name, **kwargs):
"""
Remove named datasets from scheduler
Examples
--------
>>> c.list_datasets() # doctest: +SKIP
['my_dataset']
>>> c.unpublish_dataset('my_dataset') # doctest: +SKIP
>>> c.list_datasets() # doctest: +SKIP
[]
See Also
--------
Client.publish_dataset
"""
return self.sync(self.scheduler.publish_delete, name=name, **kwargs)
def list_datasets(self, **kwargs):
"""
List named datasets available on the scheduler
See Also
--------
Client.publish_dataset
Client.get_dataset
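Examples
--------
Illustrative sketch, mirroring the ``publish_dataset`` example:
>>> client.list_datasets() # doctest: +SKIP
['my_dataset']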
"""
return self.sync(self.scheduler.publish_list, **kwargs)
async def _get_dataset(self, name):
with self.as_current():
out = await self.scheduler.publish_get(name=name, client=self.id)
if out is None:
raise KeyError(f"Dataset '{name}' not found")
return out["data"]
def get_dataset(self, name, **kwargs):
"""
Get named dataset from the scheduler
See Also
--------
Client.publish_dataset
Client.list_datasets
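Examples
--------
Illustrative sketch, continuing the ``publish_dataset`` example:
>>> df2 = client.get_dataset('my_dataset') # doctest: +SKIP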
"""
return self.sync(self._get_dataset, name, **kwargs)
async def _run_on_scheduler(self, function, *args, wait=True, **kwargs):
response = await self.scheduler.run_function(
function=dumps(function), args=dumps(args), kwargs=dumps(kwargs), wait=wait
)
if response["status"] == "error":
typ, exc, tb = clean_exception(**response)
raise exc.with_traceback(tb)
else:
return response["result"]
def run_on_scheduler(self, function, *args, **kwargs):
""" Run a function on the scheduler process
This is typically used for live debugging. The function should take a
keyword argument ``dask_scheduler=``, which will be given the scheduler
object itself.
Examples
--------
>>> def get_number_of_tasks(dask_scheduler=None):
... return len(dask_scheduler.tasks)
>>> client.run_on_scheduler(get_number_of_tasks) # doctest: +SKIP
100
Run asynchronous functions in the background:
>>> async def print_state(dask_scheduler): # doctest: +SKIP
... while True:
... print(dask_scheduler.status)
... await asyncio.sleep(1)
>>> c.run(print_state, wait=False) # doctest: +SKIP
See Also
--------
Client.run: Run a function on all workers
Client.start_ipython_scheduler: Start an IPython session on scheduler
"""
return self.sync(self._run_on_scheduler, function, *args, **kwargs)
async def _run(
self, function, *args, nanny=False, workers=None, wait=True, **kwargs
):
responses = await self.scheduler.broadcast(
msg=dict(
op="run",
function=dumps(function),
args=dumps(args),
wait=wait,
kwargs=dumps(kwargs),
),
workers=workers,
nanny=nanny,
)
results = {}
for key, resp in responses.items():
if resp["status"] == "OK":
results[key] = resp["result"]
elif resp["status"] == "error":
typ, exc, tb = clean_exception(**resp)
raise exc.with_traceback(tb)
if wait:
return results
def run(self, function, *args, **kwargs):
"""
Run a function on all workers outside of task scheduling system
This calls a function on all currently known workers immediately,
blocks until those results come back, and returns the results
asynchronously as a dictionary keyed by worker address. This method
is generally used for side effects, such as collecting diagnostic
information or installing libraries.
If your function takes an input argument named ``dask_worker`` then
that variable will be populated with the worker itself.
Parameters
----------
function: callable
*args: arguments for remote function
**kwargs: keyword arguments for remote function
workers: list
Workers on which to run the function. Defaults to all known workers.
wait: boolean (optional)
If the function is asynchronous whether or not to wait until that
function finishes.
nanny : bool, default False
Whether to run ``function`` on the nanny. By default, the function
is run on the worker process. If specified, the addresses in
``workers`` should still be the worker addresses, not the nanny addresses.
Examples
--------
>>> c.run(os.getpid) # doctest: +SKIP
{'192.168.0.100:9000': 1234,
'192.168.0.101:9000': 4321,
'192.168.0.102:9000': 5555}
Restrict computation to particular workers with the ``workers=``
keyword argument.
>>> c.run(os.getpid, workers=['192.168.0.100:9000',
... '192.168.0.101:9000']) # doctest: +SKIP
{'192.168.0.100:9000': 1234,
'192.168.0.101:9000': 4321}
>>> def get_status(dask_worker):
... return dask_worker.status
>>> c.run(get_status) # doctest: +SKIP
{'192.168.0.100:9000': 'running',
'192.168.0.101:9000': 'running'}
Run asynchronous functions in the background:
>>> async def print_state(dask_worker): # doctest: +SKIP
... while True:
... print(dask_worker.status)
... await asyncio.sleep(1)
>>> c.run(print_state, wait=False) # doctest: +SKIP
"""
return self.sync(self._run, function, *args, **kwargs)
def run_coroutine(self, function, *args, **kwargs):
"""
Spawn a coroutine on all workers.
This spawns a coroutine on all currently known workers and then waits
for the coroutine on each worker. The coroutines' results are returned
as a dictionary keyed by worker address.
Parameters
----------
function: a coroutine function
(typically a function wrapped in gen.coroutine or
a Python 3.5+ async function)
*args: arguments for remote function
**kwargs: keyword arguments for remote function
wait: boolean (default True)
Whether to wait for coroutines to end.
workers: list
Workers on which to run the function. Defaults to all known workers.
"""
warnings.warn(
"This method has been deprecated. "
"Instead use Client.run which detects async functions "
"automatically",
stacklevel=2,
)
return self.run(function, *args, **kwargs)
def _graph_to_futures(
self,
dsk,
keys,
restrictions=None,
loose_restrictions=None,
priority=None,
user_priority=0,
resources=None,
retries=None,
fifo_timeout=0,
actors=None,
):
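""" Internal helper used by submit, map, get and related methods.
Packs the task graph together with restrictions, priorities, retries and
resources into a single ``update-graph`` message for the scheduler and
returns a dict mapping each requested key to a Future.
"""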
with self._refcount_lock:
if resources:
resources = self._expand_resources(
resources, all_keys=itertools.chain(dsk, keys)
)
resources = {tokey(k): v for k, v in resources.items()}
if retries:
retries = self._expand_retries(
retries, all_keys=itertools.chain(dsk, keys)
)
if actors is not None and actors is not True and actors is not False:
actors = list(self._expand_key(actors))
keyset = set(keys)
values = {
k: v
for k, v in dsk.items()
if isinstance(v, Future) and k not in keyset
}
if values:
dsk = subs_multiple(dsk, values)
d = {k: unpack_remotedata(v, byte_keys=True) for k, v in dsk.items()}
extra_futures = set.union(*[v[1] for v in d.values()]) if d else set()
extra_keys = {tokey(future.key) for future in extra_futures}
dsk2 = str_graph({k: v[0] for k, v in d.items()}, extra_keys)
dsk3 = {k: v for k, v in dsk2.items() if k is not v}
for future in extra_futures:
if future.client is not self:
msg = "Inputs contain futures that were created by another client."
raise ValueError(msg)
if restrictions:
restrictions = keymap(tokey, restrictions)
restrictions = valmap(list, restrictions)
if loose_restrictions is not None:
loose_restrictions = list(map(tokey, loose_restrictions))
future_dependencies = {
tokey(k): {tokey(f.key) for f in v[1]} for k, v in d.items()
}
for s in future_dependencies.values():
for v in s:
if v not in self.futures:
raise CancelledError(v)
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
if priority is None:
priority = dask.order.order(dsk, dependencies=dependencies)
priority = keymap(tokey, priority)
dependencies = {
tokey(k): [tokey(dep) for dep in deps]
for k, deps in dependencies.items()
if deps
}
for k, deps in future_dependencies.items():
if deps:
dependencies[k] = list(set(dependencies.get(k, ())) | deps)
if isinstance(retries, Number) and retries > 0:
retries = {k: retries for k in dsk3}
futures = {key: Future(key, self, inform=False) for key in keyset}
self._send_to_scheduler(
{
"op": "update-graph",
"tasks": valmap(dumps_task, dsk3),
"dependencies": dependencies,
"keys": list(map(tokey, keys)),
"restrictions": restrictions or {},
"loose_restrictions": loose_restrictions,
"priority": priority,
"user_priority": user_priority,
"resources": resources,
"submitting_task": getattr(thread_state, "key", None),
"retries": retries,
"fifo_timeout": fifo_timeout,
"actors": actors,
}
)
return futures
def get(
self,
dsk,
keys,
restrictions=None,
loose_restrictions=None,
resources=None,
sync=True,
asynchronous=None,
direct=None,
retries=None,
priority=0,
fifo_timeout="60s",
actors=None,
**kwargs,
):
""" Compute dask graph
Parameters
----------
dsk: dict
keys: object, or nested lists of objects
restrictions: dict (optional)
A mapping of {key: {set of worker hostnames}} that restricts where
jobs can take place
retries: int (default to 0)
Number of allowed automatic retries if computing a result fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
sync: bool (optional)
Returns Futures if False or concrete values if True (default).
direct: bool
Whether or not to connect directly to the workers, or to ask
the scheduler to serve as intermediary. This can also be set when
creating the Client.
Examples
--------
>>> from operator import add # doctest: +SKIP
>>> c = Client('127.0.0.1:8787') # doctest: +SKIP
>>> c.get({'x': (add, 1, 2)}, 'x') # doctest: +SKIP
3
See Also
--------
Client.compute: Compute asynchronous collections
"""
futures = self._graph_to_futures(
dsk,
keys=set(flatten([keys])),
restrictions=restrictions,
loose_restrictions=loose_restrictions,
resources=resources,
fifo_timeout=fifo_timeout,
retries=retries,
user_priority=priority,
actors=actors,
)
packed = pack_data(keys, futures)
if sync:
if getattr(thread_state, "key", False):
try:
secede()
should_rejoin = True
except Exception:
should_rejoin = False
try:
results = self.gather(packed, asynchronous=asynchronous, direct=direct)
finally:
for f in futures.values():
f.release()
if getattr(thread_state, "key", False) and should_rejoin:
rejoin()
return results
return packed
def _optimize_insert_futures(self, dsk, keys):
""" Replace known keys in dask graph with Futures
When given a Dask graph that might have overlapping keys with our known
results we replace the values of that graph with futures. This can be
used as an optimization to avoid recomputation.
This returns the same graph if unchanged but a new graph if any changes
were necessary.
"""
with self._refcount_lock:
changed = False
for key in list(dsk):
if tokey(key) in self.futures:
if not changed:
changed = True
dsk = ensure_dict(dsk)
dsk[key] = Future(key, self, inform=False)
if changed:
dsk, _ = dask.optimization.cull(dsk, keys)
return dsk
def normalize_collection(self, collection):
"""
Replace collection's tasks by already existing futures if they exist
This normalizes the tasks within a collections task graph against the
known futures within the scheduler. It returns a copy of the
collection with a task graph that includes the overlapping futures.
Examples
--------
>>> len(x.__dask_graph__()) # x is a dask collection with 100 tasks # doctest: +SKIP
100
>>> set(client.futures).intersection(x.__dask_graph__()) # some overlap exists # doctest: +SKIP
10
>>> x = client.normalize_collection(x) # doctest: +SKIP
>>> len(x.__dask_graph__()) # smaller computational graph # doctest: +SKIP
20
See Also
--------
Client.persist: trigger computation of collection's tasks
"""
dsk_orig = collection.__dask_graph__()
dsk = self._optimize_insert_futures(dsk_orig, collection.__dask_keys__())
if dsk is dsk_orig:
return collection
else:
return redict_collection(collection, dsk)
def compute(
self,
collections,
sync=False,
optimize_graph=True,
workers=None,
allow_other_workers=False,
resources=None,
retries=0,
priority=0,
fifo_timeout="60s",
actors=None,
traverse=True,
**kwargs,
):
""" Compute dask collections on cluster
Parameters
----------
collections: iterable of dask objects or single dask object
Collections like dask.array or dataframe or dask.value objects
sync: bool (optional)
Returns Futures if False (default) or concrete values if True
optimize_graph: bool
Whether or not to optimize the underlying graphs
workers: str, list, dict
Which workers can run which parts of the computation
If a string or list then the output collections will run on the listed
workers, but other sub-computations can run anywhere
If a dict then keys should be (tuples of) collections and values
should be addresses or lists.
allow_other_workers: bool, list
If True then all restrictions in workers= are considered loose
If a list then only the keys for the listed collections are loose
retries: int (defaults to 0)
Number of allowed automatic retries if computing a result fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: timedelta str (defaults to '60s')
Allowed amount of time between calls to consider the same priority
traverse: bool (defaults to True)
By default dask traverses builtin python collections looking for
dask objects passed to ``compute``. For large collections this can
be expensive. If none of the arguments contain any dask objects,
set ``traverse=False`` to avoid doing this traversal.
resources: dict (defaults to {})
Defines the `resources` these tasks require on the worker. Can
specify global resources (``{'GPU': 2}``), or per-task resources
(``{'x': {'GPU': 1}, 'y': {'SSD': 4}}``), but not both.
See :doc:`worker resources <resources>` for details on defining
resources.
actors: bool or dict (default None)
Whether these tasks should exist on the worker as stateful actors.
Specified on a global (True/False) or per-task (``{'x': True,
'y': False}``) basis. See :doc:`actors` for additional details.
**kwargs:
Options to pass to the graph optimize calls
Returns
-------
List of Futures if input is a sequence, or a single future otherwise
Examples
--------
>>> from dask import delayed
>>> from operator import add
>>> x = delayed(add)(1, 2)
>>> y = delayed(add)(x, x)
>>> xx, yy = client.compute([x, y]) # doctest: +SKIP
>>> xx # doctest: +SKIP
<Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>
>>> xx.result() # doctest: +SKIP
3
>>> yy.result() # doctest: +SKIP
6
Also support single arguments
>>> xx = client.compute(x) # doctest: +SKIP
See Also
--------
Client.get: Normal synchronous dask.get function
"""
if isinstance(collections, (list, tuple, set, frozenset)):
singleton = False
else:
collections = [collections]
singleton = True
if traverse:
collections = tuple(
dask.delayed(a)
if isinstance(a, (list, set, tuple, dict, Iterator))
else a
for a in collections
)
variables = [a for a in collections if dask.is_dask_collection(a)]
dsk = self.collections_to_dsk(variables, optimize_graph, **kwargs)
names = ["finalize-%s" % tokenize(v) for v in variables]
dsk2 = {}
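# Build one "finalize" task per collection that reduces that collection's
# keys to a concrete result; when a collection is a single key with a
# trivial post-compute step, reuse that key directly instead of wrapping it.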
for i, (name, v) in enumerate(zip(names, variables)):
func, extra_args = v.__dask_postcompute__()
keys = v.__dask_keys__()
if func is single_key and len(keys) == 1 and not extra_args:
names[i] = keys[0]
else:
dsk2[name] = (func, keys) + extra_args
restrictions, loose_restrictions = self.get_restrictions(
collections, workers, allow_other_workers
)
if not isinstance(priority, Number):
priority = {k: p for c, p in priority.items() for k in self._expand_key(c)}
futures_dict = self._graph_to_futures(
merge(dsk2, dsk),
names,
restrictions,
loose_restrictions,
resources=resources,
retries=retries,
user_priority=priority,
fifo_timeout=fifo_timeout,
actors=actors,
)
i = 0
futures = []
for arg in collections:
if dask.is_dask_collection(arg):
futures.append(futures_dict[names[i]])
i += 1
else:
futures.append(arg)
if sync:
result = self.gather(futures)
else:
result = futures
if singleton:
return first(result)
else:
return result
def persist(
self,
collections,
optimize_graph=True,
workers=None,
allow_other_workers=None,
resources=None,
retries=None,
priority=0,
fifo_timeout="60s",
actors=None,
**kwargs,
):
""" Persist dask collections on cluster
Starts computation of the collection on the cluster in the background.
Provides a new dask collection that is semantically identical to the
previous one, but now based off of futures currently in execution.
Parameters
----------
collections: sequence or single dask object
Collections like dask.array or dataframe or dask.value objects
optimize_graph: bool
Whether or not to optimize the underlying graphs
workers: str, list, dict
Which workers can run which parts of the computation
If a string or list then the output collections will run on the listed
workers, but other sub-computations can run anywhere
If a dict then keys should be (tuples of) collections and values
should be addresses or lists.
allow_other_workers: bool, list
If True then all restrictions in workers= are considered loose
If a list then only the keys for the listed collections are loose
retries: int (defaults to 0)
Number of allowed automatic retries if computing a result fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: timedelta str (defaults to '60s')
Allowed amount of time between calls to consider the same priority
resources: dict (defaults to {})
Defines the `resources` these tasks require on the worker. Can
specify global resources (``{'GPU': 2}``), or per-task resources
(``{'x': {'GPU': 1}, 'y': {'SSD': 4}}``), but not both.
See :doc:`worker resources <resources>` for details on defining
resources.
actors: bool or dict (default None)
Whether these tasks should exist on the worker as stateful actors.
Specified on a global (True/False) or per-task (``{'x': True,
'y': False}``) basis. See :doc:`actors` for additional details.
**kwargs:
Options to pass to the graph optimize calls
Returns
-------
List of collections, or single collection, depending on type of input.
Examples
--------
>>> xx = client.persist(x) # doctest: +SKIP
>>> xx, yy = client.persist([x, y]) # doctest: +SKIP
See Also
--------
Client.compute
"""
if isinstance(collections, (tuple, list, set, frozenset)):
singleton = False
else:
singleton = True
collections = [collections]
assert all(map(dask.is_dask_collection, collections))
dsk = self.collections_to_dsk(collections, optimize_graph, **kwargs)
names = {k for c in collections for k in flatten(c.__dask_keys__())}
restrictions, loose_restrictions = self.get_restrictions(
collections, workers, allow_other_workers
)
if not isinstance(priority, Number):
priority = {k: p for c, p in priority.items() for k in self._expand_key(c)}
futures = self._graph_to_futures(
dsk,
names,
restrictions,
loose_restrictions,
resources=resources,
retries=retries,
user_priority=priority,
fifo_timeout=fifo_timeout,
actors=actors,
)
postpersists = [c.__dask_postpersist__() for c in collections]
result = [
func({k: futures[k] for k in flatten(c.__dask_keys__())}, *args)
for (func, args), c in zip(postpersists, collections)
]
if singleton:
return first(result)
else:
return result
async def _restart(self, timeout=no_default):
if timeout == no_default:
timeout = self._timeout * 2
self._send_to_scheduler({"op": "restart", "timeout": timeout})
self._restart_event = asyncio.Event()
try:
await asyncio.wait_for(
self._restart_event.wait(), self.loop.time() + timeout
)
except TimeoutError:
logger.error("Restart timed out after %f seconds", timeout)
pass
self.generation += 1
with self._refcount_lock:
self.refcount.clear()
return self
def restart(self, **kwargs):
""" Restart the distributed network
This kills all active work, deletes all data on the network, and
restarts the worker processes.
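Examples
--------
A minimal sketch, assuming an existing ``Client`` bound to ``client``
>>> client.restart() # doctest: +SKIP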
"""
return self.sync(self._restart, **kwargs)
async def _upload_file(self, filename, raise_on_error=True):
with open(filename, "rb") as f:
data = f.read()
_, fn = os.path.split(filename)
d = await self.scheduler.broadcast(
msg={"op": "upload_file", "filename": fn, "data": to_serialize(data)}
)
if any(v["status"] == "error" for v in d.values()):
exceptions = [v["exception"] for v in d.values() if v["status"] == "error"]
if raise_on_error:
raise exceptions[0]
else:
return exceptions[0]
assert all(len(data) == v["nbytes"] for v in d.values())
async def _upload_large_file(self, local_filename, remote_filename=None):
if remote_filename is None:
remote_filename = os.path.split(local_filename)[1]
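# Scatter the file contents to one worker, replicate the bytes across the
# cluster, then have every worker write its local copy to disk via run().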
with open(local_filename, "rb") as f:
data = f.read()
[future] = await self._scatter([data])
key = future.key
await self._replicate(future)
def dump_to_file(dask_worker=None):
if not os.path.isabs(remote_filename):
fn = os.path.join(dask_worker.local_directory, remote_filename)
else:
fn = remote_filename
with open(fn, "wb") as f:
f.write(dask_worker.data[key])
return len(dask_worker.data[key])
response = await self._run(dump_to_file)
assert all(len(data) == v for v in response.values())
def upload_file(self, filename, **kwargs):
""" Upload local package to workers
This sends a local file up to all worker nodes. This file is placed
into a temporary directory on Python's system path so any .py, .egg
or .zip files will be importable.
Parameters
----------
filename: string
Filename of .py, .egg or .zip file to send to workers
Examples
--------
>>> client.upload_file('mylibrary.egg') # doctest: +SKIP
>>> from mylibrary import myfunc # doctest: +SKIP
>>> L = c.map(myfunc, seq) # doctest: +SKIP
"""
result = self.sync(
self._upload_file, filename, raise_on_error=self.asynchronous, **kwargs
)
if isinstance(result, Exception):
raise result
else:
return result
async def _rebalance(self, futures=None, workers=None):
await _wait(futures)
keys = list({tokey(f.key) for f in self.futures_of(futures)})
result = await self.scheduler.rebalance(keys=keys, workers=workers)
if result["status"] == "missing-data":
raise ValueError(
f"During rebalance {len(result['keys'])} keys were found to be missing"
)
assert result["status"] == "OK"
def rebalance(self, futures=None, workers=None, **kwargs):
""" Rebalance data within network
Move data between workers to roughly balance memory burden. This
either affects a subset of the keys/workers or the entire network,
depending on keyword arguments.
This operation is generally not well tested against normal operation of
the scheduler. It is not recommended to use it while waiting on
computations.
Parameters
----------
futures: list, optional
A list of futures to balance, defaults all data
workers: list, optional
A list of workers on which to balance, defaults to all workers
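Examples
--------
Illustrative sketches only; ``client`` and ``futures`` are assumed to exist
>>> client.rebalance() # balance all data on the cluster # doctest: +SKIP
>>> client.rebalance(futures) # balance only these futures # doctest: +SKIP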
"""
return self.sync(self._rebalance, futures, workers, **kwargs)
async def _replicate(self, futures, n=None, workers=None, branching_factor=2):
futures = self.futures_of(futures)
await _wait(futures)
keys = {tokey(f.key) for f in futures}
await self.scheduler.replicate(
keys=list(keys), n=n, workers=workers, branching_factor=branching_factor
)
def replicate(self, futures, n=None, workers=None, branching_factor=2, **kwargs):
""" Set replication of futures within network
Copy data onto many workers. This helps to broadcast frequently
accessed data and it helps to improve resilience.
This performs a tree copy of the data throughout the network
individually on each piece of data. This operation blocks until
complete. It does not guarantee replication of data to future workers.
Parameters
----------
futures: list of futures
Futures we wish to replicate
n: int, optional
Number of processes on the cluster on which to replicate the data.
Defaults to all.
workers: list of worker addresses
Workers on which we want to restrict the replication.
Defaults to all.
branching_factor: int, optional
The number of workers that can copy data in each generation
Examples
--------
>>> x = c.submit(func, *args) # doctest: +SKIP
>>> c.replicate([x]) # send to all workers # doctest: +SKIP
>>> c.replicate([x], n=3) # send to three workers # doctest: +SKIP
>>> c.replicate([x], workers=['alice', 'bob']) # send to specific # doctest: +SKIP
>>> c.replicate([x], n=1, workers=['alice', 'bob']) # send to one of specific workers # doctest: +SKIP
>>> c.replicate([x], n=1) # reduce replications # doctest: +SKIP
See also
--------
Client.rebalance
"""
return self.sync(
self._replicate,
futures,
n=n,
workers=workers,
branching_factor=branching_factor,
**kwargs,
)
def nthreads(self, workers=None, **kwargs):
""" The number of threads/cores available on each worker node
Parameters
----------
workers: list (optional)
A list of workers that we care about specifically.
Leave empty to receive information about all workers.
Examples
--------
>>> c.nthreads() # doctest: +SKIP
{'192.168.1.141:46784': 8,
'192.167.1.142:47548': 8,
'192.167.1.143:47329': 8,
'192.167.1.144:37297': 8}
See Also
--------
Client.who_has
Client.has_what
"""
if isinstance(workers, tuple) and all(
isinstance(i, (str, tuple)) for i in workers
):
workers = list(workers)
if workers is not None and not isinstance(workers, (tuple, list, set)):
workers = [workers]
return self.sync(self.scheduler.ncores, workers=workers, **kwargs)
ncores = nthreads
def who_has(self, futures=None, **kwargs):
""" The workers storing each future's data
Parameters
----------
futures: list (optional)
A list of futures, defaults to all data
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> wait([x, y, z]) # doctest: +SKIP
>>> c.who_has() # doctest: +SKIP
{'inc-1c8dd6be1c21646c71f76c16d09304ea': ['192.168.1.141:46784'],
'inc-1e297fc27658d7b67b3a758f16bcf47a': ['192.168.1.141:46784'],
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b': ['192.168.1.141:46784']}
>>> c.who_has([x, y]) # doctest: +SKIP
{'inc-1c8dd6be1c21646c71f76c16d09304ea': ['192.168.1.141:46784'],
'inc-1e297fc27658d7b67b3a758f16bcf47a': ['192.168.1.141:46784']}
See Also
--------
Client.has_what
Client.nthreads
"""
if futures is not None:
futures = self.futures_of(futures)
keys = list(map(tokey, {f.key for f in futures}))
else:
keys = None
return self.sync(self.scheduler.who_has, keys=keys, **kwargs)
def has_what(self, workers=None, **kwargs):
""" Which keys are held by which workers
This returns the keys of the data that are held in each worker's
memory.
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> wait([x, y, z]) # doctest: +SKIP
>>> c.has_what() # doctest: +SKIP
{'192.168.1.141:46784': ['inc-1c8dd6be1c21646c71f76c16d09304ea',
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b',
'inc-1e297fc27658d7b67b3a758f16bcf47a']}
See Also
--------
Client.who_has
Client.nthreads
Client.processing
"""
if isinstance(workers, tuple) and all(
isinstance(i, (str, tuple)) for i in workers
):
workers = list(workers)
if workers is not None and not isinstance(workers, (tuple, list, set)):
workers = [workers]
return self.sync(self.scheduler.has_what, workers=workers, **kwargs)
def processing(self, workers=None):
""" The tasks currently running on each worker
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> c.processing() # doctest: +SKIP
{'192.168.1.141:46784': ['inc-1c8dd6be1c21646c71f76c16d09304ea',
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b',
'inc-1e297fc27658d7b67b3a758f16bcf47a']}
See Also
--------
Client.who_has
Client.has_what
Client.nthreads
"""
if isinstance(workers, tuple) and all(
isinstance(i, (str, tuple)) for i in workers
):
workers = list(workers)
if workers is not None and not isinstance(workers, (tuple, list, set)):
workers = [workers]
return self.sync(self.scheduler.processing, workers=workers)
def nbytes(self, keys=None, summary=True, **kwargs):
""" The bytes taken up by each key on the cluster
This is as measured by ``sys.getsizeof`` which may not accurately
reflect the true cost.
Parameters
----------
keys: list (optional)
A list of keys, defaults to all keys
summary: boolean (optional)
Summarize keys into key types
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> c.nbytes(summary=False) # doctest: +SKIP
{'inc-1c8dd6be1c21646c71f76c16d09304ea': 28,
'inc-1e297fc27658d7b67b3a758f16bcf47a': 28,
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b': 28}
>>> c.nbytes(summary=True) # doctest: +SKIP
{'inc': 84}
See Also
--------
Client.who_has
"""
return self.sync(self.scheduler.nbytes, keys=keys, summary=summary, **kwargs)
def call_stack(self, futures=None, keys=None):
""" The actively running call stack of all relevant keys
You can specify data of interest either by providing futures or
collections in the ``futures=`` keyword or a list of explicit keys in
the ``keys=`` keyword. If neither are provided then all call stacks
will be returned.
Parameters
----------
futures: list (optional)
List of futures, defaults to all data
keys: list (optional)
List of key names, defaults to all data
Examples
--------
>>> df = dd.read_parquet(...).persist() # doctest: +SKIP
>>> client.call_stack(df) # call on collections
>>> client.call_stack() # Or call with no arguments for all activity # doctest: +SKIP
"""
keys = keys or []
if futures is not None:
futures = self.futures_of(futures)
keys += list(map(tokey, {f.key for f in futures}))
return self.sync(self.scheduler.call_stack, keys=keys or None)
def profile(
self,
key=None,
start=None,
stop=None,
workers=None,
merge_workers=True,
plot=False,
filename=None,
server=False,
scheduler=False,
):
""" Collect statistical profiling information about recent work
Parameters
----------
key: str
Key prefix to select, this is typically a function name like 'inc'
Leave as None to collect all data
start: time
stop: time
workers: list
List of workers to restrict profile information
server : bool
If true, return the profile of the worker's administrative thread
rather than the worker threads.
This is useful when profiling Dask itself, rather than user code.
scheduler: bool
If true, return the profile information from the scheduler's
administrative thread rather than the workers.
This is useful when profiling Dask's scheduling itself.
plot: boolean or string
Whether or not to return a plot object
filename: str
Filename to save the plot
Examples
--------
>>> client.profile() # call on collections
>>> client.profile(filename='dask-profile.html') # save to html file
"""
return self.sync(
self._profile,
key=key,
workers=workers,
merge_workers=merge_workers,
start=start,
stop=stop,
plot=plot,
filename=filename,
server=server,
scheduler=scheduler,
)
async def _profile(
self,
key=None,
start=None,
stop=None,
workers=None,
merge_workers=True,
plot=False,
filename=None,
server=False,
scheduler=False,
):
if isinstance(workers, (str, Number)):
workers = [workers]
state = await self.scheduler.profile(
key=key,
workers=workers,
merge_workers=merge_workers,
start=start,
stop=stop,
server=server,
scheduler=scheduler,
)
if filename:
plot = True
if plot:
from . import profile
data = profile.plot_data(state)
figure, source = profile.plot_figure(data, sizing_mode="stretch_both")
if plot == "save" and not filename:
filename = "dask-profile.html"
if filename:
from bokeh.plotting import output_file, save
output_file(filename=filename, title="Dask Profile")
save(figure, filename=filename)
return (state, figure)
else:
return state
def scheduler_info(self, **kwargs):
""" Basic information about the workers in the cluster
Examples
--------
>>> c.scheduler_info() # doctest: +SKIP
{'id': '2de2b6da-69ee-11e6-ab6a-e82aea155996',
'services': {},
'type': 'Scheduler',
'workers': {'127.0.0.1:40575': {'active': 0,
'last-seen': 1472038237.4845693,
'name': '127.0.0.1:40575',
'services': {},
'stored': 0,
'time-delay': 0.0061032772064208984}}}
"""
if not self.asynchronous:
self.sync(self._update_scheduler_info)
return self._scheduler_identity
def write_scheduler_file(self, scheduler_file):
""" Write the scheduler information to a json file.
This facilitates easy sharing of scheduler information using a file
system. The scheduler file can be used to instantiate a second Client
using the same scheduler.
Parameters
----------
scheduler_file: str
Path at which to write the scheduler file.
Examples
--------
>>> client = Client() # doctest: +SKIP
>>> client.write_scheduler_file('scheduler.json') # doctest: +SKIP
# connect to previous client's scheduler
>>> client2 = Client(scheduler_file='scheduler.json') # doctest: +SKIP
"""
if self.scheduler_file:
raise ValueError("Scheduler file already set")
else:
self.scheduler_file = scheduler_file
with open(self.scheduler_file, "w") as f:
json.dump(self.scheduler_info(), f, indent=2)
def get_metadata(self, keys, default=no_default):
""" Get arbitrary metadata from scheduler
See set_metadata for the full docstring with examples
Parameters
----------
keys: key or list
Key to access. If a list then gets within a nested collection
default: optional
If the key does not exist then return this value instead.
If not provided then this raises a KeyError if the key is not
present
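Examples
--------
A short sketch mirroring the ``set_metadata`` examples; ``client`` is assumed
>>> client.set_metadata('x', 123) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
123
>>> client.get_metadata('y', default=None) # doctest: +SKIP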
See also
--------
Client.set_metadata
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
return self.sync(self.scheduler.get_metadata, keys=keys, default=default)
def get_scheduler_logs(self, n=None):
""" Get logs from scheduler
Parameters
----------
n : int
Number of logs to retrieve. Maxes out at 10000 by default,
configurable in config.yaml::log-length
Returns
-------
Logs in reversed order (newest first)
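Examples
--------
A minimal sketch, assuming a connected ``client``
>>> client.get_scheduler_logs(n=5) # five most recent entries # doctest: +SKIP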
"""
return self.sync(self.scheduler.logs, n=n)
def get_worker_logs(self, n=None, workers=None, nanny=False):
""" Get logs from workers
Parameters
----------
n : int
Number of logs to retrieve. Maxes out at 10000 by default,
configurable in config.yaml::log-length
workers : iterable
List of worker addresses to retrieve. Gets all workers by default.
nanny : bool, default False
Whether to get the logs from the workers (False) or the nannies (True). If
specified, the addresses in `workers` should still be the worker addresses,
not the nanny addresses.
Returns
-------
Dictionary mapping worker address to logs.
Logs are returned in reversed order (newest first)
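Examples
--------
Illustrative only, assuming a connected ``client``
>>> client.get_worker_logs(n=5) # doctest: +SKIP
>>> client.get_worker_logs(nanny=True) # fetch the nanny logs instead # doctest: +SKIP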
"""
return self.sync(self.scheduler.worker_logs, n=n, workers=workers, nanny=nanny)
def retire_workers(self, workers=None, close_workers=True, **kwargs):
""" Retire certain workers on the scheduler
See dask.distributed.Scheduler.retire_workers for the full docstring.
Examples
--------
You can get information about active workers using the following:
>>> workers = client.scheduler_info()['workers']
From that list you may want to select some workers to close
>>> client.retire_workers(workers=['tcp://address:port', ...])
See Also
--------
dask.distributed.Scheduler.retire_workers
"""
return self.sync(
self.scheduler.retire_workers,
workers=workers,
close_workers=close_workers,
**kwargs,
)
def set_metadata(self, key, value):
""" Set arbitrary metadata in the scheduler
This allows you to store small amounts of data on the central scheduler
process for administrative purposes. Data should be msgpack
serializable (ints, strings, lists, dicts)
If the key corresponds to a task then that key will be cleaned up when
the task is forgotten by the scheduler.
If the key is a list then it will be assumed that you want to index
into a nested dictionary structure using those keys. For example if
you call the following::
>>> client.set_metadata(['a', 'b', 'c'], 123)
Then this is the same as setting
>>> scheduler.task_metadata['a']['b']['c'] = 123
The lower level dictionaries will be created on demand.
Examples
--------
>>> client.set_metadata('x', 123) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
123
>>> client.set_metadata(['x', 'y'], 123) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
{'y': 123}
>>> client.set_metadata(['x', 'w', 'z'], 456) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
{'y': 123, 'w': {'z': 456}}
>>> client.get_metadata(['x', 'w']) # doctest: +SKIP
{'z': 456}
See Also
--------
get_metadata
"""
if not isinstance(key, list):
key = (key,)
return self.sync(self.scheduler.set_metadata, keys=key, value=value)
def get_versions(self, check=False, packages=[]):
""" Return version info for the scheduler, all workers and myself
Parameters
----------
check : boolean, default False
raise ValueError if all required & optional packages
do not match
packages : List[str]
Extra package names to check
Examples
--------
>>> c.get_versions() # doctest: +SKIP
>>> c.get_versions(packages=['sklearn', 'geopandas']) # doctest: +SKIP
"""
return self.sync(self._get_versions, check=check, packages=packages)
async def _get_versions(self, check=False, packages=[]):
client = version_module.get_versions(packages=packages)
try:
scheduler = await self.scheduler.versions(packages=packages)
except KeyError:
scheduler = None
except TypeError: # packages keyword not supported
scheduler = await self.scheduler.versions() # this raises
workers = await self.scheduler.broadcast(
msg={"op": "versions", "packages": packages}
)
result = {"scheduler": scheduler, "workers": workers, "client": client}
if check:
msg = version_module.error_message(scheduler, workers, client)
if msg:
raise ValueError(msg)
return result
def futures_of(self, futures):
return futures_of(futures, client=self)
def start_ipython(self, *args, **kwargs):
raise Exception("Method moved to start_ipython_workers")
async def _start_ipython_workers(self, workers):
if workers is None:
workers = await self.scheduler.ncores()
responses = await self.scheduler.broadcast(
msg=dict(op="start_ipython"), workers=workers
)
return workers, responses
def start_ipython_workers(
self, workers=None, magic_names=False, qtconsole=False, qtconsole_args=None
):
""" Start IPython kernels on workers
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
magic_names: str or list(str) (optional)
If defined, register IPython magics with these names for
executing code on the workers. If the string has an asterisk then
expand the asterisk into 0, 1, ..., n for n workers
qtconsole: bool (optional)
If True, launch a Jupyter QtConsole connected to the worker(s).
qtconsole_args: list(str) (optional)
Additional arguments to pass to the qtconsole on startup.
Examples
--------
>>> info = c.start_ipython_workers() # doctest: +SKIP
>>> %remote info['192.168.1.101:5752'] worker.data # doctest: +SKIP
{'x': 1, 'y': 100}
>>> c.start_ipython_workers('192.168.1.101:5752', magic_names='w') # doctest: +SKIP
>>> %w worker.data # doctest: +SKIP
{'x': 1, 'y': 100}
>>> c.start_ipython_workers('192.168.1.101:5752', qtconsole=True) # doctest: +SKIP
Add an asterisk * in magic names to add one magic per worker
>>> c.start_ipython_workers(magic_names='w_*') # doctest: +SKIP
>>> %w_0 worker.data # doctest: +SKIP
{'x': 1, 'y': 100}
>>> %w_1 worker.data # doctest: +SKIP
{'z': 5}
Returns
-------
iter_connection_info: list
List of connection_info dicts containing info necessary
to connect Jupyter clients to the workers.
See Also
--------
Client.start_ipython_scheduler: start ipython on the scheduler
"""
if isinstance(workers, (str, Number)):
workers = [workers]
(workers, info_dict) = sync(self.loop, self._start_ipython_workers, workers)
if magic_names and isinstance(magic_names, str):
if "*" in magic_names:
magic_names = [
magic_names.replace("*", str(i)) for i in range(len(workers))
]
else:
magic_names = [magic_names]
if "IPython" in sys.modules:
from ._ipython_utils import register_remote_magic
register_remote_magic()
if magic_names:
from ._ipython_utils import register_worker_magic
for worker, magic_name in zip(workers, magic_names):
connection_info = info_dict[worker]
register_worker_magic(connection_info, magic_name)
if qtconsole:
from ._ipython_utils import connect_qtconsole
for worker, connection_info in info_dict.items():
name = "dask-" + worker.replace(":", "-").replace("/", "-")
connect_qtconsole(connection_info, name=name, extra_args=qtconsole_args)
return info_dict
def start_ipython_scheduler(
self, magic_name="scheduler_if_ipython", qtconsole=False, qtconsole_args=None
):
""" Start IPython kernel on the scheduler
Parameters
----------
magic_name: str or None (optional)
If defined, register IPython magic with this name for
executing code on the scheduler.
If not defined, register %scheduler magic if IPython is running.
qtconsole: bool (optional)
If True, launch a Jupyter QtConsole connected to the worker(s).
qtconsole_args: list(str) (optional)
Additional arguments to pass to the qtconsole on startup.
Examples
--------
>>> c.start_ipython_scheduler() # doctest: +SKIP
>>> %scheduler scheduler.processing # doctest: +SKIP
{'127.0.0.1:3595': {'inc-1', 'inc-2'},
'127.0.0.1:53589': {'inc-2', 'add-5'}}
>>> c.start_ipython_scheduler(qtconsole=True) # doctest: +SKIP
Returns
-------
connection_info: dict
connection_info dict containing info necessary
to connect Jupyter clients to the scheduler.
See Also
--------
Client.start_ipython_workers: Start IPython on the workers
"""
info = sync(self.loop, self.scheduler.start_ipython)
if magic_name == "scheduler_if_ipython":
# default to %scheduler if in IPython, no magic otherwise
in_ipython = False
if "IPython" in sys.modules:
from IPython import get_ipython
in_ipython = bool(get_ipython())
if in_ipython:
magic_name = "scheduler"
else:
magic_name = None
if magic_name:
from ._ipython_utils import register_worker_magic
register_worker_magic(info, magic_name)
if qtconsole:
from ._ipython_utils import connect_qtconsole
connect_qtconsole(info, name="dask-scheduler", extra_args=qtconsole_args)
return info
@classmethod
def _expand_key(cls, k):
"""
Expand a user-provided task key specification, e.g. in a resources
or retries dictionary.
"""
if not isinstance(k, tuple):
k = (k,)
for kk in k:
if dask.is_dask_collection(kk):
for kkk in kk.__dask_keys__():
yield tokey(kkk)
else:
yield tokey(kk)
@classmethod
def _expand_retries(cls, retries, all_keys):
"""
Expand the user-provided "retries" specification
to a {task key: Integral} dictionary.
"""
if retries and isinstance(retries, dict):
result = {
name: value
for key, value in retries.items()
for name in cls._expand_key(key)
}
elif isinstance(retries, Integral):
# Each task unit may potentially fail, allow retrying all of them
result = {name: retries for name in all_keys}
else:
raise TypeError(
"`retries` should be an integer or dict, got %r" % (type(retries))
)
return keymap(tokey, result)
@classmethod
def _expand_resources(cls, resources, all_keys):
"""
Expand the user-provided "resources" specification
to a {task key: {resource name: Number}} dictionary.
"""
# Resources can either be a single dict such as {'GPU': 2},
# indicating a requirement for all keys, or a nested dict
# such as {'x': {'GPU': 1}, 'y': {'SSD': 4}} indicating
# per-key requirements
if not isinstance(resources, dict):
raise TypeError("`resources` should be a dict, got %r" % (type(resources)))
per_key_reqs = {}
global_reqs = {}
all_keys = list(all_keys)
for k, v in resources.items():
if isinstance(v, dict):
# It's a per-key requirement
per_key_reqs.update((kk, v) for kk in cls._expand_key(k))
else:
# It's a global requirement
global_reqs.update((kk, {k: v}) for kk in all_keys)
if global_reqs and per_key_reqs:
raise ValueError(
"cannot have both per-key and all-key requirements "
"in resources dict %r" % (resources,)
)
return global_reqs or per_key_reqs
@classmethod
def get_restrictions(cls, collections, workers, allow_other_workers):
""" Get restrictions from inputs to compute/persist """
if isinstance(workers, (str, tuple, list)):
workers = {tuple(collections): workers}
if isinstance(workers, dict):
restrictions = {}
for colls, ws in workers.items():
if isinstance(ws, str):
ws = [ws]
if dask.is_dask_collection(colls):
keys = flatten(colls.__dask_keys__())
else:
keys = list(
{k for c in flatten(colls) for k in flatten(c.__dask_keys__())}
)
restrictions.update({k: ws for k in keys})
else:
restrictions = {}
if allow_other_workers is True:
loose_restrictions = list(restrictions)
elif allow_other_workers:
loose_restrictions = list(
{k for c in flatten(allow_other_workers) for k in c.__dask_keys__()}
)
else:
loose_restrictions = []
return restrictions, loose_restrictions
@staticmethod
def collections_to_dsk(collections, *args, **kwargs):
return collections_to_dsk(collections, *args, **kwargs)
def get_task_stream(
self,
start=None,
stop=None,
count=None,
plot=False,
filename="task-stream.html",
bokeh_resources=None,
):
""" Get task stream data from scheduler
This collects the data present in the diagnostic "Task Stream" plot on
the dashboard. It includes the start, stop, transfer, and
deserialization time of every task for a particular duration.
Note that the task stream diagnostic does not run by default. You may
wish to call this function once before you start work to ensure that
things start recording, and then again after you have completed.
Parameters
----------
start: Number or string
When you want to start recording
If a number it should be the result of calling time()
If a string then it should be a time difference before now,
like '60s' or '500 ms'
stop: Number or string
When you want to stop recording
count: int
The number of desired records, ignored if both start and stop are
specified
plot: boolean, str
If true then also return a Bokeh figure
If plot == 'save' then save the figure to a file
filename: str (optional)
The filename to save to if you set ``plot='save'``
bokeh_resources: bokeh.resources.Resources (optional)
Specifies if the resource component is INLINE or CDN
Examples
--------
>>> client.get_task_stream() # prime plugin if not already connected
>>> x.compute() # do some work
>>> client.get_task_stream()
[{'task': ...,
'type': ...,
'thread': ...,
...}]
Pass the ``plot=True`` or ``plot='save'`` keywords to get back a Bokeh
figure
>>> data, figure = client.get_task_stream(plot='save', filename='myfile.html')
Alternatively consider the context manager
>>> from dask.distributed import get_task_stream
>>> with get_task_stream() as ts:
... x.compute()
>>> ts.data
[...]
Returns
-------
L: List[Dict]
See Also
--------
get_task_stream: a context manager version of this method
"""
return self.sync(
self._get_task_stream,
start=start,
stop=stop,
count=count,
plot=plot,
filename=filename,
bokeh_resources=bokeh_resources,
)
async def _get_task_stream(
self,
start=None,
stop=None,
count=None,
plot=False,
filename="task-stream.html",
bokeh_resources=None,
):
msgs = await self.scheduler.get_task_stream(start=start, stop=stop, count=count)
if plot:
from .diagnostics.task_stream import rectangles
rects = rectangles(msgs)
from .dashboard.components.scheduler import task_stream_figure
source, figure = task_stream_figure(sizing_mode="stretch_both")
source.data.update(rects)
if plot == "save":
from bokeh.plotting import save, output_file
output_file(filename=filename, title="Dask Task Stream")
save(figure, filename=filename, resources=bokeh_resources)
return (msgs, figure)
else:
return msgs
def register_worker_callbacks(self, setup=None):
"""
Registers a setup callback function for all current and future workers.
This registers a new setup function for workers in this cluster. The
function will run immediately on all currently connected workers. It
will also be run upon connection by any workers that are added in the
future. Multiple setup functions can be registered - these will be
called in the order they were added.
If the function takes an input argument named ``dask_worker`` then
that variable will be populated with the worker itself.
Parameters
----------
setup : callable(dask_worker: Worker) -> None
Function to register and run on all workers
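Examples
--------
A hypothetical setup callback; the printed attribute is illustrative only
>>> def setup(dask_worker):
...     print("worker directory:", dask_worker.local_directory)
>>> client.register_worker_callbacks(setup) # doctest: +SKIP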
"""
return self.register_worker_plugin(_WorkerSetupPlugin(setup))
async def _register_worker_plugin(self, plugin=None, name=None):
responses = await self.scheduler.register_worker_plugin(
plugin=dumps(plugin), name=name
)
for response in responses.values():
if response["status"] == "error":
exc = response["exception"]
typ = type(exc)
tb = response["traceback"]
raise exc.with_traceback(tb)
return responses
def register_worker_plugin(self, plugin=None, name=None):
"""
Registers a lifecycle worker plugin for all current and future workers.
This registers a new object to handle setup, task state transitions and
teardown for workers in this cluster. The plugin will instantiate itself
on all currently connected workers. It will also be run on any worker
that connects in the future.
The plugin may include methods ``setup``, ``teardown``, and
``transition``. See the ``dask.distributed.WorkerPlugin`` class or the
examples below for the interface and docstrings. It must be
serializable with the pickle or cloudpickle modules.
If the plugin has a ``name`` attribute, or if the ``name=`` keyword is
used, then that will control idempotency. If a plugin with that name has
already been registered then any future plugins with that name will not run.
For alternatives to plugins, you may also wish to look into preload
scripts.
Parameters
----------
plugin: WorkerPlugin
The plugin object to pass to the workers
name: str, optional
A name for the plugin.
Registering a plugin with the same name will have no effect.
Examples
--------
>>> class MyPlugin(WorkerPlugin):
... def __init__(self, *args, **kwargs):
... pass # the constructor is up to you
... def setup(self, worker: dask.distributed.Worker):
... pass
... def teardown(self, worker: dask.distributed.Worker):
... pass
... def transition(self, key: str, start: str, finish: str, **kwargs):
... pass
>>> plugin = MyPlugin(1, 2, 3)
>>> client.register_worker_plugin(plugin)
You can get access to the plugin with the ``get_worker`` function
>>> client.register_worker_plugin(other_plugin, name='my-plugin')
>>> def f():
... worker = get_worker()
... plugin = worker.plugins['my-plugin']
... return plugin.my_state
>>> future = client.run(f)
See Also
--------
distributed.WorkerPlugin
"""
return self.sync(self._register_worker_plugin, plugin=plugin, name=name)
class _WorkerSetupPlugin(WorkerPlugin):
""" This is used to support older setup functions as callbacks """
def __init__(self, setup):
self._setup = setup
def setup(self, worker):
if has_keyword(self._setup, "dask_worker"):
return self._setup(dask_worker=worker)
else:
return self._setup()
class Executor(Client):
""" Deprecated: see Client """
def __init__(self, *args, **kwargs):
warnings.warn("Executor has been renamed to Client")
super(Executor, self).__init__(*args, **kwargs)
def CompatibleExecutor(*args, **kwargs):
raise Exception("This has been moved to the Client.get_executor() method")
ALL_COMPLETED = "ALL_COMPLETED"
FIRST_COMPLETED = "FIRST_COMPLETED"
async def _wait(fs, timeout=None, return_when=ALL_COMPLETED):
if timeout is not None and not isinstance(timeout, Number):
raise TypeError(
"timeout= keyword received a non-numeric value.\n"
"Beware that wait expects a list of values\n"
" Bad: wait(x, y, z)\n"
" Good: wait([x, y, z])"
)
fs = futures_of(fs)
if return_when == ALL_COMPLETED:
wait_for = All
elif return_when == FIRST_COMPLETED:
wait_for = Any
else:
raise NotImplementedError(
"Only return_when='ALL_COMPLETED' and 'FIRST_COMPLETED' are supported"
)
future = wait_for({f._state.wait() for f in fs})
if timeout is not None:
future = asyncio.wait_for(future, timeout)
await future
done, not_done = (
{fu for fu in fs if fu.status != "pending"},
{fu for fu in fs if fu.status == "pending"},
)
cancelled = [f.key for f in done if f.status == "cancelled"]
if cancelled:
raise CancelledError(cancelled)
return DoneAndNotDoneFutures(done, not_done)
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
""" Wait until all/any futures are finished
Parameters
----------
fs: list of futures
timeout: number, optional
Time in seconds after which to raise a ``dask.distributed.TimeoutError``
return_when: str, optional
One of `ALL_COMPLETED` or `FIRST_COMPLETED`
Returns
-------
Named tuple of completed, not completed
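Examples
--------
Illustrative only; ``x``, ``y`` and ``z`` are assumed futures
>>> done, not_done = wait([x, y, z]) # doctest: +SKIP
>>> done, not_done = wait([x, y, z], return_when='FIRST_COMPLETED') # doctest: +SKIP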
"""
client = default_client()
result = client.sync(_wait, fs, timeout=timeout, return_when=return_when)
return result
async def _as_completed(fs, queue):
fs = futures_of(fs)
groups = groupby(lambda f: f.key, fs)
firsts = [v[0] for v in groups.values()]
wait_iterator = gen.WaitIterator(
*map(asyncio.ensure_future, [f._state.wait() for f in firsts])
)
while not wait_iterator.done():
await wait_iterator.next()
# TODO: handle case of restarted futures
future = firsts[wait_iterator.current_index]
for f in groups[future.key]:
queue.put_nowait(f)
async def _first_completed(futures):
""" Return a single completed future
See Also:
_as_completed
"""
q = asyncio.Queue()
await _as_completed(futures, q)
result = await q.get()
return result
class as_completed:
"""
Return futures in the order in which they complete
This returns an iterator that yields the input future objects in the order
in which they complete. Calling ``next`` on the iterator will block until
the next future completes, irrespective of order.
Additionally, you can also add more futures to this object during
computation with the ``.add`` method
Parameters
----------
futures: Collection of futures
A list of Future objects to be iterated over in the order in which they
complete
with_results: bool (False)
Whether to wait and include results of futures as well;
in this case `as_completed` yields a tuple of (future, result)
raise_errors: bool (True)
Whether we should raise when the result of a future raises an exception;
only affects behavior when `with_results=True`.
Examples
--------
>>> x, y, z = client.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> for future in as_completed([x, y, z]): # doctest: +SKIP
... print(future.result()) # doctest: +SKIP
3
2
4
Add more futures during computation
>>> x, y, z = client.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> ac = as_completed([x, y, z]) # doctest: +SKIP
>>> for future in ac: # doctest: +SKIP
... print(future.result()) # doctest: +SKIP
... if random.random() < 0.5: # doctest: +SKIP
... ac.add(c.submit(double, future)) # doctest: +SKIP
4
2
8
3
6
12
24
Optionally wait until the result has been gathered as well
>>> ac = as_completed([x, y, z], with_results=True) # doctest: +SKIP
>>> for future, result in ac: # doctest: +SKIP
... print(result) # doctest: +SKIP
2
4
3
"""
def __init__(self, futures=None, loop=None, with_results=False, raise_errors=True):
if futures is None:
futures = []
self.futures = defaultdict(lambda: 0)
self.queue = pyQueue()
self.lock = threading.Lock()
self.loop = loop or default_client().loop
self.thread_condition = threading.Condition()
self.with_results = with_results
self.raise_errors = raise_errors
if futures:
self.update(futures)
@property
def condition(self):
try:
return self._condition
except AttributeError:
self._condition = asyncio.Condition()
return self._condition
async def _track_future(self, future):
try:
await _wait(future)
except CancelledError:
pass
if self.with_results:
try:
result = await future._result(raiseit=False)
except CancelledError as exc:
result = exc
with self.lock:
if future in self.futures:
self.futures[future] -= 1
if not self.futures[future]:
del self.futures[future]
if self.with_results:
self.queue.put_nowait((future, result))
else:
self.queue.put_nowait(future)
async with self.condition:
self.condition.notify()
with self.thread_condition:
self.thread_condition.notify()
def update(self, futures):
""" Add multiple futures to the collection.
The added futures will emit from the iterator once they finish"""
with self.lock:
for f in futures:
if not isinstance(f, Future):
raise TypeError("Input must be a future, got %s" % f)
self.futures[f] += 1
self.loop.add_callback(self._track_future, f)
def add(self, future):
""" Add a future to the collection
This future will emit from the iterator once it finishes
"""
self.update((future,))
def is_empty(self):
"""Returns True if there no completed or computing futures"""
return not self.count()
def has_ready(self):
"""Returns True if there are completed futures available."""
return not self.queue.empty()
def count(self):
""" Return the number of futures yet to be returned
This includes both the number of futures still computing, as well as
those that are finished, but have not yet been returned from this
iterator.
"""
with self.lock:
return len(self.futures) + len(self.queue.queue)
def __repr__(self):
return "<as_completed: waiting={} done={}>".format(
len(self.futures), len(self.queue.queue)
)
def __iter__(self):
return self
def __aiter__(self):
return self
def _get_and_raise(self):
res = self.queue.get()
if self.with_results:
future, result = res
if self.raise_errors and future.status == "error":
typ, exc, tb = result
raise exc.with_traceback(tb)
return res
def __next__(self):
while self.queue.empty():
if self.is_empty():
raise StopIteration()
with self.thread_condition:
self.thread_condition.wait(timeout=0.100)
return self._get_and_raise()
async def __anext__(self):
if not self.futures and self.queue.empty():
raise StopAsyncIteration
while self.queue.empty():
if not self.futures:
raise StopAsyncIteration
async with self.condition:
await self.condition.wait()
return self._get_and_raise()
next = __next__
def next_batch(self, block=True):
""" Get the next batch of completed futures.
Parameters
----------
block: bool, optional
If True then wait until we have some result, otherwise return
immediately, even with an empty list. Defaults to True.
Examples
--------
>>> ac = as_completed(futures) # doctest: +SKIP
>>> client.gather(ac.next_batch()) # doctest: +SKIP
[4, 1, 3]
>>> client.gather(ac.next_batch(block=False)) # doctest: +SKIP
[]
Returns
-------
List of futures or (future, result) tuples
"""
if block:
batch = [next(self)]
else:
batch = []
while not self.queue.empty():
batch.append(self.queue.get())
return batch
def batches(self):
"""
Yield all finished futures at once rather than one-by-one
This returns an iterator of lists of futures or lists of
(future, result) tuples rather than individual futures or individual
(future, result) tuples. It will yield these as soon as possible
without waiting.
Examples
--------
>>> for batch in as_completed(futures).batches(): # doctest: +SKIP
... results = client.gather(batch)
... print(results)
[4, 2]
[1, 3, 7]
[5]
[6]
"""
while True:
try:
yield self.next_batch(block=True)
except StopIteration:
return
def clear(self):
""" Clear out all submitted futures """
with self.lock:
self.futures.clear()
while not self.queue.empty():
self.queue.get()
def AsCompleted(*args, **kwargs):
raise Exception("This has moved to as_completed")
def default_client(c=None):
""" Return a client if one has started """
c = c or _get_global_client()
if c:
return c
else:
raise ValueError(
"No clients found\n"
"Start a client and point it to the scheduler address\n"
" from distributed import Client\n"
" client = Client('ip-addr-of-scheduler:8786')\n"
)
def ensure_default_get(client):
dask.config.set(scheduler="dask.distributed")
_set_global_client(client)
def redict_collection(c, dsk):
from dask.delayed import Delayed
if isinstance(c, Delayed):
return Delayed(c.key, dsk)
else:
cc = copy.copy(c)
cc.dask = dsk
return cc
def futures_of(o, client=None):
""" Future objects in a collection
Parameters
----------
o: collection
A possibly nested collection of Dask objects
Examples
--------
>>> futures_of(my_dask_dataframe)
[<Future: finished key: ...>,
<Future: pending key: ...>]
Returns
-------
futures : List[Future]
A list of futures held by those collections
"""
stack = [o]
seen = set()
futures = list()
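# Depth-first walk over nested containers, dicts, SubgraphCallables and dask
# collections, collecting each Future at most once.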
while stack:
x = stack.pop()
if type(x) in (tuple, set, list):
stack.extend(x)
elif type(x) is dict:
stack.extend(x.values())
elif type(x) is SubgraphCallable:
stack.extend(x.dsk.values())
elif isinstance(x, Future):
if x not in seen:
seen.add(x)
futures.append(x)
elif dask.is_dask_collection(x):
stack.extend(x.__dask_graph__().values())
if client is not None:
bad = {f for f in futures if f.cancelled()}
if bad:
raise CancelledError(bad)
return futures[::-1]
def fire_and_forget(obj):
""" Run tasks at least once, even if we release the futures
Under normal operation Dask will not run any tasks for which there is not
an active future (this avoids unnecessary work in many situations).
However sometimes you want to just fire off a task, not track its future,
and expect it to finish eventually. You can use this function on a future
or collection of futures to ask Dask to complete the task even if no active
client is tracking it.
The results will not be kept in memory after the task completes (unless
there is an active future) so this is only useful for tasks that depend on
side effects.
Parameters
----------
obj: Future, list, dict, dask collection
The futures that you want to run at least once
Examples
--------
>>> fire_and_forget(client.submit(func, *args)) # doctest: +SKIP
"""
futures = futures_of(obj)
for future in futures:
future.client._send_to_scheduler(
{
"op": "client-desires-keys",
"keys": [tokey(future.key)],
"client": "fire-and-forget",
}
)
class get_task_stream:
"""
Collect task stream within a context block
This provides diagnostic information about every task that was run during
the time when this block was active.
This must be used as a context manager.
Parameters
----------
plot: boolean, str
If true then also return a Bokeh figure
If plot == 'save' then save the figure to a file
filename: str (optional)
The filename to save to if you set ``plot='save'``
Examples
--------
>>> with get_task_stream() as ts:
... x.compute()
>>> ts.data
[...]
Get back a Bokeh figure and optionally save to a file
>>> with get_task_stream(plot='save', filename='task-stream.html') as ts:
... x.compute()
>>> ts.figure
<Bokeh Figure>
To share this file with others you may wish to upload and serve it online.
A common way to do this is to upload the file as a gist, and then serve it
on https://raw.githack.com ::
$ python -m pip install gist
$ gist task-stream.html
https://gist.github.com/8a5b3c74b10b413f612bb5e250856ceb
You can then navigate to that site, click the "Raw" button to the right of
the ``task-stream.html`` file, and then provide that URL to
https://raw.githack.com . This process should provide a sharable link that
others can use to see your task stream plot.
See Also
--------
Client.get_task_stream: Function version of this context manager
"""
def __init__(self, client=None, plot=False, filename="task-stream.html"):
self.data = []
self._plot = plot
self._filename = filename
self.figure = None
self.client = client or default_client()
self.client.get_task_stream(start=0, stop=0) # ensure plugin
def __enter__(self):
self.start = time()
return self
def __exit__(self, typ, value, traceback):
L = self.client.get_task_stream(
start=self.start, plot=self._plot, filename=self._filename
)
if self._plot:
L, self.figure = L
self.data.extend(L)
async def __aenter__(self):
return self
async def __aexit__(self, typ, value, traceback):
L = await self.client.get_task_stream(
start=self.start, plot=self._plot, filename=self._filename
)
if self._plot:
L, self.figure = L
self.data.extend(L)
class performance_report:
""" Gather performance report
This creates a static HTML file that includes many of the same plots of the
dashboard for later viewing.
The resulting file uses JavaScript, and so must be viewed with a web
browser. Locally we recommend using ``python -m http.server`` or hosting
the file live online.
Examples
--------
>>> with performance_report(filename="myfile.html"):
... x.compute()
$ python -m http.server
$ open myfile.html
"""
def __init__(self, filename="dask-report.html"):
self.filename = filename
async def __aenter__(self):
self.start = time()
await get_client().get_task_stream(start=0, stop=0) # ensure plugin
async def __aexit__(self, typ, value, traceback, code=None):
if not code:
try:
frame = inspect.currentframe().f_back
code = inspect.getsource(frame)
except Exception:
code = ""
data = await get_client().scheduler.performance_report(
start=self.start, code=code
)
with open(self.filename, "w") as f:
f.write(data)
def __enter__(self):
get_client().sync(self.__aenter__)
def __exit__(self, typ, value, traceback):
try:
frame = inspect.currentframe().f_back
code = inspect.getsource(frame)
except Exception:
code = ""
get_client().sync(self.__aexit__, type, value, traceback, code=code)
@contextmanager
def temp_default_client(c):
""" Set the default client for the duration of the context
.. note::
This function should be used exclusively for unit testing the default client
functionality. In all other cases, please use ``Client.as_current`` instead.
.. note::
Unlike ``Client.as_current``, this context manager is neither thread-local nor
task-local.
Parameters
----------
c : Client
This is what default_client() will return within the with-block.
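Examples
--------
A minimal sketch for test code; ``c`` is an assumed Client instance
>>> with temp_default_client(c): # doctest: +SKIP
...     assert default_client() is c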
"""
old_exec = default_client()
_set_global_client(c)
try:
yield
finally:
_set_global_client(old_exec)
def _close_global_client():
"""
Force close of global client. This cleans up when a client
wasn't closed explicitly, e.g. in interactive sessions.
"""
c = _get_global_client()
if c is not None:
c._should_close_loop = False
c.close(timeout=2)
atexit.register(_close_global_client)
| bsd-3-clause |
cauchycui/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
harisbal/pandas | doc/make.py | 4 | 13799 | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
Usage
-----
$ python make.py clean
$ python make.py html
$ python make.py latex
"""
import importlib
import sys
import os
import shutil
# import subprocess
import argparse
from contextlib import contextmanager
import webbrowser
import jinja2
DOC_PATH = os.path.dirname(os.path.abspath(__file__))
SOURCE_PATH = os.path.join(DOC_PATH, 'source')
BUILD_PATH = os.path.join(DOC_PATH, 'build')
BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates']
@contextmanager
def _maybe_exclude_notebooks():
"""Skip building the notebooks if pandoc is not installed.
This assumes that nbsphinx is installed.
Skip notebook conversion if:
1. nbconvert isn't installed, or
2. nbconvert is installed, but pandoc isn't
"""
# TODO move to exclude_pattern
base = os.path.dirname(__file__)
notebooks = [os.path.join(base, 'source', nb)
for nb in ['style.ipynb']]
contents = {}
def _remove_notebooks():
for nb in notebooks:
with open(nb, 'rt') as f:
contents[nb] = f.read()
os.remove(nb)
try:
import nbconvert
except ImportError:
sys.stderr.write('Warning: nbconvert not installed. '
'Skipping notebooks.\n')
_remove_notebooks()
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
sys.stderr.write('Warning: Pandoc is not installed. '
'Skipping notebooks.\n')
_remove_notebooks()
yield
for nb, content in contents.items():
with open(nb, 'wt') as f:
f.write(content)
class DocBuilder:
"""Class to wrap the different commands of this script.
All public methods of this class can be called as parameters of the
script.
"""
def __init__(self, num_jobs=1, include_api=True, single_doc=None,
verbosity=0):
self.num_jobs = num_jobs
self.include_api = include_api
self.verbosity = verbosity
self.single_doc = None
self.single_doc_type = None
if single_doc is not None:
self._process_single_doc(single_doc)
self.exclude_patterns = self._exclude_patterns
self._generate_index()
if self.single_doc_type == 'docstring':
self._run_os('sphinx-autogen', '-o',
'source/generated_single', 'source/index.rst')
@property
def _exclude_patterns(self):
"""Docs source files that will be excluded from building."""
# TODO move maybe_exclude_notebooks here
if self.single_doc is not None:
rst_files = [f for f in os.listdir(SOURCE_PATH)
if ((f.endswith('.rst') or f.endswith('.ipynb'))
and (f != 'index.rst')
and (f != '{0}.rst'.format(self.single_doc)))]
if self.single_doc_type != 'api':
rst_files += ['generated/*.rst']
elif not self.include_api:
rst_files = ['api.rst', 'generated/*.rst']
else:
rst_files = ['generated_single/*.rst']
exclude_patterns = ','.join(
'{!r}'.format(i) for i in ['**.ipynb_checkpoints'] + rst_files)
return exclude_patterns
def _process_single_doc(self, single_doc):
"""Extract self.single_doc (base name) and self.single_doc_type from
passed single_doc kwarg.
"""
self.include_api = False
if single_doc == 'api.rst' or single_doc == 'api':
self.single_doc_type = 'api'
self.single_doc = 'api'
elif os.path.exists(os.path.join(SOURCE_PATH, single_doc)):
self.single_doc_type = 'rst'
self.single_doc = os.path.splitext(os.path.basename(single_doc))[0]
elif os.path.exists(
os.path.join(SOURCE_PATH, '{}.rst'.format(single_doc))):
self.single_doc_type = 'rst'
self.single_doc = single_doc
elif single_doc is not None:
try:
obj = pandas # noqa: F821
for name in single_doc.split('.'):
obj = getattr(obj, name)
except AttributeError:
raise ValueError('Single document not understood, it should '
'be a file in doc/source/*.rst (e.g. '
'"contributing.rst") or a pandas function or '
'method (e.g. "pandas.DataFrame.head")')
else:
self.single_doc_type = 'docstring'
if single_doc.startswith('pandas.'):
self.single_doc = single_doc[len('pandas.'):]
else:
self.single_doc = single_doc
def _copy_generated_docstring(self):
"""Copy existing generated (from api.rst) docstring page because
this is more correct in certain cases (where a custom autodoc
template is used).
"""
fname = os.path.join(SOURCE_PATH, 'generated',
'pandas.{}.rst'.format(self.single_doc))
temp_dir = os.path.join(SOURCE_PATH, 'generated_single')
try:
os.makedirs(temp_dir)
except OSError:
pass
if os.path.exists(fname):
try:
# copying to make sure sphinx always thinks it is new
# and needs to be re-generated (to pick source code changes)
shutil.copy(fname, temp_dir)
except: # noqa
pass
def _generate_index(self):
"""Create index.rst file with the specified sections."""
if self.single_doc_type == 'docstring':
self._copy_generated_docstring()
with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f:
t = jinja2.Template(f.read())
with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f:
f.write(t.render(include_api=self.include_api,
single_doc=self.single_doc,
single_doc_type=self.single_doc_type))
@staticmethod
def _create_build_structure():
"""Create directories required to build documentation."""
for dirname in BUILD_DIRS:
try:
os.makedirs(os.path.join(BUILD_PATH, dirname))
except OSError:
pass
@staticmethod
def _run_os(*args):
"""Execute a command as a OS terminal.
Parameters
----------
*args : list of str
Command and parameters to be executed
Examples
--------
>>> DocBuilder()._run_os('python', '--version')
"""
# TODO check_call should be more safe, but it fails with
# exclude patterns, needs investigation
# subprocess.check_call(args, stderr=subprocess.STDOUT)
os.system(' '.join(args))
def _sphinx_build(self, kind):
"""Call sphinx to build documentation.
Attribute `num_jobs` from the class is used.
Parameters
----------
kind : {'html', 'latex'}
Examples
--------
>>> DocBuilder(num_jobs=4)._sphinx_build('html')
"""
if kind not in ('html', 'latex', 'spelling'):
raise ValueError('kind must be html, latex or '
'spelling, not {}'.format(kind))
self._run_os('sphinx-build',
'-j{}'.format(self.num_jobs),
'-b{}'.format(kind),
'-{}'.format(
'v' * self.verbosity) if self.verbosity else '',
'-d"{}"'.format(os.path.join(BUILD_PATH, 'doctrees')),
'-Dexclude_patterns={}'.format(self.exclude_patterns),
'"{}"'.format(SOURCE_PATH),
'"{}"'.format(os.path.join(BUILD_PATH, kind)))
def _open_browser(self):
base_url = os.path.join('file://', DOC_PATH, 'build', 'html')
if self.single_doc_type == 'docstring':
url = os.path.join(
base_url,
'generated_single', 'pandas.{}.html'.format(self.single_doc))
else:
url = os.path.join(base_url, '{}.html'.format(self.single_doc))
webbrowser.open(url, new=2)
def html(self):
"""Build HTML documentation."""
self._create_build_structure()
with _maybe_exclude_notebooks():
self._sphinx_build('html')
zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
if os.path.exists(zip_fname):
os.remove(zip_fname)
if self.single_doc is not None:
self._open_browser()
shutil.rmtree(os.path.join(SOURCE_PATH, 'generated_single'),
ignore_errors=True)
def latex(self, force=False):
"""Build PDF documentation."""
self._create_build_structure()
if sys.platform == 'win32':
sys.stderr.write('latex build has not been tested on windows\n')
else:
self._sphinx_build('latex')
os.chdir(os.path.join(BUILD_PATH, 'latex'))
if force:
for i in range(3):
self._run_os('pdflatex',
'-interaction=nonstopmode',
'pandas.tex')
raise SystemExit('You should check the file '
'"build/latex/pandas.pdf" for problems.')
else:
self._run_os('make')
def latex_forced(self):
"""Build PDF documentation with retries to find missing references."""
self.latex(force=True)
@staticmethod
def clean():
"""Clean documentation generated files."""
shutil.rmtree(BUILD_PATH, ignore_errors=True)
shutil.rmtree(os.path.join(SOURCE_PATH, 'generated'),
ignore_errors=True)
def zip_html(self):
"""Compress HTML documentation into a zip file."""
zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
if os.path.exists(zip_fname):
os.remove(zip_fname)
dirname = os.path.join(BUILD_PATH, 'html')
fnames = os.listdir(dirname)
os.chdir(dirname)
self._run_os('zip',
zip_fname,
'-r',
'-q',
*fnames)
def spellcheck(self):
"""Spell check the documentation."""
self._sphinx_build('spelling')
output_location = os.path.join('build', 'spelling', 'output.txt')
with open(output_location) as output:
lines = output.readlines()
if lines:
raise SyntaxError(
'Found misspelled words.'
' Check pandas/doc/build/spelling/output.txt'
' for more details.')
def main():
cmds = [method for method in dir(DocBuilder) if not method.startswith('_')]
argparser = argparse.ArgumentParser(
description='pandas documentation builder',
epilog='Commands: {}'.format(','.join(cmds)))
argparser.add_argument('command',
nargs='?',
default='html',
help='command to run: {}'.format(', '.join(cmds)))
argparser.add_argument('--num-jobs',
type=int,
default=1,
help='number of jobs used by sphinx-build')
argparser.add_argument('--no-api',
default=False,
help='omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=None,
help=('filename of section or method name to '
'compile, e.g. "indexing", "DataFrame.join"'))
argparser.add_argument('--python-path',
type=str,
default=os.path.dirname(DOC_PATH),
help='path from which the pandas package is imported')
argparser.add_argument('-v', action='count', dest='verbosity', default=0,
help=('increase verbosity (can be repeated), '
'passed to the sphinx build command'))
args = argparser.parse_args()
if args.command not in cmds:
raise ValueError('Unknown command {}. Available options: {}'.format(
args.command, ', '.join(cmds)))
# Below we update both os.environ and sys.path. The former is used by
# external libraries (namely Sphinx) to compile this module and resolve
# the import of `python_path` correctly. The latter is used to resolve
# the import within the module, injecting it into the global namespace
os.environ['PYTHONPATH'] = args.python_path
sys.path.append(args.python_path)
globals()['pandas'] = importlib.import_module('pandas')
# Set the matplotlib backend to the non-interactive Agg backend for all
# child processes.
os.environ['MPLBACKEND'] = 'module://matplotlib.backends.backend_agg'
builder = DocBuilder(args.num_jobs, not args.no_api, args.single,
args.verbosity)
getattr(builder, args.command)()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
mcnowinski/various-and-sundry | lightcurve/lpn.py | 1 | 26313 | import pandas as pd
from collections import defaultdict
import math
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
import os
#SDSS MOC4 data file
path = 'ADR4.dat'
#solar colors (reverse calculated from Carvano)
#reference to g
solar_color_ug = 3.81
solar_color_rg = 2.04
solar_color_ig = 1.94
solar_color_zg = 1.90
solar_color_gg = 2.5 #to make LRgg = 1
#4.27, 2.96, 2.5, 2.4, 2.36
#2.32, 0.46, 0, -0.1, -0.14
#reference to r
solar_color_ur = solar_color_ug - solar_color_rg
solar_color_gr = solar_color_gg - solar_color_rg
solar_color_rr = 0.0
solar_color_ir = solar_color_ig - solar_color_rg
solar_color_zr = solar_color_zg - solar_color_rg
#print solar_color_ur, solar_color_gr, solar_color_rr, solar_color_ir, solar_color_zr
#os.sys.exit(1)
#sdss wavelengths (microns)
#0.354, 0.477, 0.6230, 0.7630 and 0.913 um
u_wavelength=0.3543
g_wavelength=0.4770
r_wavelength=0.6231
i_wavelength=0.7625
z_wavelength=0.9134
#carvano taxonomy limits
#TAX LRug LRgg LRrg LRig LRzg CGguL CGguU CGrgL CGrgU CGirL CGirU CGziL CGziU
# O 0.884 1.000 1.057 1.053 0.861 0.784 1.666 0.175 0.505 -0.143 0.106 -0.833 -0.467
# V 0.810 1.000 1.099 1.140 0.854 1.087 2.095 0.511 2.374 -0.077 0.445 -2.018 -0.683
# Q 0.842 1.000 1.082 1.094 0.989 0.757 2.122 0.421 0.967 -0.032 0.229 -0.719 -0.200
# S 0.839 1.000 1.099 1.148 1.096 0.868 1.960 0.379 0.910 0.148 0.601 -0.530 -0.047
# A 0.736 1.000 1.156 1.209 1.137 1.264 4.210 0.937 1.342 0.151 0.505 -0.521 -0.089
# C 0.907 1.000 1.008 1.011 1.021 0.385 1.990 -0.140 0.403 -0.203 0.202 -0.221 0.259
# X 0.942 1.000 1.029 1.063 1.073 0.178 1.081 -0.089 0.481 0.136 0.478 -0.182 0.187
# L 0.858 1.000 1.071 1.109 1.116 0.913 2.089 0.253 0.871 0.136 0.622 -0.125 0.160
# D 0.942 1.000 1.075 1.135 1.213 0.085 1.717 -0.080 0.589 0.142 0.625 0.121 0.502
LR_means = {}
LR_means['O'] = {'LRug': 0.884, 'LRgg': 1.000, 'LRrg': 1.057, 'LRig': 1.053, 'LRzg': 0.861}
LR_means['V'] = {'LRug': 0.810, 'LRgg': 1.000, 'LRrg': 1.099, 'LRig': 1.140, 'LRzg': 0.854}
LR_means['Q'] = {'LRug': 0.842, 'LRgg': 1.000, 'LRrg': 1.082, 'LRig': 1.094, 'LRzg': 0.989}
LR_means['S'] = {'LRug': 0.839, 'LRgg': 1.000, 'LRrg': 1.099, 'LRig': 1.148, 'LRzg': 1.096}
LR_means['A'] = {'LRug': 0.736, 'LRgg': 1.000, 'LRrg': 1.156, 'LRig': 1.209, 'LRzg': 1.137}
LR_means['C'] = {'LRug': 0.907, 'LRgg': 1.000, 'LRrg': 1.008, 'LRig': 1.011, 'LRzg': 1.021}
LR_means['X'] = {'LRug': 0.942, 'LRgg': 1.000, 'LRrg': 1.029, 'LRig': 1.063, 'LRzg': 1.073}
LR_means['L'] = {'LRug': 0.858, 'LRgg': 1.000, 'LRrg': 1.071, 'LRig': 1.109, 'LRzg': 1.116}
LR_means['D'] = {'LRug': 0.942, 'LRgg': 1.000, 'LRrg': 1.075, 'LRig': 1.135, 'LRzg': 1.213}
#K type calc from Wabash 2453
LR_means['K'] = {'LRug': 0.871, 'LRgg': 1.000, 'LRrg': 1.053, 'LRig': 1.088, 'LRzg': 1.077}
#calc slope and bd (Carvano 2015) for the mean taxonomic shapes (Carvano 2011)
#LR_means['O'] = {'LRug': 0.884, 'LRgg': 1.000, 'LRrg': 1.057, 'LRig': 1.053, 'LRzg': 0.861}
log_mean=open('moc4.mean.txt', 'w')
log_mean.write('%s,%f,%f,%f,%f,%f\n'%('space', u_wavelength, g_wavelength, r_wavelength, i_wavelength, z_wavelength))
log_mean.write('%s,%s,%s,%s,%s,%s,%s,%s\n'%('class', 'Rur', 'Rgr', 'Rrr', 'Rir', 'Rzr', 'slope', 'bd'))
for key in LR_means:
LRug = LR_means[key]['LRug']
LRgg = LR_means[key]['LRgg']
LRrg = LR_means[key]['LRrg']
LRig = LR_means[key]['LRig']
LRzg = LR_means[key]['LRzg']
#
Cug = -2.5*LRug
Cgg = -2.5*LRgg
Crg = -2.5*LRrg
Cig = -2.5*LRig
Czg = -2.5*LRzg
#
Cur = Cug - Crg
Cgr = Cgg - Crg
Crr = 0.0
Cir = Cig - Crg
Czr = Czg - Crg
#
LRur = -Cur/2.5
LRgr = -Cgr/2.5
LRrr = -Crr/2.5
LRir = -Cir/2.5
LRzr = -Czr/2.5
#
Rur = pow(10,LRur)
Rgr = pow(10,LRgr)
Rrr = pow(10,LRrr)
Rir = pow(10,LRir)
Rzr = pow(10,LRzr)
#Carvano 2015 parameters
slope = (Rir-Rgr)/(i_wavelength-g_wavelength)
bd = Rzr - Rir
log_mean.write('%s,%f,%f,%f,%f,%f,%f,%f\n'%(key, Rur, Rgr, Rrr, Rir, Rzr, slope, bd))
log_mean.close()
#os.sys.exit(1)
CG_limits = {}
CG_limits['O'] = {'CGguL': 0.784, 'CGguU': 1.666, 'CGrgL': 0.175, 'CGrgU': 0.505, 'CGirL':-0.143, 'CGirU': 0.106, 'CGziL': -0.833, 'CGziU': -0.467}
CG_limits['V'] = {'CGguL': 1.087, 'CGguU': 2.095, 'CGrgL': 0.511, 'CGrgU': 2.374, 'CGirL':-0.077, 'CGirU': 0.445, 'CGziL': -2.018, 'CGziU': -0.683}
CG_limits['Q'] = {'CGguL': 0.757, 'CGguU': 2.122, 'CGrgL': 0.421, 'CGrgU': 0.967, 'CGirL':-0.032, 'CGirU': 0.229, 'CGziL': -0.719, 'CGziU': -0.200}
CG_limits['S'] = {'CGguL': 0.868, 'CGguU': 1.960, 'CGrgL': 0.379, 'CGrgU': 0.910, 'CGirL': 0.148, 'CGirU': 0.601, 'CGziL': -0.530, 'CGziU': -0.047}
CG_limits['A'] = {'CGguL': 1.264, 'CGguU': 4.210, 'CGrgL': 0.937, 'CGrgU': 1.342, 'CGirL': 0.151, 'CGirU': 0.505, 'CGziL': -0.521, 'CGziU': -0.089}
CG_limits['C'] = {'CGguL': 0.385, 'CGguU': 1.990, 'CGrgL':-0.140, 'CGrgU': 0.403, 'CGirL':-0.203, 'CGirU': 0.202, 'CGziL': -0.221, 'CGziU': 0.259}
CG_limits['X'] = {'CGguL': 0.178, 'CGguU': 1.081, 'CGrgL':-0.089, 'CGrgU': 0.481, 'CGirL': 0.136, 'CGirU': 0.478, 'CGziL': -0.182, 'CGziU': 0.187}
CG_limits['L'] = {'CGguL': 0.913, 'CGguU': 2.089, 'CGrgL': 0.253, 'CGrgU': 0.871, 'CGirL': 0.136, 'CGirU': 0.622, 'CGziL': -0.125, 'CGziU': 0.160}
CG_limits['D'] = {'CGguL': 0.085, 'CGguU': 1.717, 'CGrgL':-0.080, 'CGrgU': 0.589, 'CGirL': 0.142, 'CGirU': 0.625, 'CGziL': 0.121, 'CGziU': 0.502}
#1 x sigma
#1.243181211 0.516802843 0.357449432 0.074183133
#0.870581826 0.209380322 0.137706511 -0.216456472
#CG_limits['K'] = {'CGguL': 0.870581826, 'CGguU': 1.243181211, 'CGrgL':0.209380322, 'CGrgU': 0.516802843, 'CGirL': 0.137706511, 'CGirU': 0.357449432, 'CGziL': -0.216456472, 'CGziU': 0.074183133}
#2x sigma
#1.429480904 0.670514103 0.467320892 0.219502936
#0.684282133 0.055669061 0.027835051 -0.361776275
CG_limits['K'] = {'CGguL': 0.684282133, 'CGguU': 1.429480904, 'CGrgL':0.055669061, 'CGrgU': 0.670514103, 'CGirL': 0.027835051, 'CGirU': 0.467320892, 'CGziL': -0.361776275, 'CGziU': 0.219502936}
#asteroid dictionary
asteroids = defaultdict(dict)
#===============================================================================
# 1 1 - 7 moID Unique SDSS moving-object ID
# 2 8 - 13 Run SDSS object IDs, for details see SDSS EDR paper
# 3 14 - 15 Col
# 4 16 - 20 Field
# 5 21 - 26 Object
# 6 27 - 35 rowc Pixel row
# 7 36 - 44 colc Pixel col
# -- Astrometry --
# 8 47 - 59 Time (MJD) Modified Julian Day for the mean observation time
# 9 60 - 70 R.A. J2000 right ascension of the object at the time of the (r band) SDSS observation
# 10 71 - 81 Dec J2000 declination of the object at the time of the (r band) SDSS observation
# 11 82 - 92 Lambda Ecliptic longitude at the time of observation
# 12 93 - 103 Beta Ecliptic latitude at the time of observation
# 13 104 - 115 Phi Distance from the opposition at the time of observation
# 14 117 - 124 vMu The velocity component parallel to the SDSS scanning direction, and its error (deg/day)
# 15 125 - 131 vMu Error
# 16 132 - 139 vNu The velocity component perpendicular to the SDSS scanning direction, and its error (deg/day)
# 17 140 - 146 vNu Error
# 18 147 - 154 vLambda The velocity component parallel to the Ecliptic (deg/day)
# 19 155 - 162 vBeta The velocity component perpendicular to the Ecliptic (deg/day)
# -- Photometry --
# 20 164 - 169 u SDSS u'g'r'i'z' psf magnitudes and corresponding errors
# 21 170 - 174 uErr
# 22 175 - 180 g
# 23 181 - 185 gErr
# 24 186 - 191 r
# 25 192 - 196 rErr
# 26 197 - 202 i
# 27 203 - 207 iErr
# 28 208 - 213 z
# 29 214 - 218 zErr
# 30 219 - 224 a a* color = 0.89 (g - r) + 0.45 (r - i) - 0.57 (see Paper I)
# 31 225 - 229 aErr
# 32 231 - 236 V Johnson-V band magnitude, synthesized from SDSS magnitudes
# 33 237 - 242 B Johnson-B band magnitude, synthesized from SDSS magnitudes
# -- Identification --
# 34 243 - 244 Identification flag Has this moving object been linked to a known asteroid (0/1)? See Paper II.
# 35 245 - 252 Numeration Numeration of the asteroid. If the asteroid is not numbered, or this moving object has not yet been linked to a known asteroid, it's 0.
# 36 253 - 273 Designation Asteroid designation or name. If this moving object has not yet been linked to a known asteroid, it's '-'
# 37 274 - 276 Detection Counter Detection counter of this object in SDSS data
# 38 277 - 279 Total Detection Count Total number of SDSS observations of this asteroid
# 39 280 - 288 Flags Flags that encode SDSSMOC processing information (internal)
# -- Matching information --
# 40 290 - 300 Computed R.A. Predicted position and magnitude at the time of SDSS observation for an associated known object, computed using ASTORB data. See a note about an error in the first three releases
# 41 301 - 311 Computed Dec
# 42 312 - 317 Computed App. Mag.
# 43 319 - 326 R Heliocentric distance at the time of observation
# 44 327 - 334 Geocentric Geocentric distance at the time of observation
# 45 335 - 340 Phase Phase angle at the time of observation
# -- Osculating elements --
# 46 342 - 352 Catalog ID Identification of the catalog from which the osculating elements and (H, G) values were extracted
# 47 363 - 368 H Absolute magnitude and slope parameter
# 48 369 - 373 G
# 49 374 - 379 Arc Arc of observations used to derive the elements
# 50 380 - 393 Epoch Osculating elements
# 51 394 - 406 a
# 52 407 - 417 e
# 53 418 - 428 i
# 54 429 - 439 Lon. of asc. node
# 55 440 - 450 Arg. of perihelion
# 56 451 - 461 M
# -- Proper elements --
# 57 463 - 483 Proper elements catalog ID Identification of the catalog from which the proper elements were extracted
# 58 484 - 496 a' Proper elements
# 59 497 - 507 e'
# 60 508 - 518 sin(i')
# 61-124 519 - 646 binary processing flags Only since the 3rd release!!
#===============================================================================
#using pandas with a column specification defined above
col_specification =[ (0, 6), (7, 12), (13, 14), (15, 19), (20, 25), (26, 34), (35, 43), (46, 58), (59, 69), (70, 80), (81, 91), (92, 102), (103, 114), (116, 123), (124, 130), (131, 138), (139, 145), (146, 153), (154, 161), (163, 168), (169, 173), (174, 179), (180, 184), (185, 190), (191, 195), (196, 201), (202, 206), (207, 212), (213, 217), (218, 223), (224, 228), (230, 235), (236, 241), (242, 243), (244, 251), (252, 272), (273, 275), (276, 278), (279, 287), (289, 299), (300, 310), (311, 316), (318, 325), (326, 333), (334, 339), (341, 351), (362, 367), (368, 372), (373, 378), (379, 392), (393, 405), (406, 416), (417, 427), (428, 438), (439, 449), (450, 460), (462, 482), (483, 495), (496, 506), (507, 517), (518, 645)]
print 'Reading SDSS MOC data from %s...'%path
#read all lines from MOC 4 data file
#variables to process big ole MOC4 data file
skipRows = 0
nRowsMax = 100000
nRows=nRowsMax
#is this a known moving object?
id_flag = 0
#track observation and unique asteroid count
asteroid_count = 0
observation_count = 0
#log files
log=open('moc4.log.txt', 'w')
log_tax=open('moc4.tax.txt', 'w')
log_tax_final=open('moc4.tax.final.txt', 'w')
#organize the observations by asteroid
observation={}
while nRows >= nRowsMax:
try:
data = pd.read_fwf(path, colspecs=col_specification, skiprows=skipRows, nrows=nRowsMax, header=None)
except:
break
nRows = data.shape[0]
for irow in range(0,nRows):
id_flag = data.iat[irow, 33]
#is this a known asteroid?
if id_flag == 1:
designation = data.iat[irow, 35]
if not asteroids.has_key(designation):
asteroids[designation]={}
asteroids[designation]['numeration'] = data.iat[irow, 34]
asteroids[designation]['observations'] = []
asteroid_count += 1
#add a new observation to this asteroid
observation={}
observation['moID'] = data.iat[irow, 0]
observation['mjd'] = float(data.iat[irow, 7])
observation['u'] = float(data.iat[irow, 19])
observation['uErr'] = float(data.iat[irow, 20])
observation['g'] = float(data.iat[irow, 21])
observation['gErr'] = float(data.iat[irow, 22])
observation['r'] = float(data.iat[irow, 23])
observation['rErr'] = float(data.iat[irow, 24])
observation['i'] = float(data.iat[irow, 25])
observation['iErr'] = float(data.iat[irow, 26])
observation['z'] = float(data.iat[irow, 27])
observation['zErr'] = float(data.iat[irow, 28])
observation['a'] = float(data.iat[irow, 29])
observation['aErr'] = float(data.iat[irow, 30])
observation['V'] = float(data.iat[irow, 31])
observation['B'] = float(data.iat[irow, 32])
observation['Phase'] = float(data.iat[irow, 44])
#print observation['moID'], observation['Phase']
#calc asteroid colors, relative to g-band and with solar color subtracted
#Cxg = mx - mg - (C(solar)x - C(solar)g)
observation['Cug'] = observation['u'] - observation['g'] - solar_color_ug
observation['Cgg'] = -solar_color_gg
observation['Crg'] = observation['r'] - observation['g'] - solar_color_rg
observation['Cig'] = observation['i'] - observation['g'] - solar_color_ig
observation['Czg'] = observation['z'] - observation['g'] - solar_color_zg
#calc asteroid color error
##propagate errors using quadrature, e.g. for Cug, error is sqrt(uErr*uErr+gErr*gErr)??
##observation['CugErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])
##observation['CggErr'] = observation['gErr']
##observation['CrgErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['rErr']*observation['rErr'])
##observation['CigErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['iErr']*observation['iErr'])
##observation['CzgErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['zErr']*observation['zErr'])
#from the Carvano data, this is what it seems they are doing
observation['CugErr'] = observation['uErr']
observation['CggErr'] = observation['gErr']
observation['CrgErr'] = observation['rErr']
observation['CigErr'] = observation['iErr']
observation['CzgErr'] = observation['zErr']
#calc asteroid log reflectance, relative to g-band
#Cxg = -2.5(logRx-logRg) = -2.5(log(Rx/Rg)) = -2.5*LRx
#LRx = LRxg = -Cxg/2.5
observation['LRug'] = -observation['Cug']/2.5
observation['LRgg'] = 1.0
observation['LRrg'] = -observation['Crg']/2.5
observation['LRig'] = -observation['Cig']/2.5
observation['LRzg'] = -observation['Czg']/2.5
#calc asteroid log reflectance errors by propagating the Cxg errors
observation['LRugErr'] = observation['CugErr']/2.5
observation['LRggErr'] = observation['CggErr']/2.5
observation['LRrgErr'] = observation['CrgErr']/2.5
observation['LRigErr'] = observation['CigErr']/2.5
observation['LRzgErr'] = observation['CzgErr']/2.5
#calc asteroid color gradients, basis of Carvano taxonomy
#CGx = -0.4*(Cxg-C(x-1)g)/(lambdax-lambda(x-1))
observation['CGgu'] = -0.4*(observation['Cgg']-observation['Cug'])/(g_wavelength-u_wavelength)
observation['CGrg'] = -0.4*(observation['Crg']-observation['Cgg'])/(r_wavelength-g_wavelength)
observation['CGir'] = -0.4*(observation['Cig']-observation['Crg'])/(i_wavelength-r_wavelength)
observation['CGzi'] = -0.4*(observation['Czg']-observation['Cig'])/(z_wavelength-i_wavelength)
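#worked example (illustrative values only): for Cug = -2.2 and Cgg = -2.5
#(i.e. LRug = 0.88), CGgu = -0.4*(-2.5 - (-2.2))/(0.4770 - 0.3543) ~ 0.98,
#which falls within e.g. the S-class CGgu limits tabulated above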
#observation['CGguErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])
#observation['CGrgErr'] = math.sqrt(observation['rErr']*observation['rErr']+observation['gErr']*observation['gErr'])
#observation['CGirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])
#observation['CGziErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['iErr']*observation['iErr'])
#observation['CGguErr'] = observation['gErr'] + observation['uErr']
#observation['CGrgErr'] = observation['rErr'] + observation['gErr']
#observation['CGirErr'] = observation['iErr'] + observation['rErr']
#observation['CGziErr'] = observation['zErr'] + observation['iErr']
#observation['CGguErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])*0.4/(g_wavelength-u_wavelength)
#observation['CGrgErr'] = math.sqrt(observation['rErr']*observation['rErr']+observation['gErr']*observation['gErr'])*0.4/(r_wavelength-g_wavelength)
#observation['CGirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])*0.4/(i_wavelength-r_wavelength)
#observation['CGziErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['iErr']*observation['iErr'])*0.4/(z_wavelength-i_wavelength)
observation['CGguErr'] = math.sqrt(observation['LRggErr']*observation['LRggErr']+observation['LRugErr']*observation['LRugErr'])/(g_wavelength-u_wavelength)
observation['CGrgErr'] = math.sqrt(observation['LRrgErr']*observation['LRrgErr']+observation['LRggErr']*observation['LRggErr'])/(r_wavelength-g_wavelength)
observation['CGirErr'] = math.sqrt(observation['LRigErr']*observation['LRigErr']+observation['LRrgErr']*observation['LRrgErr'])/(i_wavelength-r_wavelength)
observation['CGziErr'] = math.sqrt(observation['LRzgErr']*observation['LRzgErr']+observation['LRigErr']*observation['LRigErr'])/(z_wavelength-i_wavelength)
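#note: since LRxgErr = CxgErr/2.5 and CGx = -0.4*(Cxg-C(x-1)g)/dlambda, dividing
#the quadrature sum of the LR errors by dlambda is equivalent to multiplying the
#quadrature sum of the Cxg errors by 0.4/dlambda (0.4 = 1/2.5)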
#observation['CGguErr'] = (observation['gErr']+observation['uErr'])*0.4/(g_wavelength-u_wavelength)
#observation['CGrgErr'] = (observation['rErr']+observation['gErr'])*0.4/(r_wavelength-g_wavelength)
#observation['CGirErr'] = (observation['iErr']+observation['rErr'])*0.4/(i_wavelength-r_wavelength)
#observation['CGziErr'] = (observation['zErr']+observation['iErr'])*0.4/(z_wavelength-i_wavelength)
#
#this is for phase angle analysis (Carvano et al. 2015)
#color gradients based on r'
observation['Cur'] = observation['u'] - observation['r'] - solar_color_ur
observation['Cgr'] = observation['g'] - observation['r'] - solar_color_gr
observation['Crr'] = 0.0 #-solar_color_rr
observation['Cir'] = observation['i'] - observation['r'] - solar_color_ir
observation['Czr'] = observation['z'] - observation['r'] - solar_color_zr
#from the Carvano data, this is what it seems they are doing
observation['CurErr'] = math.sqrt(observation['uErr']*observation['uErr']+observation['rErr']*observation['rErr'])
observation['CgrErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['rErr']*observation['rErr'])
observation['CrrErr'] = 0.0
observation['CirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])
observation['CzrErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['rErr']*observation['rErr'])
#calc asteroid reflectance, relative to r-band
#Cxr = -2.5(logRx-logRr) = -2.5(log(Rx/Rr))
#Rx/Rr = 10^(-Cxr/2.5)
observation['Rur'] = pow(10,-observation['Cur']/2.5)
observation['Rgr'] = pow(10, -observation['Cgr']/2.5)
observation['Rrr'] = 1.0
observation['Rir'] = pow(10, -observation['Cir']/2.5)
observation['Rzr'] = pow(10, -observation['Czr']/2.5)
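#e.g. a color Cir = -0.10 gives Rir = 10^(0.10/2.5) ~ 1.10, i.e. roughly 10%
#more reflective than the Sun in i relative to r (illustrative value)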
#
#http://slittlefair.staff.shef.ac.uk/teaching/phy217/lectures/stats/L18/index.html
observation['RurErr'] = observation['CurErr']/1.09*observation['Rur']
observation['RgrErr'] = observation['CgrErr']/1.09*observation['Rgr']
observation['RrrErr'] = 0.0
observation['RirErr'] = observation['CirErr']/1.09*observation['Rir']
observation['RzrErr'] = observation['CzrErr']/1.09*observation['Rzr']
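#the 1.09 factor is ~2.5/ln(10): since Rxr = 10^(-Cxr/2.5), a small magnitude
#error propagates as RxrErr = Rxr*ln(10)/2.5*CxrErr = Rxr*CxrErr/1.086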
#observation['RurErr'] = (abs(observation['Rur']*(pow(10,-observation['CurErr']/2.5)-1))+abs(observation['Rur']*(pow(10,observation['CurErr']/2.5)-1)))/2.0
#observation['RgrErr'] = (abs(observation['Rgr']*(pow(10,-observation['CgrErr']/2.5)-1))+abs(observation['Rgr']*(pow(10,observation['CgrErr']/2.5)-1)))/2.0
#observation['RrrErr'] = (abs(observation['Rrr']*(pow(10,-observation['CrrErr']/2.5)-1))+abs(observation['Rrr']*(pow(10,observation['CrrErr']/2.5)-1)))/2.0
#observation['RirErr'] = (abs(observation['Rir']*(pow(10,-observation['CirErr']/2.5)-1))+abs(observation['Rir']*(pow(10,observation['CirErr']/2.5)-1)))/2.0
#observation['RzrErr'] = (abs(observation['Rzr']*(pow(10,-observation['CzrErr']/2.5)-1))+abs(observation['Rzr']*(pow(10,observation['CzrErr']/2.5)-1)))/2.0
#calc slope and bd parameters from Carvano et al. 2015
#eq 1: Rir-Rgr/(lambdai-lambdag)
#eq 2: Rzr-Rir
observation['slope'] = (observation['Rir']-observation['Rgr'])/(i_wavelength-g_wavelength)
observation['slopeErr'] = math.sqrt(observation['RirErr']*observation['RirErr']+observation['RgrErr']*observation['RgrErr'])/(i_wavelength-g_wavelength)
observation['bd'] = observation['Rzr'] - observation['Rir']
observation['bdErr'] = math.sqrt(observation['RzrErr']*observation['RzrErr']+observation['RirErr']*observation['RirErr'])
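#sanity check: a flat, solar-like spectrum (Rgr = Rir = Rzr = 1) gives
#slope = 0 and bd = 0; a reddened spectrum (Rir > Rgr) gives slope > 0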
#calc asteroid log reflectance errors by propagating the Cxg errors
#observation['RurErr'] = ?
#observation['RgrErr'] = ?
#observation['RrrErr'] = ?
#observation['RirErr'] = ?
#observation['RzrErr'] = ?
#
asteroids[designation]['observations'].append(observation)
#print asteroids[designation]
skipRows += nRows
print 'Read %d row(s).'%(skipRows)
print 'Found %d asteroid(s).'%asteroid_count
print 'Calculating taxonomic classes for each observation...'
log_tax.write('%s,%s,%s,%s,%s,%s,%s\n'%('designation', 'moid', 'phase', 'slope', 'slopeErr', 'bd', 'bdErr'))
for designation in asteroids:
log.write('%s\n'%designation)
print 'Processing observations for %s...'%designation
for observation in asteroids[designation]['observations']:
log.write('\t%s\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n'%('moID', 'LRug', 'LRugErr', 'LRgg', 'LRggErr', 'LRrg', 'LRrgErr', 'LRig', 'LRigErr', 'LRzg', 'LRzgErr'))
log.write('\t%s\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n'%(observation['moID'], observation['LRug'], observation['LRugErr'], observation['LRgg'], observation['LRggErr'], observation['LRrg'], observation['LRrgErr'], observation['LRig'], observation['LRigErr'], observation['LRzg'], observation['LRzgErr']))
log.write('\t%s\t\t%s\t\t%s\t\t%s\n'%('CGgu', 'CGrg', 'CGir', 'CGzi'))
log.write('\t%f\t%f\t%f\t%f\n'%(observation['CGgu'], observation['CGrg'], observation['CGir'], observation['CGzi']))
log.write('\t%s\t\t%s\t\t%s\t\t%s\n'%('CGguErr', 'CGrgErr', 'CGirErr', 'CGziErr'))
log.write('\t%f\t%f\t%f\t%f\n'%(observation['CGguErr'], observation['CGrgErr'], observation['CGirErr'], observation['CGziErr']))
log_tax.write('%s,%s,%s,%f,%f,%f,%f\n'%(designation, observation['moID'], observation['Phase'], observation['slope'], observation['slopeErr'], observation['bd'], observation['bdErr']))
log.write('\t***************************************\n')
log.close()
log_tax.close()
print 'Processed %d asteroid(s).'%asteroid_count
# 1 1 - 7 moID Unique SDSS moving-object ID
# 8 47 - 59 Time (MJD) Modified Julian Day for the mean observation time
# 34 243 - 244 Identification flag Has this moving object been linked to a known asteroid (0/1)? See Paper II.
# 35 245 - 252 Numeration Numeration of the asteroid. If the asteroid is not numbered, or this moving object has not yet been linked to a known asteroid, it's 0.
# 36 253 - 273 Designation Asteroid designation or name. If this moving object has not yet been linked to a known asteroid, it's '-'
# 20 164 - 169 u SDSS u'g'r'i'z' psf magnitudes and corresponding errors
# 21 170 - 174 uErr
# 22 175 - 180 g
# 23 181 - 185 gErr
# 24 186 - 191 r
# 25 192 - 196 rErr
# 26 197 - 202 i
# 27 203 - 207 iErr
# 28 208 - 213 z
# 29 214 - 218 zErr
# 30 219 - 224 a a* color = 0.89 (g - r) + 0.45 (r - i) - 0.57 (see Paper I)
# 31 225 - 229 aErr
# 32 231 - 236 V Johnson-V band magnitude, synthesized from SDSS magnitudes
# 33 237 - 242 B Johnson-B band magnitude, synthesized from SDSS magnitudes
| mit |
sherazkasi/SabreSoftware | Lib/site-packages/numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
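# each coefficient is divided by the power of x it will carry in the
# antiderivative, and the integration constant k[0] becomes the constant term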
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
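# term-by-term differentiation: the coefficient of x**j is multiplied by j,
# and the constant term is dropped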
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
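# Helper used by poly1d.__str__: lifts each '**<power>' in the flat string
# representation onto a separate superscript line above its term, wrapping the
# two-line output at roughly `wrap` characters.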
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
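# Editor's note: illustrative sketch appended for clarity, not part of the
# original module. deriv() and integ() are inverses up to the integration
# constant, which gives a quick sanity check of the poly1d class above.
def _poly1d_calculus_demo():
    p = poly1d([1, 2, 3])                 # x**2 + 2 x + 3
    assert p.deriv().integ(k=3) == p      # integrate back, constant k = 3
    return p.deriv(), p.integ()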
| gpl-3.0 |
siutanwong/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
    It takes into account whether the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
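# Editor's note: illustrative sketch, not part of the original module. With a
# (n_features + 1,) vector the trailing entry is treated as the intercept;
# otherwise the intercept c is simply 0.
def _intercept_dot_demo():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    _, c0, _ = _intercept_dot(np.array([0.5, -0.5]), X, y)        # no bias
    _, c1, _ = _intercept_dot(np.array([0.5, -0.5, 2.0]), X, y)   # with bias
    return c0, c1   # -> 0.0, 2.0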
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
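# Editor's note: illustrative sketch, not part of the original module. The
# analytic gradient above can be sanity-checked against finite differences
# with scipy.optimize.check_grad (scipy is already imported as `optimize`).
def _logistic_grad_check_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)   # labels must be +1 / -1 here
    w = rng.randn(3)
    err = optimize.check_grad(
        lambda w_: _logistic_loss(w_, X, y, 1.0),
        lambda w_: _logistic_loss_and_grad(w_, X, y, 1.0)[1],
        w)
    return err   # should be tiny (finite-difference noise only)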
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
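# Editor's note: illustrative sketch, not part of the original module. The
# Hessian-vector product Hs can be checked against a finite difference of the
# gradient along the same direction s.
def _logistic_hessp_check_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = np.where(rng.randn(30) > 0, 1., -1.)
    w, s = rng.randn(4), rng.randn(4)
    grad, Hs = _logistic_grad_hess(w, X, y, 1.0)
    eps = 1e-6
    grad2 = _logistic_loss_and_grad(w + eps * s, X, y, 1.0)[1]
    return np.max(np.abs(Hs(s) - (grad2 - grad) / eps))   # should be tiny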
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
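# Editor's note: illustrative sketch, not part of the original module. The
# returned p is a proper probability matrix (rows sum to one) and w is the
# (n_classes, n_features) view of the flat parameter vector.
def _multinomial_loss_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    Y = np.eye(4)[rng.randint(0, 4, 5)]       # one-hot labels, 4 classes
    w = rng.randn(4 * 3)                      # no intercept terms
    loss, p, w_mat = _multinomial_loss(w, X, Y, 1.0, np.ones(5))
    assert np.allclose(p.sum(axis=1), 1.0)
    return loss, p.shape, w_mat.shape         # -> scalar, (5, 4), (4, 3)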
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
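# Editor's note: illustrative sketch, not part of the original module. As in
# the binary case, hessp can be cross-checked against a finite difference of
# the multinomial gradient along the same direction.
def _multinomial_hessp_check_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(25, 3)
    Y = np.eye(3)[rng.randint(0, 3, 25)]
    sw = np.ones(25)
    w, v = rng.randn(9), rng.randn(9)         # 3 classes * 3 features
    grad, hessp = _multinomial_grad_hess(w, X, Y, 1.0, sw)
    eps = 1e-6
    grad2 = _multinomial_loss_grad(w + eps * v, X, Y, 1.0, sw)[1]
    return np.max(np.abs(hessp(v) - (grad2 - grad) / eps))   # should be tiny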
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. 1 and -1 being in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
        # it must work both when the bias term is given and when it is not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
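# Editor's note: illustrative sketch, not part of the original module. A tiny
# binary problem; with fit_intercept=True (the default) each coefficient
# vector has n_features + 1 entries, the last one being the intercept.
def _logistic_regression_path_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 3)
    y = (X[:, 0] + 0.5 * rng.randn(40) > 0).astype(int)
    coefs, Cs = logistic_regression_path(X, y, Cs=5)
    return np.asarray(coefs).shape, Cs        # -> (5, 4), five values of C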
# helper function for LogisticRegressionCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
    # The score method needs the classes_ attribute to be set on the estimator.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
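# Editor's note: illustrative sketch, not part of the original module. Scoring
# one train/test split along the path; with scoring=None the scores are plain
# accuracies, one per value of C.
def _log_reg_scoring_path_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = (X[:, 1] > 0).astype(int)
    train, test = np.arange(40), np.arange(40, 50)
    coefs, Cs, scores = _log_reg_scoring_path(X, y, train, test, pos_class=1,
                                              Cs=3, fit_intercept=True)
    return len(coefs), Cs.shape, scores.shape   # -> 3, (3,), (3,)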
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
    cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
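# Editor's note: illustrative sketch, not part of the original module, showing
# minimal end-to-end use of the estimator defined above on a toy problem.
def _logistic_regression_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 2)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = LogisticRegression(C=1.0).fit(X, y)
    proba = clf.predict_proba(X[:3])
    assert np.allclose(proba.sum(axis=1), 1.0)
    return clf.coef_.shape, clf.intercept_.shape   # -> (1, 2), (1,)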
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, so it is supposed to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
        all classes, since the multinomial loss is shared across all classes.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
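# Editor's note: illustrative sketch, not part of the original module.
# Cross-validated selection of C on a toy binary problem; scores_ is keyed by
# the positive class label and has shape (n_folds, n_cs) as documented above.
def _logistic_regression_cv_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(80, 3)
    y = (X[:, 0] - X[:, 2] > 0).astype(int)
    clf = LogisticRegressionCV(Cs=4, cv=3).fit(X, y)
    return clf.C_, clf.scores_[1].shape   # -> array with one C, (3, 4)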
| bsd-3-clause |
Autoplectic/dit | dit/profiles/base_profile.py | 1 | 2654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The base information profile.
"""
from abc import ABCMeta, abstractmethod
from six import with_metaclass
import numpy as np
from .. import Distribution
profile_docstring = """
{name}
Static Attributes
-----------------
xlabel : str
The label for the x-axis when plotting.
ylabel : str
The label for the y-axis when plotting.
{static_attributes}
Attributes
----------
dist : Distribution
profile : dict
widths : [float]
{attributes}
Methods
-------
draw
Plot the profile
{methods}
Private Methods
---------------
_compute
Compute the profile
"""
class BaseProfile(with_metaclass(ABCMeta, object)):
"""
BaseProfile
Static Attributes
-----------------
xlabel : str
The label for the x-axis when plotting.
ylabel : str
The label for the y-axis when plotting.
Attributes
----------
dist : Distribution
profile : dict
widths : [float]
Methods
-------
draw
Plot the profile.
Abstract Methods
----------------
_compute
Compute the profile.
"""
xlabel = 'scale'
ylabel = 'information [bits]'
align = 'center'
def __init__(self, dist):
"""
Initialize the profile.
Parameters
----------
dist : Distribution
The distribution to compute the profile for.
"""
super(BaseProfile, self).__init__()
outcomes, pmf = zip(*dist.zipped(mode='atoms'))
self.dist = Distribution(outcomes, pmf)
self._compute()
@abstractmethod
def _compute(self):
"""
Abstract method to compute the profile.
"""
pass
def draw(self, ax=None): # pragma: no cover
"""
Draw the profile using matplotlib.
Parameters
----------
ax : axis
The axis to draw the profile on. If None, a new axis is created.
Returns
-------
ax : axis
The axis with profile.
"""
if ax is None:
import matplotlib.pyplot as plt
ax = plt.figure().gca()
# pylint: disable=no-member
left, height = zip(*sorted(self.profile.items()))
ax.bar(left, height, width=self.widths, align=self.align)
ax.set_xticks(sorted(self.profile.keys()))
ax.set_xlabel(self.xlabel)
ax.set_ylabel(self.ylabel)
low, high = ax.get_ylim()
if np.isclose(low, 0, atol=1e-5):
low = -0.1
if np.isclose(high, 0, atol=1e-5):
high = 0.1
ax.set_ylim((low, high))
return ax
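# Editor's note: illustrative sketch, not part of the original module. The
# smallest possible concrete profile: a single bar at scale 1 holding the
# Shannon entropy of the distribution, assuming `self.dist.pmf` contains
# linear (not log) probabilities.
class _EntropyBarProfile(BaseProfile):
    """Toy profile with one bar containing H(X); draw() works unchanged."""
    def _compute(self):
        pmf = np.asarray(self.dist.pmf, dtype=float)
        pmf = pmf[pmf > 0]
        self.profile = {1: float(-np.sum(pmf * np.log2(pmf)))}
        self.widths = [1.0]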
| bsd-3-clause |
hotpxl/mxnet | example/bayesian-methods/bdk_demo.py | 15 | 15051 | from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import logging
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import argparse
from algos import *
from data_loader import *
from utils import *
class CrossEntropySoftmax(mx.operator.NumpyOp):
def __init__(self):
super(CrossEntropySoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
def __init__(self):
super(LogSoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
# y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
# y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (numpy.exp(y) - l).astype('float32')
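# Editor's note: illustrative sketch, not part of the original script. The two
# custom ops above compute softmax cross-entropy in probability space and in
# log space; their forward rules agree up to an exp(), as this check shows.
def _softmax_ops_numpy_check():
    x = numpy.array([[1.0, 2.0, 0.5]])
    p = numpy.exp(x - x.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)
    logp = x - x.max(axis=1, keepdims=True)
    logp -= numpy.log(numpy.exp(logp).sum(axis=1, keepdims=True))
    assert numpy.allclose(numpy.exp(logp), p)
    return p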
def classification_student_grad(student_outputs, teacher_pred):
return [student_outputs[0] - teacher_pred]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
student_mean = student_outputs[0]
student_var = student_outputs[1]
grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
+ 1.0 / teacher_noise_precision)) / 2
return [grad_mean, grad_var]
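# Editor's note: illustrative sketch, not part of the original script. The two
# gradients above match d/dmu and d/ds of the Gaussian negative log-likelihood
# 0.5 * exp(-s) * ((mu - y)**2 + 1/tau) + 0.5 * s, where s = log(sigma**2) is
# the 'student_var' head and tau is the teacher noise precision.
def _regression_student_grad_demo():
    mean = nd.array([[0.5]])
    log_var = nd.array([[0.0]])               # sigma**2 = 1
    teacher_pred = nd.array([[1.0]])
    g_mean, g_var = regression_student_grad([mean, log_var], teacher_pred, 10.0)
    return g_mean.asnumpy(), g_var.asnumpy()  # -> [[-0.5]], [[0.325]]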
def get_mnist_sym(output_op=None, num_hidden=400):
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
if output_op is None:
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
else:
net = output_op(data=net, name='softmax')
return net
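# Editor's note: illustrative sketch, not part of the original script. The MLP
# above maps flattened 28x28 images to 10 outputs; infer_shape confirms the
# wiring without binding any weights.
def _mnist_sym_shape_demo():
    net = get_mnist_sym(num_hidden=400)
    _, out_shapes, _ = net.infer_shape(data=(100, 784))
    return out_shapes   # -> [(100, 10)]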
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta2 / v2
grad[:] = grad_npy
return grad
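# Illustrative sketch (not part of the original example): evaluate
# synthetic_grad once on made-up data, mirroring the constants used in
# run_synthetic_SGLD() below; the sample size and parameter values here are
# assumptions for demonstration only.
def _example_synthetic_grad():
    X = numpy.random.normal(0, numpy.sqrt(2), 10)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    g = synthetic_grad(X, theta, sigma1=numpy.sqrt(10), sigma2=1,
                       sigmax=numpy.sqrt(2), rescale_grad=1.0)
    return g.asnumpy()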
def get_toy_sym(teacher=True, teacher_noise_precision=None):
if teacher:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
grad_scale=teacher_noise_precision)
else:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
net = mx.symbol.Group([student_mean, student_var])
return net
def dev():
return mx.gpu()
def run_mnist_SGD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, exe_params, _ = SGD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
if training_num >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev())
def run_toy_SGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0 / 9.0
net = get_toy_sym(True, teacher_noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
exe, params, _ = \
SGLD(sym=net, data_inputs=data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000,
initializer=initializer,
learning_rate=1E-4,
# lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
prior_precision=0.1,
burn_in_iter_num=1000,
thin_interval=10,
task='regression',
minibatch_size=minibatch_size, dev=dev())
def run_toy_DistilledSGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
# 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
# teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev())
def run_toy_HMC():
X, Y, X_test, Y_test = load_toy()
minibatch_size = Y.shape[0]
noise_precision = 1 / 9.0
net = get_toy_sym(True, noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
sample_num=300000, initializer=initializer, prior_precision=1.0,
learning_rate=1E-3, L=10, dev=dev())
def run_synthetic_SGLD():
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in xrange(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=
X.shape[0] / float(minibatch_size), grad=grad)
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show()
if __name__ == '__main__':
numpy.random.seed(100)
mx.random.seed(100)
parser = argparse.ArgumentParser(
description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
"[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
parser.add_argument("-d", "--dataset", type=int, default=1,
help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
"the SGLD paper")
parser.add_argument("-l", "--algorithm", type=int, default=2,
help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
parser.add_argument("-t", "--training", type=int, default=50000,
help="Number of training samples")
args = parser.parse_args()
training_num = args.training
if args.dataset == 1:
if 0 == args.algorithm:
run_mnist_SGD(training_num)
elif 1 == args.algorithm:
run_mnist_SGLD(training_num)
else:
run_mnist_DistilledSGLD(training_num)
elif args.dataset == 0:
if 1 == args.algorithm:
run_toy_SGLD()
elif 2 == args.algorithm:
run_toy_DistilledSGLD()
elif 3 == args.algorithm:
run_toy_HMC()
else:
run_synthetic_SGLD()
| apache-2.0 |
gentnerlab/klusta-pipeline | klusta_pipeline/probe.py | 1 | 13203 | from scipy import spatial
import itertools
from klusta_pipeline import MAX_CHANS
import numpy as np
def load_sitemap(sitelist):
# site:channel
s = {site:None for site in range(MAX_CHANS)}
with open(sitelist,'r') as f:
for line in f:
indx,_,site = line.split(',')
s[int(site)] = int(indx)
return s
def get_channel_groups(probe,s):
if probe=='A1x32-Poly3-6mm-50':
channel_groups = {
# Shank index.
0: {
# List of channels to keep for spike detection.
'channels': s.values(),
# 2D positions of the channels
'geometry': {
s[3]: (0,500), # column 0
s[2]: (0,450),
s[1]: (0,400),
s[4]: (0,350),
s[5]: (0,300),
s[6]: (0,250),
s[7]: (0,200),
s[8]: (0,150),
s[9]: (0,100),
s[10]: (0,50),
s[11]: (0,0),
s[17]: (50,450), # column 1
s[16]: (50,400),
s[18]: (50,350),
s[15]: (50,300),
s[19]: (50,250),
s[14]: (50,200),
s[20]: (50,150),
s[13]: (50,100),
s[21]: (50,50),
s[12]: (50,0),
s[30]: (100,500), # column 2
s[31]: (100,450),
s[32]: (100,400),
s[29]: (100,350),
s[28]: (100,300),
s[27]: (100,250),
s[26]: (100,200),
s[25]: (100,150),
s[24]: (100,100),
s[23]: (100,50),
s[22]: (100,0),
}
}
}
# probes A1x32-Poly3-xmm-25s-yyy
elif 'A1x32-Poly3' in probe and '25s' in probe:
channel_groups = {
# Shank index.
0: {
# List of channels to keep for spike detection.
'channels': s.values(),
# 2D positions of the channels
'geometry': {
s[17]: (18, 275), # column 1
s[16]: (18, 250),
s[18]: (18, 225),
s[15]: (18, 200),
s[19]: (18, 175),
s[14]: (18, 150),
s[20]: (18, 125),
s[13]: (18, 100),
s[21]: (18, 75),
s[12]: (18, 50),
s[22]: (18, 25),
s[11]: (18, 0),
s[10]: (0, 237), # column 0
s[9]: (0, 212),
s[8]: (0, 187),
s[7]: (0, 162),
s[6]: (0, 137),
s[5]: (0, 112),
s[4]: (0, 87),
s[3]: (0, 62),
s[2]: (0, 37),
s[1]: (0, 12),
s[23]: (36, 237), # column 3
s[24]: (36, 212),
s[25]: (36, 187),
s[26]: (36, 162),
s[27]: (36, 137),
s[28]: (36, 112),
s[29]: (36, 87),
s[30]: (36, 62),
s[31]: (36, 37),
s[32]: (36, 12)
}
}
}
# probes A1x32-Edge-xmm-20s-yyy
elif 'A1x32-Edge' in probe and '20' in probe:
channel_groups = {
# Shank index.
0: {
# List of channels to keep for spike detection.
'channels': s.values(),
# 2D positions of the channels
'geometry': {
                s[1]: (0, 0), # single column (edge probe)
s[2]: (0, 20),
s[3]: (0, 40),
s[4]: (0, 60),
s[5]: (0, 80),
s[6]: (0, 100),
s[7]: (0, 120),
s[8]: (0, 140),
s[9]: (0, 160),
s[10]: (0, 180),
s[11]: (0, 200),
s[12]: (0, 220),
                s[13]: (0, 240),
s[14]: (0, 260),
s[15]: (0, 280),
s[16]: (0, 300),
s[17]: (0, 320),
s[18]: (0, 340),
s[19]: (0, 360),
s[20]: (0, 380),
s[21]: (0, 400),
s[22]: (0, 420),
                s[23]: (0, 440),
s[24]: (0, 460),
s[25]: (0, 480),
s[26]: (0, 500),
s[27]: (0, 520),
s[28]: (0, 540),
s[29]: (0, 560),
s[30]: (0, 580),
s[31]: (0, 600),
s[32]: (0, 620)
}
}
}
elif probe=='A1x16-5mm-50':
channel_groups = {
# Shank index.
0: {
# List of channels to keep for spike detection.
'channels': s.values(),
# 2D positions of the channels, in microns.
# NOTE: For visualization purposes
# in KlustaViewa, the unit doesn't matter.
'geometry': {
s[6]: (0, 0),
s[11]: (0, 50),
s[3]: (0, 100),
s[14]: (0, 150),
s[1]: (0, 200),
s[16]: (0, 250),
s[2]: (0, 300),
s[15]: (0, 350),
s[5]: (0, 400),
s[12]: (0, 450),
s[4]: (0, 500),
s[13]: (0, 550),
s[7]: (0, 600),
s[10]: (0, 650),
s[8]: (0, 700),
s[9]: (0, 750),
}
}
}
#probes A2x2-tet-3mm-150-312
elif probe=='A2x2-tet-3mm-150-312':
channel_groups = {
# Shank index.
0: {
# List of channels to keep for spike detection.
'channels': s.values(),
# 2D positions of the channels, in microns.
# NOTE: For visualization purposes
# in KlustaViewa, the unit doesn't matter.
'geometry': {
s[2]: (0, 0),
s[1]: (0, 150),
s[3]: (-18, 18),
s[6]: (-18, 168),
s[5]: (0, 35),
s[4]: (0, 185),
s[7]: (18, 18),
s[8]: (18, 168),
}
},
1: {
'channels': s.values(),
'geometry': {
s[12]: (150, 0),
s[13]: (150, 150),
s[10]: (132, 18),
s[9]: (132, 168),
s[15]: (150, 35),
s[16]: (150, 185),
s[14]: (168, 18),
s[11]: (168, 168),
}
}
}
elif probe=='A1x16-5mm-50-177-H16':
channel_groups = {
# Shank index.
0: {
# List of channels to keep for spike detection.
'channels': s.values(),
# 2D positions of the channels, in microns.
# NOTE: For visualization purposes
# in KlustaViewa, the unit doesn't matter.
'geometry': {
s[1]: (0, 0),
s[16]: (0, 50),
s[2]: (0, 100),
s[15]: (0, 150),
s[3]: (0, 200),
s[14]: (0, 250),
s[4]: (0, 300),
s[13]: (0, 350),
s[5]: (0, 400),
s[12]: (0, 450),
s[6]: (0, 500),
s[11]: (0, 550),
s[7]: (0, 600),
s[10]: (0, 650),
s[8]: (0, 700),
s[9]: (0, 750),
}
}
}
elif probe=='Buzsaki32':
channel_groups = {}
for i in range(4):
channel_groups[i] = {}
channel_groups[i]['geometry'] = {
s[5 + i*8]: (0 + 200 * i, 0),
s[4 + i*8]: (-8.5 + 200 * i, 20),
s[6 + i*8]: (8.5 + 200 * i, 40),
s[3 + i*8]: (-12.5 + 200 * i, 60),
s[7 + i*8]: (12.5 + 200 * i, 80),
s[2 + i*8]: (-16.5 + 200 * i, 100),
s[8 + i*8]: (16.5 + 200 * i, 120),
s[1 + i*8]: (-20.5 + 200 * i, 140),
}
channel_groups[i]['channels'] = channel_groups[i]['geometry'].keys()
elif 'a4x4-4mm200' in probe:
channel_groups = {}
for i in range(4):
channel_groups[i] = {}
channel_groups[0]['geometry'] = {
s[6]: (0,0),
s[2]: (0,200),
s[3]: (0,400),
s[1]: (0,600),
}
channel_groups[1]['geometry'] = {
s[5]: (200,0),
s[8]: (200,200),
s[4]: (200,400),
s[7]: (200,600),
}
channel_groups[2]['geometry'] = {
s[9]: (400,0),
s[12]: (400,200),
s[10]: (400,400),
s[13]: (400,600),
}
channel_groups[3]['geometry'] = {
s[15]: (600,0),
s[11]: (600,200),
s[16]: (600,400),
s[14]: (600,600),
}
for i in range(4):
channel_groups[i]['channels'] = channel_groups[i]['geometry'].keys()
elif 'a1x32-10mm50' in probe:
channel_groups = {
0: {
'channels': s.values(),
'geometry': {s[i]: (0, idx*50) for idx, i in enumerate(itertools.chain(*zip(range(1, 17), range(32, 16, -1))))},
}
}
else:
raise Exception('probe not found: ' + probe)
return channel_groups
def get_graph_from_geometry(geometry):
# let's transform the geometry into lists of channel names and coordinates
chans,coords = zip(*[(ch,xy) for ch,xy in geometry.iteritems()])
    # we'll perform the Delaunay triangulation and extract the neighbour
    # information from it
    try:
        tri = spatial.Delaunay(coords)
    except:
        # the triangulation can fail for degenerate layouts (e.g. collinear
        # sites); add a dummy site so it succeeds, then ignore any edge that
        # touches it below
        x,y = zip(*coords)
        coords = list(coords)
        coords.append((max(x)+1,max(y)+1))
        tri = spatial.Delaunay(coords)
# then build the list of edges from the triangulation
indices, indptr = tri.vertex_neighbor_vertices
print indices, indptr
edges = []
for k in range(indices.shape[0]-1):
for j in indptr[indices[k]:indices[k+1]]:
try:
edges.append((chans[k],chans[j]))
except IndexError:
# ignore dummy site
pass
return edges
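# Illustrative sketch (not part of the original module): compute adjacency
# edges for a tiny, made-up 4-site geometry; the channel ids and coordinates
# are arbitrary.
def _example_graph_from_geometry():
    geometry = {0: (0, 0), 1: (0, 25), 2: (18, 12), 3: (18, 37)}
    # returns channel-id pairs such as (0, 1), (0, 2), ...
    return get_graph_from_geometry(geometry)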
def clean_dead_channels(channel_groups):
new_group = {}
for gr, group in channel_groups.iteritems():
new_group[gr] = {
'channels': [],
'geometry': {}
}
new_group[gr]['channels'] = [ch for ch in group['channels'] if ch is not None]
new_group[gr]['geometry'] = {ch:xy for (ch,xy) in group['geometry'].iteritems() if ch is not None}
return new_group
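# Illustrative sketch (not part of the original module): drop a dead site
# (one left mapped to None by load_sitemap) from a made-up single-group
# layout; only channels 10 and 11 survive.
def _example_clean_dead_channels():
    groups = {0: {'channels': [10, 11, None],
                  'geometry': {10: (0, 0), 11: (0, 25), None: (0, 50)}}}
    return clean_dead_channels(groups)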
def build_geometries(channel_groups):
for gr, group in channel_groups.iteritems():
group['graph'] = get_graph_from_geometry(group['geometry'])
return channel_groups
def load_probe(filename):
prb = {}
execfile(filename, {}, prb)
return prb['channel_groups']
def plot_channel_groups(channel_groups):
import matplotlib.pyplot as plt
n_shanks = len(channel_groups)
f,ax = plt.subplots(1,n_shanks,squeeze=False)
for sh in range(n_shanks):
coords = [xy for ch,xy in channel_groups[sh]['geometry'].iteritems()]
x,y = zip(*coords)
ax[sh,0].scatter(x,y,color='0.2')
for pr in channel_groups[sh]['graph']:
points = [channel_groups[sh]['geometry'][p] for p in pr]
ax[sh,0].plot(*zip(*points),color='k',alpha=0.2)
ax[sh,0].set_xlim(min(x)-10,max(x)+10)
ax[sh,0].set_ylim(min(y)-10,max(y)+10)
ax[sh,0].set_xticks([])
ax[sh,0].set_yticks([])
ax[sh,0].set_title('group %i'%sh)
ax[sh,0].set_aspect('equal')
plt.show()
def _get_args():
import argparse
    parser = argparse.ArgumentParser(description='display the geometry defined in a probe file')
parser.add_argument('probefile',type=str,
help='the probe file')
return parser.parse_args()
def _display():
args = _get_args()
channel_groups = load_probe(args.probefile)
plot_channel_groups(channel_groups)
| bsd-3-clause |
UCL-RITS/rcps-rcops-scripts | thomas/allocations.py | 1 | 3338 | #!/usr/bin/env python3
import os.path
import sys
import configparser
import argparse
#import csv
import numpy
import pandas
def getargs(argv):
parser = argparse.ArgumentParser(description="Show allocation usage in given period.")
parser.add_argument("--input", help="Gold allocations from stdin, input formed from glsalloc --raw", action='store_true')
parser.add_argument("-i", "--institute", dest="institute", help="Show Gold total usage for this institute")
parser.add_argument("-d", "--date", dest="date", help="Filter by start date of allocation period, in format yyyy-mm-dd")
parser.add_argument("--csv", dest="csvfile", help="Write out CSV to this file in this location")
parser.add_argument("--verbose", help="", action='store_true')
parser.add_argument("--debug", help="", action='store_true')
# Show the usage if no arguments are supplied
if len(argv) < 1:
parser.print_usage()
exit(1)
# return the arguments
# contains only the attributes for the main parser and the subparser that was used
return parser.parse_args(argv)
# end getargs
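# Illustrative sketch (not part of the original script): the same
# Deposited - Unallocated - Unused arithmetic on a tiny, made-up table.
# A typical command line would be something like (the Gold command name and
# date are assumptions):
#   glsalloc --raw | python allocations.py --input -d 2017-02-01 --csv out.csv
def _example_usage_summary():
    df = pandas.DataFrame({'StartTime': ['2017-02-01'],
                           'Institute': ['ucl'],
                           'Deposited': [100],
                           'Unallocated': [20],
                           'Unused': [30]})
    df['Used'] = df['Deposited'] - df['Unallocated'] - df['Unused']
    return df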
# Put main in a function so it is importable.
def main(argv):
try:
args = getargs(argv)
# make a dictionary from args to make string substitutions doable by key name
#args_dict = vars(args)
except ValueError as err:
print(err)
exit(1)
# Update Gold allocations from pipe-separated stdin input
# Id|Account|Projects|StartTime|EndTime|Amount|Deposited|Description
if args.input:
dataframe = pandas.read_csv(sys.stdin, sep='|')
# filter by date
if args.date is not None:
dataframe = dataframe[dataframe.StartTime == args.date]
# add an institute column. We only want the first item in the split
dataframe['Institute'] = dataframe['Projects'].str.split('_', 1).str[0]
# filter by institute
if args.institute is not None:
# select all lines where institute matches
dataframe = dataframe[dataframe.Institute == args.institute]
# filter out _allocation projects and everything else separately
allocs = dataframe[dataframe.Projects.str.contains("_allocation")]
projects = dataframe[~dataframe.Projects.str.contains("_allocation")]
# We want this output:
# StartTime Institute Deposited Unallocated Unused % Unused
projects = projects.rename(columns={'Amount':'Unused'})
allocs = allocs.rename(columns={'Amount':'Unallocated'})
# sum the unused time for each institute in this period
unused = projects.groupby(['StartTime','Institute'])['Unused'].sum().reset_index()
# merge the columns we want from allocs and unused
result = allocs[['StartTime', 'EndTime', 'Institute', 'Deposited', 'Unallocated']].merge(unused, on=['StartTime', 'Institute'])
result['Used'] = result['Deposited'] - result['Unallocated'] - result['Unused']
# write out as csv, leave off the row indices
        if args.csvfile is not None:
            result.to_csv(args.csvfile, index=False)
else:
print(result)
else:
print("No input was specified.")
# end main
# When not imported, use the normal global arguments
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
axeltidemann/self_dot | my_sai_test.py | 1 | 6472 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2014 Oeyvind Brandtsegg and Axel Tidemann
#
# This file is part of [self.]
#
# [self.] is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# [self.] is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with [self.]. If not, see <http://www.gnu.org/licenses/>.
''' [self.]
@author: Axel Tidemann, Øyvind Brandtsegg
@contact: [email protected], [email protected]
@license: GPL
'''
import time
import glob
import itertools
import multiprocessing as mp
import cPickle as pickle
import os
import numpy as np
from scipy.io import wavfile
from scipy.cluster.vq import kmeans, vq
import cv2
import sai as pysai
import IO
import utils
import brain
def CreateSAIParams(sai_width, num_triggers_per_frame=2, **kwargs):
"""Fills an SAIParams object using reasonable defaults for some fields."""
return pysai.SAIParams(sai_width=sai_width,
# Half of the SAI should come from the future.
future_lags=sai_width / 2,
num_triggers_per_frame=num_triggers_per_frame,
**kwargs)
def sai_rectangles(sai_frame, channels=32, lags=16):
center = sai_frame.shape[1]/2
height = sai_frame.shape[0]
width = sai_frame.shape[1]
window_height = channels
window_width = lags
marginal_values = []
while window_height <= height:
end_row = window_height
while end_row <= height:
while window_width <= width:
#print 'SAI frame section {} {} {} {}'.format(end_row - window_height, end_row, center - (window_width - width/2) if window_width > width/2 else center, np.clip(center + window_width, center, width))
r = sai_frame[end_row - window_height:end_row, center - (window_width - width/2) if window_width > width/2 else center:np.clip(center + window_width, center, width)]
r_resized = cv2.resize(r, (lags, channels))
marginal_values.append(np.hstack((np.mean(r_resized, axis=0), np.mean(r_resized, axis=1))))
window_width *= 2
end_row += window_height/2
window_width = lags
window_height *= 2
return marginal_values
def sai_codebooks(marginals, k):
code_books = []
for rectangle in zip(*marginals):
obs = np.vstack(rectangle)
#print '{} observations, {} features, k={}'.format(obs.shape[0], obs.shape[1], k)
code_books.append(kmeans(obs, k)[0])
return code_books
def sai_histogram(sai_video_marginals, codebooks, k):
sparse_code = np.zeros(len(codebooks)*k)
for marginals in sai_video_marginals:
sparse_frame = [ np.zeros(k) ] * len(codebooks)
for rect, code, frame in zip(marginals, codebooks, sparse_frame):
frame[vq(np.atleast_2d(rect), code)[0]] = 1
sparse_code += np.hstack(sparse_frame)
return sparse_code
def sai_sparse_codes(sai_video_marginals, k):
all_marginals = list(itertools.chain.from_iterable(sai_video_marginals))
codebooks = sai_codebooks(all_marginals, k)
return [ sai_histogram(s, codebooks, k) for s in sai_video_marginals ]
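# Illustrative sketch (not part of the original pipeline): run the
# rectangle -> codebook -> histogram steps above on random "SAI videos".
# The frame shape, number of frames/videos and k are made-up values.
def _example_sparse_codes():
    fake_videos = [[sai_rectangles(np.random.rand(64, 1024))
                    for _ in range(3)] for _ in range(2)]
    # one sparse histogram per video, of length n_rectangles * k
    return sai_sparse_codes(fake_videos, k=4)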
def _valid_file(filename, threshold=.1):
try:
return utils.get_segments(filename)[-1] > threshold
except:
return False
def _cochlear_trim_sai_marginals(filename_and_indexes):
try:
filename, norm_segstart, norm_segend, audio_id, NAP_detail = filename_and_indexes
sai_video_filename = '{}_sai_video_{}'.format(filename, NAP_detail)
if os.path.isfile('{}.npy'.format(sai_video_filename)):
return sai_video_filename
if NAP_detail == 'high':
try:
NAP = utils.csv_to_array(filename+'cochlear'+NAP_detail)
except:
NAP = brain.cochlear(filename, stride=1, rate=44100, apply_filter=0, suffix='cochlear'+NAP_detail)
if NAP_detail == 'low':
try:
NAP = utils.csv_to_array(filename+'cochlear'+NAP_detail)
except:
NAP = brain.cochlear(filename, stride=IO.NAP_STRIDE, rate=IO.NAP_RATE, apply_filter=0, suffix='cochlear'+NAP_detail) # Seems to work best, in particular when they are all the same.
num_channels = NAP.shape[1]
input_segment_width = 2048
sai_params = CreateSAIParams(num_channels=num_channels,
input_segment_width=input_segment_width,
trigger_window_width=input_segment_width,
sai_width=1024)
sai = pysai.SAI(sai_params)
NAP = utils.trim_right(NAP[ np.int(np.rint(NAP.shape[0]*norm_segstart)) : np.int(np.rint(NAP.shape[0]*norm_segend)) ], threshold=.05)
sai_video = [ np.copy(sai.RunSegment(input_segment.T)) for input_segment in utils.chunks(NAP, input_segment_width) ]
del NAP
np.save(sai_video_filename, np.array([ sai_rectangles(frame) for frame in sai_video ]))
return sai_video_filename
except:
print utils.print_exception('Calculation SAI video failed for file {}, NAP detail {}'.format(filename, NAP_detail))
return False
def experiment(filenames, k):
t0 = time.time()
pool = mp.Pool()
sai_video_marginals = pool.map(_cochlear_trim_sai_marginals, filenames)
pool.close()
t1 = time.time()
print 'Cochlear SAI marginals calculated in {} seconds'.format(t1 - t0)
sparse_codes = sai_sparse_codes([ np.load('{}.npy'.format(filename)) for filename in sai_video_marginals if filename ], k)
print 'Sparse codes calculated in {} seconds'.format(time.time() - t1)
return sparse_codes
if __name__ == '__main__':
import matplotlib.pyplot as plt
sparse_codes = experiment([ filename for filename in glob.glob('testing/*wav') if _valid_file(filename) ], k=4)
#pickle.dump(sai_video_marginals, open('SAIVIDEOMARGINALS', 'w'))
plt.ion()
plt.matshow(sparse_codes, aspect='auto')
plt.colorbar()
| gpl-3.0 |
ravindrapanda/tensorflow | tensorflow/examples/get_started/regression/test.py | 41 | 4037 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple smoke test that runs these examples for 1 training iteration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pandas as pd
from six.moves import StringIO
import tensorflow.examples.get_started.regression.imports85 as imports85
sys.modules["imports85"] = imports85
# pylint: disable=g-bad-import-order,g-import-not-at-top
import tensorflow.contrib.data as data
import tensorflow.examples.get_started.regression.dnn_regression as dnn_regression
import tensorflow.examples.get_started.regression.linear_regression as linear_regression
import tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical
import tensorflow.examples.get_started.regression.custom_regression as custom_regression
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# pylint: disable=g-bad-import-order,g-import-not-at-top
# pylint: disable=line-too-long
FOUR_LINES = "\n".join([
"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500",
"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950",
"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450",
"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250",])
# pylint: enable=line-too-long
def four_lines_dataframe():
text = StringIO(FOUR_LINES)
return pd.read_csv(text, names=imports85.types.keys(),
dtype=imports85.types, na_values="?")
def four_lines_dataset(*args, **kwargs):
del args, kwargs
return data.Dataset.from_tensor_slices(FOUR_LINES.split("\n"))
class RegressionTest(googletest.TestCase):
"""Test the regression examples in this directory."""
@test.mock.patch.dict(data.__dict__,
{"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(linear_regression.__dict__, {"STEPS": 1})
def test_linear_regression(self):
linear_regression.main([""])
@test.mock.patch.dict(data.__dict__,
{"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(linear_regression_categorical.__dict__, {"STEPS": 1})
def test_linear_regression_categorical(self):
linear_regression_categorical.main([""])
@test.mock.patch.dict(data.__dict__,
{"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(dnn_regression.__dict__, {"STEPS": 1})
def test_dnn_regression(self):
dnn_regression.main([""])
@test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(custom_regression.__dict__, {"STEPS": 1})
def test_custom_regression(self):
custom_regression.main([""])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
fyffyt/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
miyyer/qb | cli.py | 2 | 10210 | #!/usr/bin/env python
"""
CLI utilities for QANTA
"""
from typing import Dict, Optional
import random
import sqlite3
import csv
from collections import defaultdict
import json
from os import path
import click
import yaml
from jinja2 import Environment, PackageLoader
import tqdm
from qanta import qlogging
from qanta.guesser.abstract import AbstractGuesser
from qanta.guesser.elasticsearch import elasticsearch_cli
from qanta.util.environment import ENVIRONMENT
from qanta.util.io import safe_open, shell, get_tmp_filename
from qanta.util.constants import QANTA_SQL_DATASET_PATH, GUESSER_GENERATION_FOLDS
from qanta.hyperparam import expand_config
from qanta.wikipedia.categories import categorylinks_cli
from qanta.wikipedia.vital import vital_cli
from qanta.ingestion.trickme import trick_cli
from qanta.ingestion.command import ingestion_cli
log = qlogging.get("cli")
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
def main():
log.info("QANTA starting with configuration:")
for k, v in ENVIRONMENT.items():
log.info("{0}={1}".format(k, v))
main.add_command(categorylinks_cli, name="categories")
main.add_command(vital_cli, name="vital")
main.add_command(elasticsearch_cli, name="elasticsearch")
main.add_command(trick_cli, name="trick")
main.add_command(ingestion_cli, name="map")
@main.command()
@click.option("--host", default="0.0.0.0")
@click.option("--port", default=5000)
@click.option("--debug", default=False)
@click.argument("guessers", nargs=-1)
def guesser_api(host, port, debug, guessers):
if debug:
log.warning(
"WARNING: debug mode can expose environment variables (AWS keys), NEVER use when API is exposed to web"
)
log.warning("Confirm that you would like to enable flask debugging")
confirmation = input("yes/no:\n").strip()
if confirmation != "yes":
raise ValueError("Most confirm enabling debug mode")
AbstractGuesser.multi_guesser_web_api(guessers, host=host, port=port, debug=debug)
def run_guesser(n_times, workers, guesser_qualified_class):
for _ in range(n_times):
if "qanta.guesser" not in guesser_qualified_class:
log.error(
"qanta.guesser not found in guesser_qualified_class, this is likely an error, exiting."
)
return
shell("rm -rf /tmp/qanta")
shell(f"rm -rf output/guesser/{guesser_qualified_class}")
shell(
f"luigi --local-scheduler --module qanta.pipeline.guesser --workers {workers} AllSingleGuesserReports"
)
@main.command()
@click.option("--n_times", default=1)
@click.option("--workers", default=1)
@click.argument("guesser_qualified_class")
def guesser_pipeline(n_times, workers, guesser_qualified_class):
run_guesser(n_times, workers, guesser_qualified_class)
@main.command()
@click.option("--n", default=20)
@click.option("--seed", default=0)
def sample_answer_pages(n, seed):
"""
Take a random sample of n questions, then return their answers and pages
formatted for latex in the journal paper
"""
with open("data/external/datasets/qanta.mapped.2018.04.18.json") as f:
questions = json.load(f)["questions"]
random.seed(seed)
random.shuffle(questions)
for i, q in enumerate(questions[:n]):
answer = q["answer"]
page = q["page"]
        if i + 1 == n:
            latex_format = r"{answer} & {page}\\ \bottomrule"
        else:
            latex_format = r"{answer} & {page}\\ \midrule"
answer = answer.replace("{", r"\{").replace("}", r"\}").replace("_", r"\_")
if page is None:
page = r"\textbf{No Mapping Found}"
else:
page = page.replace("{", r"\{").replace("}", r"\}").replace("_", r"\_")
print(latex_format.format(answer=answer, page=page))
@main.command()
@click.argument("base_file")
@click.argument("hyper_file")
@click.argument("output_file")
def hyper_to_conf(base_file, hyper_file, output_file):
expand_config(base_file, hyper_file, output_file)
def get_slurm_config_value(
name: str, default_config: Dict, guesser_config: Optional[Dict]
):
if guesser_config is None:
return default_config[name]
else:
if name in guesser_config:
return guesser_config[name]
else:
return default_config[name]
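# Illustrative sketch (not part of the original CLI): per-guesser slurm values
# override the defaults, falling back key by key; the config dicts here are
# made up.
def _example_slurm_config_lookup():
    default = {"partition": "dpart", "qos": "batch"}
    guesser = {"partition": "gpu"}
    # -> ("gpu", "batch")
    return (get_slurm_config_value("partition", default, guesser),
            get_slurm_config_value("qos", default, guesser))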
@main.command()
@click.option("--slurm-config-file", default="slurm-config.yaml")
@click.argument("task")
@click.argument("output_dir")
def generate_guesser_slurm(slurm_config_file, task, output_dir):
with open(slurm_config_file) as f:
        slurm_config = yaml.safe_load(f)
default_slurm_config = slurm_config["default"]
env = Environment(loader=PackageLoader("qanta", "slurm/templates"))
template = env.get_template("guesser-luigi-template.sh")
enabled_guessers = list(AbstractGuesser.list_enabled_guessers())
for i, gs in enumerate(enabled_guessers):
if gs.guesser_class == "ElasticSearchGuesser":
raise ValueError("ElasticSearchGuesser is not compatible with slurm")
elif gs.guesser_class in slurm_config:
guesser_slurm_config = slurm_config[gs.guesser_class]
else:
guesser_slurm_config = None
partition = get_slurm_config_value(
"partition", default_slurm_config, guesser_slurm_config
)
qos = get_slurm_config_value("qos", default_slurm_config, guesser_slurm_config)
mem_per_cpu = get_slurm_config_value(
"mem_per_cpu", default_slurm_config, guesser_slurm_config
)
gres = get_slurm_config_value(
"gres", default_slurm_config, guesser_slurm_config
)
max_time = get_slurm_config_value(
"max_time", default_slurm_config, guesser_slurm_config
)
cpus_per_task = get_slurm_config_value(
"cpus_per_task", default_slurm_config, guesser_slurm_config
)
account = get_slurm_config_value(
"account", default_slurm_config, guesser_slurm_config
)
if task == "GuesserReport":
folds = GUESSER_GENERATION_FOLDS
else:
folds = []
script = template.render(
{
"task": task,
"guesser_module": gs.guesser_module,
"guesser_class": gs.guesser_class,
"dependency_module": gs.dependency_module,
"dependency_class": gs.dependency_class,
"config_num": gs.config_num,
"partition": partition,
"qos": qos,
"mem_per_cpu": mem_per_cpu,
"max_time": max_time,
"gres": gres,
"cpus_per_task": cpus_per_task,
"account": account,
"folds": folds,
}
)
slurm_file = path.join(output_dir, f"slurm-{i}.sh")
with safe_open(slurm_file, "w") as f:
f.write(script)
singleton_path = "qanta/slurm/templates/guesser-singleton.sh"
singleton_output = path.join(output_dir, "guesser-singleton.sh")
shell(f"cp {singleton_path} {singleton_output}")
master_template = env.get_template("guesser-master-template.sh")
master_script = master_template.render(
{
"script_list": [
path.join(output_dir, f"slurm-{i}.sh")
for i in range(len(enabled_guessers))
]
+ [singleton_output],
"gres": gres,
"partition": partition,
"qos": qos,
"mem_per_cpu": mem_per_cpu,
"max_time": max_time,
"gres": gres,
"cpus_per_task": cpus_per_task,
"account": account,
}
)
with safe_open(path.join(output_dir, "slurm-master.sh"), "w") as f:
f.write(master_script)
@main.command()
@click.option("--partition", default="dpart")
@click.option("--qos", default="batch")
@click.option("--mem-per-cpu", default="8g")
@click.option("--max-time", default="1-00:00:00")
@click.option("--nodelist", default=None)
@click.option("--cpus-per-task", default=None)
@click.argument("luigi_module")
@click.argument("luigi_task")
def slurm(
partition,
qos,
mem_per_cpu,
max_time,
nodelist,
cpus_per_task,
luigi_module,
luigi_task,
):
env = Environment(loader=PackageLoader("qanta", "slurm/templates"))
template = env.get_template("luigi-template.sh.jinja2")
sbatch_script = template.render(
{
"luigi_module": luigi_module,
"luigi_task": luigi_task,
"partition": partition,
"qos": qos,
"mem_per_cpu": mem_per_cpu,
"max_time": max_time,
"nodelist": nodelist,
"cpus_per_task": cpus_per_task,
}
)
tmp_file = get_tmp_filename()
with open(tmp_file, "w") as f:
f.write(sbatch_script)
shell(f"sbatch {tmp_file}")
shell(f"rm -f {tmp_file}")
@main.command()
def answer_map_google_csvs():
from qanta.ingestion.gspreadsheets import create_answer_mapping_csvs
create_answer_mapping_csvs()
@main.command()
@click.argument("question_tsv")
def process_annotated_test(question_tsv):
import pandas as pd
df = pd.read_csv(question_tsv, delimiter="\t")
proto_questions = df[df.qdb_id.isna()]
qdb_questions = df[df.proto_id.isna()]
qdb_map = {
int(q.qdb_id): q.page for q in qdb_questions.itertuples() if type(q.page) is str
}
proto_map = {
q.proto_id: q.page for q in proto_questions.itertuples() if type(q.page) is str
}
print("Proto lines")
for qid, page in proto_map.items():
print(f" {qid}: {page}")
print("QDB lines")
for qid, page in qdb_map.items():
print(f" {qid}: {page}")
print("Unmappable proto")
for r in proto_questions.itertuples():
if type(r.page) is not str:
print(f" - {r.proto_id}")
print("Unmappable qdb")
for r in qdb_questions.itertuples():
if type(r.page) is not str:
print(f" - {int(r.qdb_id)}")
if __name__ == "__main__":
main()
| mit |
dgormez/pattern-recognition | TEST/projetImagerie.py | 1 | 4323 | """
Author : GORMEZ David
Imagery Project: Pattern recognition
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from skimage.color import rgb2hsv,rgb2lab,hsv2rgb
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.vq import kmeans,kmeans2,vq
def loadImages(formatChange):
if formatChange:
return changeFormat(plt.imread("./images/AB05.png"))
else:
return plt.imread("./images/AB05.png")
def changeFormat(img):
return (255*img).astype(np.uint8)
def convertHSV(img):
if img.shape[2]==4:
return rgb2hsv(img[:,:,0:3])
else:
if img.shape[2]==3:
return rgb2hsv(img)
else:
print ("Image format not supported")
def convertHSVtoRGB(img):
return hsv2rgb(img)
def scatter3D(centroids):
# visualizing the centroids into the RGB space
fig = plt.figure(3)
ax = Axes3D(fig)
ax.scatter(centroids[:,0],centroids[:,1],centroids[:,2],c=centroids/255.,s=100)
def convertLAB(img):
if img.shape[2]==4:
return rgb2lab(img[:,:,0:3])
else:
if img.shape[2]==3:
return rgb2lab(img)
else:
print ("Image format not supported")
def showOnScreen(img):
plt.Figure()
plt.imshow(img,interpolation='nearest')
def clustering(img,clusters):
#Reshaping image in list of pixels to allow kmean Algorithm
#From 1792x1792x3 to 1792^2x3
pixels = np.reshape(img,(img.shape[0]*img.shape[1],3))
print ("pixels in Clustering : ",pixels.dtype,pixels.shape,type(pixels))
#performing the clustering
centroids,_ = kmeans(pixels,clusters,iter=3)
print ("Centroids : ",centroids.dtype,centroids.shape,type(centroids))
print centroids
# quantization
#Assigns a code from a code book to each observation
#code : A length N array holding the code book index for each observation.
#dist : The distortion (distance) between the observation and its nearest code.
code,_ = vq(pixels,centroids)
print ("Code : ",code.dtype,code.shape,type(code))
print code
# reshaping the result of the quantization
reshaped = np.reshape(code,(img.shape[0],img.shape[1]))
print ("reshaped : ",reshaped.dtype,reshaped.shape,type(reshaped))
clustered = centroids[reshaped]
print ("clustered : ",clustered.dtype,clustered.shape,type(clustered))
#scatter3D(centroids)
return clustered
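# Illustrative sketch (not part of the original script): the same
# reshape -> kmeans -> vq -> reshape pattern applied to a tiny random RGB
# image, just to show the shapes involved.
def _example_clustering_small():
    tiny = np.random.rand(8, 8, 3).astype(np.float32)
    return clustering(tiny, 2)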
def clustering2(img,clusters):
#Reshaping image in list of pixels to allow kmean Algorithm
#From 1792x1792x3 to 1792^2x3
pixels = np.reshape(img,(img.shape[0]*img.shape[1],3))
    centroids,_ = kmeans2(pixels,clusters,iter=3,minit='random')
print ("Centroids : ",centroids.dtype,centroids.shape,type(centroids))
print centroids
# quantization
#Assigns a code from a code book to each observation
#code : A length N array holding the code book index for each observation.
#dist : The distortion (distance) between the observation and its nearest code.
code,_ = vq(pixels,centroids)
print ("Code : ",code.dtype,code.shape,type(code))
print code
# reshaping the result of the quantization
reshaped = np.reshape(code,(img.shape[0],img.shape[1]))
print ("reshaped : ",reshaped.dtype,reshaped.shape,type(reshaped))
clustered = centroids[reshaped]
print ("clustered : ",clustered.dtype,clustered.shape,type(clustered))
#scatter3D(centroids)
return clustered
img = loadImages(False)
print ("Original Image",img.dtype, type(img),img.shape)
print ("pixel test Original = ", img[img.shape[0]/2,img.shape[1]/2,:])
#img = changeFormat(img)
imgHSV = convertHSV(img)
print ("imgHSV : ", imgHSV.dtype, type(imgHSV),imgHSV.shape)
print ("pixel test HSV = ", imgHSV[imgHSV.shape[0]/2,imgHSV.shape[1]/2,:])
clusters = 6
imgClus = convertHSVtoRGB(clustering(imgHSV,clusters))
imgClus2 = convertHSVtoRGB(clustering2(imgHSV,clusters))
"""
kmeanHSV1 = kmeansAlgo(imgHSV)
kmean2 = kmeansAlgo2(img)
kmeanHSV2 = kmeansAlgo2(imgHSV)
"""
#imgLAB = convertLAB(img)
window1 = plt.figure(1)
window1.add_subplot(1,2,1)
plt.title('Original')
plt.imshow(img)
window1.add_subplot(1,2,2)
plt.imshow(imgClus)
plt.title("After Clustering1")
window2= plt.figure(2)
plt.imshow(imgClus2)
plt.title("After Clustering2")
plt.show()
| gpl-2.0 |
rfdougherty/dipy | dipy/viz/tests/test_regtools.py | 19 | 1473 | import numpy as np
from dipy.viz import regtools
import numpy.testing as npt
from dipy.align.metrics import SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
# Conditional import machinery for matplotlib
from dipy.utils.optpkg import optional_package
_, have_matplotlib, _ = optional_package('matplotlib')
@npt.dec.skipif(not have_matplotlib)
def test_plot_2d_diffeomorphic_map():
# Test the regtools plotting interface (lightly).
mv_shape = (11, 12)
moving = np.random.rand(*mv_shape)
st_shape = (13, 14)
static = np.random.rand(*st_shape)
dim = static.ndim
metric = SSDMetric(dim)
level_iters = [200, 100, 50, 25]
sdr = SymmetricDiffeomorphicRegistration(metric,
level_iters,
inv_iter=50)
mapping = sdr.optimize(static, moving)
# Smoke testing of plots
ff = regtools.plot_2d_diffeomorphic_map(mapping, 10)
    # Default shape is static shape, moving shape
npt.assert_equal(ff[0].shape, st_shape)
npt.assert_equal(ff[1].shape, mv_shape)
# Can specify shape
ff = regtools.plot_2d_diffeomorphic_map(mapping,
delta = 10,
direct_grid_shape=(7, 8),
inverse_grid_shape=(9, 10))
npt.assert_equal(ff[0].shape, (7, 8))
npt.assert_equal(ff[1].shape, (9, 10))
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/tight_layout.py | 16 | 13115 | """
This module provides routines to adjust subplot params so that subplots are
nicely fit in the figure. In doing so, only axis labels, tick labels and axes
titles are currently considered.
Internally, it assumes that the margins (left margin, etc.) which are
differences between ax.get_tightbbox and ax.bbox are independent of the axes
position. This may fail if Axes.adjustable is datalim. It will also fail in
some cases (for example, when the left or right margin is affected by xlabel).
"""
import warnings
import matplotlib
from matplotlib.transforms import TransformedBbox, Bbox
from matplotlib.font_manager import FontProperties
rcParams = matplotlib.rcParams
def _get_left(tight_bbox, axes_bbox):
return axes_bbox.xmin - tight_bbox.xmin
def _get_right(tight_bbox, axes_bbox):
return tight_bbox.xmax - axes_bbox.xmax
def _get_bottom(tight_bbox, axes_bbox):
return axes_bbox.ymin - tight_bbox.ymin
def _get_top(tight_bbox, axes_bbox):
return tight_bbox.ymax - axes_bbox.ymax
def auto_adjust_subplotpars(fig, renderer,
nrows_ncols,
num1num2_list,
subplot_list,
ax_bbox_list=None,
pad=1.08, h_pad=None, w_pad=None,
rect=None):
"""
    Return a dictionary of subplot parameters so that the spacing between
    subplots is adjusted. Note that this function ignores the geometry
    information of the subplots themselves, but uses what is given by the
    *nrows_ncols* and *num1num2_list* parameters. Also, the results could be
    incorrect if some subplots have ``adjustable=datalim``.
Parameters:
nrows_ncols
number of rows and number of columns of the grid.
num1num2_list
list of numbers specifying the area occupied by the subplot
subplot_list
        list of subplots that will be used to calculate optimal subplot_params.
pad : float
padding between the figure edge and the edges of subplots, as a fraction
of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect
[left, bottom, right, top] in normalized (0, 1) figure coordinates.
"""
rows, cols = nrows_ncols
pad_inches = pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
if h_pad is not None:
vpad_inches = h_pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
else:
vpad_inches = pad_inches
if w_pad is not None:
hpad_inches = w_pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
else:
hpad_inches = pad_inches
if len(subplot_list) == 0:
raise RuntimeError("")
if len(num1num2_list) != len(subplot_list):
raise RuntimeError("")
if rect is None:
margin_left = None
margin_bottom = None
margin_right = None
margin_top = None
else:
margin_left, margin_bottom, _right, _top = rect
if _right:
margin_right = 1. - _right
else:
margin_right = None
if _top:
margin_top = 1. - _top
else:
margin_top = None
vspaces = [[] for i in range((rows + 1) * cols)]
hspaces = [[] for i in range(rows * (cols + 1))]
union = Bbox.union
if ax_bbox_list is None:
ax_bbox_list = []
for subplots in subplot_list:
ax_bbox = union([ax.get_position(original=True)
for ax in subplots])
ax_bbox_list.append(ax_bbox)
for subplots, ax_bbox, (num1, num2) in zip(subplot_list,
ax_bbox_list,
num1num2_list):
#ax_bbox = union([ax.get_position(original=True) for ax in subplots])
tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots])
tight_bbox = TransformedBbox(tight_bbox_raw,
fig.transFigure.inverted())
row1, col1 = divmod(num1, cols)
if num2 is None:
# left
hspaces[row1 * (cols + 1) + col1].append(
_get_left(tight_bbox, ax_bbox))
# right
hspaces[row1 * (cols + 1) + (col1 + 1)].append(
_get_right(tight_bbox, ax_bbox))
# top
vspaces[row1 * cols + col1].append(
_get_top(tight_bbox, ax_bbox))
# bottom
vspaces[(row1 + 1) * cols + col1].append(
_get_bottom(tight_bbox, ax_bbox))
else:
row2, col2 = divmod(num2, cols)
for row_i in range(row1, row2 + 1):
# left
hspaces[row_i * (cols + 1) + col1].append(
_get_left(tight_bbox, ax_bbox))
# right
hspaces[row_i * (cols + 1) + (col2 + 1)].append(
_get_right(tight_bbox, ax_bbox))
for col_i in range(col1, col2 + 1):
# top
vspaces[row1 * cols + col_i].append(
_get_top(tight_bbox, ax_bbox))
# bottom
vspaces[(row2 + 1) * cols + col_i].append(
_get_bottom(tight_bbox, ax_bbox))
fig_width_inch, fig_height_inch = fig.get_size_inches()
# margins can be negative for axes with aspect applied. And we
# append + [0] to make minimum margins 0
if not margin_left:
margin_left = max([sum(s) for s in hspaces[::cols + 1]] + [0])
margin_left += pad_inches / fig_width_inch
if not margin_right:
margin_right = max([sum(s) for s in hspaces[cols::cols + 1]] + [0])
margin_right += pad_inches / fig_width_inch
if not margin_top:
margin_top = max([sum(s) for s in vspaces[:cols]] + [0])
margin_top += pad_inches / fig_height_inch
if not margin_bottom:
margin_bottom = max([sum(s) for s in vspaces[-cols:]] + [0])
margin_bottom += pad_inches / fig_height_inch
kwargs = dict(left=margin_left,
right=1 - margin_right,
bottom=margin_bottom,
top=1 - margin_top)
if cols > 1:
hspace = max([sum(s)
for i in range(rows)
for s
in hspaces[i * (cols + 1) + 1:(i + 1) * (cols + 1) - 1]])
hspace += hpad_inches / fig_width_inch
h_axes = ((1 - margin_right - margin_left) -
hspace * (cols - 1)) / cols
kwargs["wspace"] = hspace / h_axes
if rows > 1:
vspace = max([sum(s) for s in vspaces[cols:-cols]])
vspace += vpad_inches / fig_height_inch
v_axes = ((1 - margin_top - margin_bottom) -
vspace * (rows - 1)) / rows
kwargs["hspace"] = vspace / v_axes
return kwargs
def get_renderer(fig):
if fig._cachedRenderer:
renderer = fig._cachedRenderer
else:
canvas = fig.canvas
if canvas and hasattr(canvas, "get_renderer"):
renderer = canvas.get_renderer()
else:
# not sure if this can happen
warnings.warn("tight_layout : falling back to Agg renderer")
from matplotlib.backends.backend_agg import FigureCanvasAgg
canvas = FigureCanvasAgg(fig)
renderer = canvas.get_renderer()
return renderer
def get_subplotspec_list(axes_list, grid_spec=None):
"""
Return a list of subplotspec from the given list of axes. For an
instance of axes that does not support subplotspec, None is
inserted in the list.
If grid_spec is given, None is inserted for those not from
the given grid_spec.
"""
subplotspec_list = []
for ax in axes_list:
axes_or_locator = ax.get_axes_locator()
if axes_or_locator is None:
axes_or_locator = ax
if hasattr(axes_or_locator, "get_subplotspec"):
subplotspec = axes_or_locator.get_subplotspec()
subplotspec = subplotspec.get_topmost_subplotspec()
gs = subplotspec.get_gridspec()
if grid_spec is not None:
if gs != grid_spec:
subplotspec = None
elif gs.locally_modified_subplot_params():
subplotspec = None
else:
subplotspec = None
subplotspec_list.append(subplotspec)
return subplotspec_list
def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Return subplot parameters for tight-layouted-figure with specified
padding.
Parameters:
*fig* : figure instance
*axes_list* : a list of axes
*subplotspec_list* : a list of subplotspec associated with each
axes in axes_list
*renderer* : renderer instance
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
subplot_list = []
nrows_list = []
ncols_list = []
ax_bbox_list = []
subplot_dict = {} # multiple axes can share
# same subplot_interface (e.g., axes_grid1). Thus
# we need to join them together.
subplotspec_list2 = []
for ax, subplotspec in zip(axes_list,
subplotspec_list):
if subplotspec is None:
continue
subplots = subplot_dict.setdefault(subplotspec, [])
if not subplots:
myrows, mycols, _, _ = subplotspec.get_geometry()
nrows_list.append(myrows)
ncols_list.append(mycols)
subplotspec_list2.append(subplotspec)
subplot_list.append(subplots)
ax_bbox_list.append(subplotspec.get_position(fig))
subplots.append(ax)
max_nrows = max(nrows_list)
max_ncols = max(ncols_list)
num1num2_list = []
for subplotspec in subplotspec_list2:
rows, cols, num1, num2 = subplotspec.get_geometry()
div_row, mod_row = divmod(max_nrows, rows)
div_col, mod_col = divmod(max_ncols, cols)
if (mod_row != 0) or (mod_col != 0):
raise RuntimeError("")
rowNum1, colNum1 = divmod(num1, cols)
if num2 is None:
rowNum2, colNum2 = rowNum1, colNum1
else:
rowNum2, colNum2 = divmod(num2, cols)
num1num2_list.append((rowNum1 * div_row * max_ncols +
colNum1 * div_col,
((rowNum2 + 1) * div_row - 1) * max_ncols +
(colNum2 + 1) * div_col - 1))
kwargs = auto_adjust_subplotpars(fig, renderer,
nrows_ncols=(max_nrows, max_ncols),
num1num2_list=num1num2_list,
subplot_list=subplot_list,
ax_bbox_list=ax_bbox_list,
pad=pad, h_pad=h_pad, w_pad=w_pad)
if rect is not None:
# if rect is given, the whole subplots area (including
# labels) will fit into the rect instead of the
# figure. Note that the rect argument of
# *auto_adjust_subplotpars* specifies the area that will be
# covered by the total area of axes.bbox. Thus we call
# auto_adjust_subplotpars twice, the second time with
# adjusted rect parameters.
left, bottom, right, top = rect
if left is not None:
left += kwargs["left"]
if bottom is not None:
bottom += kwargs["bottom"]
if right is not None:
right -= (1 - kwargs["right"])
if top is not None:
top -= (1 - kwargs["top"])
#if h_pad is None: h_pad = pad
#if w_pad is None: w_pad = pad
kwargs = auto_adjust_subplotpars(fig, renderer,
nrows_ncols=(max_nrows, max_ncols),
num1num2_list=num1num2_list,
subplot_list=subplot_list,
ax_bbox_list=ax_bbox_list,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=(left, bottom, right, top))
return kwargs
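# Minimal end-to-end sketch (illustrative; this is roughly how a figure's
# tight_layout machinery is expected to use the helpers above):
#   renderer = get_renderer(fig)
#   subplotspec_list = get_subplotspec_list(fig.axes)
#   kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
#                                    renderer, pad=1.08)
#   fig.subplots_adjust(**kwargs)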
| gpl-2.0 |
ljdursi/simple-squiggle-pseudomapper | spatialindex.py | 1 | 24553 | #!/usr/bin/env python
"""
Contains a SpatialIndex class and an implementation of a KDTreeIndex subclass,
along with a driver program for indexing a reference fasta with the KDTreeIndex.
"""
from __future__ import print_function
import argparse
import numpy
import collections
import poremodel
try: #python2: use cPickle instead of pickle
import cPickle as pickle
except ImportError:
import pickle
import scipy.spatial
import time
import sys
class Timer(object):
"""
simple context-manager based timer
"""
def __init__(self, name, verbose):
self.__name = name
self.__verbose = verbose
self.__start = None
def __enter__(self):
self.__start = time.time()
if self.__verbose:
print(self.__name + "...", file=sys.stderr)
return self
def __exit__(self, *args):
interval = time.time() - self.__start
if self.__verbose:
print(self.__name + ": " + interval + "s", file=sys.stderr)
def readfasta(infilename):
"""
Reads a fasta file to index, returns a zipped list of labels and sequences
"""
labels = []
sequences = []
curlabel = None
cursequence = ""
def updatelists():
"maintain the sequences and label lists"
if len(cursequence) != 0:
sequences.append(cursequence)
if curlabel is not None:
labels.append(curlabel)
else:
labels.append('seq'+str(len(sequences)))
with open(infilename, 'r') as infile:
for line in infile:
if line[0] == ">":
updatelists()
cursequence = ""
curlabel = line[1:].strip()
else:
cursequence += line.strip()
updatelists()
return list(zip(labels, sequences))
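# Example of the return format, assuming a two-record FASTA file "ref.fa":
#   readfasta("ref.fa") -> [("chr1 some description", "ACGT..."),
#                           ("chr2", "TTGA...")]
# Records lacking a ">" header line are auto-named "seq1", "seq2", ...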
def reverse_complement(seq):
""" reverse complement of a sequence """
rcbases = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}
return "".join([rcbases[base] if base in rcbases else base for base in seq[::-1]])
def kmer_to_int64(kmer):
"""
Convert a kmer into a numerical rank
input: kmer (string) - sequence
returns: numpy.int64 rank
"""
vals = {'A':0, 'C':1, 'G':2, 'T':3}
nalphabet = 4
val = 0
for base in kmer:
if not base in vals:
return numpy.int64(-1)
val = val * nalphabet + vals[base]
return numpy.int64(val)
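# Example: kmer_to_int64("ACGT") == 27, i.e. the base-4 rank with A=0, C=1,
# G=2, T=3: ((0*4 + 1)*4 + 2)*4 + 3 == 27. Any kmer containing another
# character (e.g. "ACNT") returns -1.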
class SpatialIndex(object):
"""
The SpatialIndex class defines the interface for a spatial index
on a genome given a pore model (which maps kmers to floating point values).
The spatial index requires a dimension in which to map, a pore model,
and the sequence to index.
The SpatialIndex class lacks a complete implementation; the KDTreeIndex
is a subclass which completely implements the interface. The SpatialIndex
class factors out that functionality which isn't specific to the k-d tree
index (eg, would be needed for an r-tree implementation).
"""
def __init__(self, sequence, name, model, dimension, maxentries=10):
"""
Initialization:
sequence: sequence to index
name: name of the index
model: a pore model
dimension: dimension in which to do the spatial index
maxentries: filter out any uninformative points
(those that occur more than this many times in the sequence)
"""
self.__max_locations = maxentries
self.__model = model
self.__dimension = dimension
self.__referencename = name
self.__reflen = len(sequence)
self.__locations = None
self.__starts = None
def set_locs_and_starts(self, locations, starts):
"""
Sets the matching locations of each individual kmer and where
each unique kmer position starts in that list; do it in a method
here so it is set in the superclass
"""
self.__locations = locations
self.__starts = starts
@classmethod
def kmers_from_sequence(cls, sequence, model, dimension,
include_complement=True, maxentries=10, verbose=False):
"""
This routine, called on initialization, reads in the sequence to index
and generates three key arrays used in the lookup routines:
- the list of unique dmers (expressed as int64s, their rank: eg, AAA...A = 0).
- A list of genomic locations in the reference
- A list of starts, so that the ith lexicographic dmer present in the
sequence corresponds to locations [starts[i],starts[i+1])
"""
if not isinstance(model, poremodel.PoreModel):
raise ValueError("KDTreeIndex.__init__(): must provide pore model")
# First, the sequence (and its reverse complement) is turned into:
# - an array of integers representing dmers in sequence (eg, AA..A = 0)
# - the list of genomic locations corresponding to each
# ( which is just 1,2,3...N,-1,-2,-3...-N )
# with negative values corresponding to locations on the complement
# strand
# convert to events (which represent kmers)
with Timer("Generating events", verbose) as timer:
complement = ""
if include_complement:
complement = reverse_complement(sequence)
refevents, refsds = model.sequence_to_events(sequence)
compevents, compsds = model.sequence_to_events(complement)
allkmers = numpy.zeros(len(refevents)-dimension+1+
len(compevents)-dimension+1,
dtype=numpy.int64)
# convert k+d-1-mers into integers
with Timer("Converting to integer representations", verbose) as timer:
# this could fairly easily be sped up
for i in range(len(compevents)-dimension+1):
allkmers[i] = kmer_to_int64(complement[i:i+model.k+dimension-1])
shift = len(compevents)-dimension
for i in range(len(refevents)-dimension+1):
allkmers[i+shift] = kmer_to_int64(sequence[i:i+model.k+dimension-1])
# sort by value of the dmer integer values
with Timer("Sorting", verbose) as timer:
locs = numpy.concatenate((-numpy.arange(len(compevents)-dimension+1)-1,
numpy.arange(len(refevents)-dimension+1)+1))
positions = allkmers.argsort()
allkmers, locs = allkmers[positions], locs[positions]
del positions
# get rid of invalid dmers (eg, containing N) which return -1
start = numpy.argmax(allkmers >= 0)
allkmers, locs = allkmers[start:], locs[start:]
# generate list of unique dmers (as integers) and start locations
with Timer("Building Counts", verbose) as timer:
kmers, counts = numpy.unique(allkmers, return_counts=True)
starts = numpy.cumsum(counts)
starts = numpy.concatenate((numpy.array([0]), starts))
if verbose:
print("total entries = "+str(len(kmers)))
# extract the dmer level means and std deviations corresponding to each dmer
with Timer("Extracting Kmers", verbose) as timer:
data = numpy.zeros((len(kmers), dimension), dtype=numpy.float32)
sdata = numpy.zeros((len(kmers), dimension), dtype=numpy.float32)
for i in range(len(kmers)):
loc = locs[starts[i]]
if loc < 0:
idx = -(loc+1)
events = compevents[idx:idx+dimension]
sds = compsds[idx:idx+dimension]
else:
idx = loc-1
events = refevents[idx:idx+dimension]
sds = refsds[idx:idx+dimension]
data[i, :] = numpy.array(events, numpy.float32)
sdata[i, :] = numpy.array(sds, numpy.float32)
return data, sdata, locs, starts
@property
def dimension(self):
"""Returns the spatial dimension of the index"""
return self.__dimension
@property
def reference_length(self):
"""Returns the size of the sequence indexed"""
return self.__reflen
def index_to_genomic_locations(self, idxs):
"""
Returns a list-of-list of genomic locations corresponding to each
dmer index (index into the sorted list of unique dmers in the reference)
in idxs
"""
def idx_to_locs(idx):
"""returns list of locations for one index"""
return self.__locations[self.__starts[idx]:self.__starts[idx+1]]
if isinstance(idxs, int) or isinstance(idxs, numpy.int64):
return list(idx_to_locs(idxs))
else:
return [idx_to_locs(idx) for idx in idxs]
def events_to_dmer_array(self, events, each_dmer=False, each_event=False):
"""
Convert a 1d array of events to an array of d-dimensional points for
the index
inputs: events - 1d array of events
each_dmer, each_event: booleans, one of which must be true
each_dmer: every unique dmer is returned
each_event: dmers are returned s/t every event occurs once
outputs: 2d array of dmers
if each_dmer is true array is of size (|events|-dimension+1, d)
if each_event is true array is of size (|events|//dimension, d)
"""
if (each_dmer and each_event) or not(each_dmer or each_event):
raise ValueError("events_to_dmer_array: one of each_kmer, each_event must be True")
dim = self.__dimension
if each_event:
step = dim
else:
step = 1
kmers = numpy.array([events[i:i+dim]
for i in range(0, len(events)-dim+1, step)])
return kmers
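# Example of the two modes, assuming dimension == 4 and 10 scaled events:
#   events_to_dmer_array(events, each_dmer=True)  -> array of shape (7, 4)
#   events_to_dmer_array(events, each_event=True) -> array of shape (2, 4)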
def scale_events(self, events, sds=None):
"""
Scales the events to be consistent with the pore model
"""
return self.__model.scale_events(events, sds)
def lookup(self, read_dmers, maxdist=4.25, closest=False):
""" Lookup function """
pass
@property
def model(self):
"""Return the pore model"""
return self.__model
class Mappings(object):
"""
The spatial indices return the lookups as mappings, from read
locations to genomic locations. This is more complicated than
in the base-called case, as any signal-level dmer may well
correspond to multiple basecalled dmers, which may each occur
in various locations in the sequence.
Objects of the mappings class contain:
information about the read and the reference, and whether this
is a complement-strand index
an array of read locations, with repeats
an array of corresponding reference locations
for each read loc - ref loc, the distance to the proposed dmer
The class defines several operations on these mappings.
"""
def __init__(self, readlocs, idxlocs, dists, dmers, nearestdmers, nmatches,
referenceLen, readlen, complement=False):
"""
Initializes a mapping.
Inputs:
- array of read locations
- same-sized array of reference locations
(+ve for template strand, -ve for complement strand)
- same-sized array of distances (from read dmer to proposed ref dmer)
- same-sized array of the proposed signal-level dmer
"""
assert len(readlocs) == len(idxlocs)
assert len(idxlocs) == len(dists)
assert nearestdmers.shape[0] == len(dists)
self.read_locs = readlocs
self.idx_locs = idxlocs
self.dists = dists
self.dmers = dmers
self.nearest_dmers = nearestdmers
self.nmatches = nmatches
self.reflen = referenceLen
self.readlen = readlen
self.complement = complement
def __str__(self):
"""
Readable output summary of a mapping
"""
output = "Mappings: ReferenceLength = %d, complement = %d, readlength = %d\n" \
% (self.reflen, self.readlen, self.complement)
output += " : nmatches = %d\n" % (len(self.read_locs))
for read, idx, dist, nearest in zip(self.read_locs[:5], self.idx_locs[:5],
self.dists[:5], self.nearest_dmers[:5, :]):
output += " %d: %d (%5.3f) " % (read, idx, dist) + \
numpy.array_str(nearest, precision=2, suppress_small=True) + "\n"
if len(self.read_locs) > 5:
output += " ...\n"
return output
def set_complement(self, complement=True):
"""
Sets the complement strand flag of the mapping
"""
self.complement = complement
def coords_compl_to_templ(self, complement_coords):
"""
Converts a set of complement strand coordinates to corresponding
template strand coordinates, given the mapping reference length
"""
# if a complement strand maps to |pos| on the complement of reference -> -pos,
# then template strand would map to reflen-|pos| on template strand of ref = ref+pos;
# if a complement strand maps to pos on the template of the reference -> +pos,
# then the template strand would map to (ref-|pos|) of the complement = -ref+pos
return -numpy.sign(complement_coords)*self.reflen+complement_coords
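# Worked example, assuming reflen == 1000: a complement-strand hit at
# -250 maps to template coordinate -(-1)*1000 + (-250) = 750, while a
# hit at +250 maps to -1000 + 250 = -750.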
@property
def starts(self):
"""
Returns the array of implied starting positions of the read, given the
list of read-to-reference mappings
"""
startlocs = numpy.sign(self.idx_locs)*\
numpy.mod(numpy.abs(self.idx_locs)-self.read_locs+self.reflen, self.reflen)
if self.complement:
startlocs = self.coords_compl_to_templ(startlocs)
return startlocs
def append(self, other):
"""
Returns a new set of mappings consisting of the current
mappings data and another set appended to it.
If the two are of the same strand, this is a simple concatenation;
if not, must convert to the template coordinates and append
"""
if self.complement == other.complement:
return Mappings(numpy.concatenate((self.read_locs, other.read_locs)),
numpy.concatenate((self.idx_locs, other.idx_locs)),
numpy.concatenate((self.dists, other.dists)),
numpy.concatenate((self.dmers, other.dmers)),
numpy.concatenate((self.nearest_dmers, other.nearest_dmers)),
numpy.concatenate((self.nmatches, other.nmatches)),
self.reflen,
self.readlen+other.readlen,
self.complement)
else:
template, complement = (self, other) if self.complement else (other, self)
return Mappings(numpy.concatenate((template.read_locs,
template.readlen-complement.read_locs)),
numpy.concatenate((template.idx_locs,
complement.coords_compl_to_templ(complement.idx_locs))),
numpy.concatenate((template.dists, complement.dists)),
numpy.concatenate((self.dmers, other.dmers)),
numpy.concatenate((template.nearest_dmers, complement.nearest_dmers)),
numpy.concatenate((self.nmatches, other.nmatches)),
template.reflen,
template.readlen,
False)
def local_rescale(self, read_dmers, map_range):
"""
As with scaling read events to a model, in this case we update the
distances and signal-level dmers by re-scaling to the mappings,
to improve the accuracy of the original crude rescaling which used
no information about correspondance between particular read and
model events.
"""
reflen = self.reflen
if map_range is None:
map_range = (-reflen, reflen+1)
starts = self.starts
valid = numpy.where((starts >= map_range[0]) & (starts <= map_range[1]))[0]
read_events = read_dmers[self.read_locs[valid], :].reshape(read_dmers[self.read_locs[valid], :].size)
idx_events = self.nearest_dmers[valid, :].reshape(self.nearest_dmers[valid, :].size)
if valid.size == 0:
return self, (1, 0)
fit = numpy.polyfit(read_events, idx_events, 1)
new_dmers = fit[0]*read_dmers + fit[1]
dists = numpy.sqrt(numpy.sum((new_dmers[self.read_locs, :] - self.nearest_dmers)*\
(new_dmers[self.read_locs, :] - self.nearest_dmers), axis=1))
return Mappings(self.read_locs, self.idx_locs, dists, self.dmers, self.nearest_dmers,
self.nmatches, reflen, self.readlen, self.complement), fit
class KDTreeIndex(SpatialIndex):
"""
Specialization of the SpatialIndex class which uses k-d trees;
uses cKDTree from scipy.spatial and, with very small modifications,
also works with sklearn.neighbors KDTree
"""
def __init__(self, sequence, name, model, dimension, include_complement=True,
maxentries=10, verbose=False):
super(KDTreeIndex, self).__init__(sequence, name, model, dimension, maxentries)
events, _, locations, starts = self.kmers_from_sequence(sequence, model,
dimension, include_complement,
maxentries, verbose)
self.set_locs_and_starts(locations, starts)
if verbose:
print("KDTree: building tree")
#pylint: disable=not-callable
self.__kdtree = scipy.spatial.cKDTree(events)
#pylint: enable=not-callable
#self.__kdtree = sklearn.neighbors.KDTree(events)
@property
def reference_length(self):
"""Return reference length"""
return super(KDTreeIndex, self).reference_length
@property
def dimension(self):
"""Return index dimension"""
return super(KDTreeIndex, self).dimension
def scale_events(self, events, sds=None):
"""Scale input (read) events to model"""
return super(KDTreeIndex, self).scale_events(events, sds)
def lookup(self, read_dmers, maxdist=4.25, closest=False):
"""
For a given set of read events, return a list of mappings
between the read events and the indexed reference.
Inputs:
- read_kmers: signal-level events from a read
- maxdist: maximum allowable distance between points in read & ref
- closest (optional, boolean: default False): if True,
only return mappings to closest dmer. If False,
return mappings to all dmers within maxdist
"""
Match = collections.namedtuple("Match", ["dist", "idx_locs", "nearestDmer", "readLoc"])
if read_dmers.ndim == 1:
read_dmers = [read_dmers]
def dist(read_posn, idx):
"""Distance between read_dmer at position read_pos and dmer index idx"""
p = read_dmers[read_posn, :]
q = self.__kdtree.data[idx, :]
return numpy.sqrt(numpy.max((p-q)*(p-q)))
if closest:
dists, idxs = self.__kdtree.query(read_dmers)
nearests = self.__kdtree.data[idxs, :]
matches = [self.index_to_genomic_locations(idx) for idx in idxs]
results = [Match(*result) for result in zip(dists, matches,
nearests, range(len(dists)))]
else:
idxs = self.__kdtree.query_ball_point(read_dmers, maxdist)
#below is the corresponding line for sklearn.neighbors.KDTree
#idxs = self.__kdtree.query_radius(read_dmers, maxdist)
results = [Match(dist(posn, pidx), self.index_to_genomic_locations(pidx),
self.__kdtree.data[pidx, :], posn)
for posn, pidxs in enumerate(idxs)
for pidx in pidxs]
dists = numpy.array([match.dist for match in results for idx in match.idx_locs],
dtype=numpy.float32)
read_locs = numpy.array([match.readLoc for match in results for idx in match.idx_locs],
dtype=numpy.int)
idx_locs = numpy.array([idx for match in results for idx in match.idx_locs],
dtype=numpy.int)
nmatches = numpy.array([len(match.idx_locs) for match in results for idx in match.idx_locs],
dtype=numpy.int)
nearests = numpy.array([match.nearestDmer for match in results for idx in match.idx_locs],
dtype=numpy.float32)
dmers = numpy.array([read_dmers[match.readLoc] for match in results for idx in match.idx_locs],
dtype=numpy.float32)
return Mappings(read_locs, idx_locs, dists, dmers, nearests, nmatches,
referenceLen=self.reference_length, readlen=len(read_dmers))
def __getstate__(self):
"""
Need to modify default get/set state routines
so that nested cKDTree can be pickled
"""
kdstate = self.__kdtree.__getstate__()
self.__kdtree = None
state = self.__dict__.copy()
state["_kdtree"] = kdstate
return state
def __setstate__(self, state):
"""
Need to modify default get/set state routines
so that nested cKDTree can be pickled
"""
kdstate = state["_kdtree"]
del state["_kdtree"]
self.__dict__.update(state)
#pylint: disable=not-callable
self.__kdtree = scipy.spatial.cKDTree([[0]*self.dimension, [1]*self.dimension])
#pylint: enable=not-callable
self.__kdtree.__setstate__(kdstate)
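# End-to-end usage sketch (illustrative; the pore-model construction and the
# event scaling step depend on poremodel.PoreModel, defined elsewhere):
#   model = poremodel.PoreModel("model.csv", False, ",")
#   index = KDTreeIndex(sequence, "ref", model, dimension=10)
#   dmers = index.events_to_dmer_array(scaled_read_events, each_event=True)
#   mappings = index.lookup(dmers, maxdist=4.25)
#   starts = mappings.starts   # candidate start positions on the reference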
def main():
"""
Driver program - generate a spatial index from a reference
"""
parser = argparse.ArgumentParser(description="Build a KDTree index of reference given a pore model")
parser.add_argument('reference', type=str, help="Reference FASTA")
parser.add_argument('modelfile', type=str, help="Pore model file (CSV or Fast5)")
parser.add_argument('outfile', type=str, help="Output file prefix to save (pickled) index")
parser.add_argument('-D', '--dimension', type=int, default=10,
help="Number of dimensions to use in spatial index")
parser.add_argument('-m', '--maxentries', type=int, default=5,
help="Filter out reference dmers that occur in more m locations")
parser.add_argument('-v', '--verbose', action="store_true")
parser.add_argument('-C', '--complement', action="store_true",
help="Use Complement model rather than Template (from 2D Fast5 files only)")
parser.add_argument('-d', '--delimiter', default=",", type=str,
help="Pore model delimeter (for CSV/TSV files only)")
args = parser.parse_args()
if args.verbose:
print("Reading Model...")
pore_model = poremodel.PoreModel(args.modelfile, args.complement, args.delimiter)
if args.verbose:
print("Indexing...")
contigs = readfasta(args.reference)
for label, ref_sequence in contigs:
labelbase = label.split()[0]
if len(contigs) == 1:
referencename = args.reference
outfilename = args.outfile+".kdtidx"
else:
referencename = args.reference+"-"+labelbase
outfilename = args.outfile+"-"+labelbase+".kdtidx"
index = KDTreeIndex(ref_sequence, referencename, pore_model,
args.dimension, maxentries=args.maxentries)
if args.verbose:
print("Saving Index...")
with open(outfilename, "wb") as pickle_file:
pickle.dump(index, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| gpl-3.0 |
466152112/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
JamesRiverHomeBrewers/WaterTesting | WaterTesting/gsload.py | 1 | 3569 | """
Module for loading data form google sheets and dumping it
into a pandas dataframe.
"""
import pandas as pd
from pandas import read_csv, to_datetime, DataFrame
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from slugify import Slugify
SLUG = Slugify(to_lower=True)
SO4CL_RATIO = {
0: 'Too Malty',
0.4: 'Very Malty',
0.6: 'Malty',
0.8: 'Balanced',
1.5: 'Little Bitter',
2.01: 'More Bitter',
4.01: 'Extra Bitter',
6.01: 'Quite Bitter',
8.01: 'Very Bitter',
9.01: 'Too Bitter'
}
def add_columns(df):
""" Add calculated columns to the dataframe
Accepts the test result dataframe and returns the newly modified dataframe.
"""
# Rename columns from google sheets
df = df.rename(columns={
'Sample ID': 'sample_id',
'Sample Date': 'sample_date',
'Sample Source': 'sample_source',
'Sample Treatment': 'treatment',
'Sample Notes': 'notes',
'Test Date': 'test_date',
'Sample Location': 'sample_location',
'Total Hardness': 'total_hardness',
'Calcium Hardness': 'ca_hardness',
'Total Alkalinity': 'total_alkalinity',
'Sulfate': 'so4',
'Chlorine': 'cl',
})
# Everything is loaded as strings, need to convert to numeric
df['total_hardness'] = pd.to_numeric(df['total_hardness'], errors='coerce')
df['ca_hardness'] = pd.to_numeric(df['ca_hardness'], errors='coerce')
df['total_alkalinity'] = pd.to_numeric(df['total_alkalinity'], errors='coerce')
df['so4'] = pd.to_numeric(df['so4'], errors='coerce')
df['cl'] = pd.to_numeric(df['cl'], errors='coerce')
# Add calculated columns
df['mg_hardness'] = df['total_hardness'] - df['ca_hardness']
df['res_alkalinity'] = df['total_alkalinity'] - (df['ca_hardness'] / 3.5 + df['mg_hardness'] / 7)
df['ca2'] = df['ca_hardness'] * 0.4
df['mg2'] = df['mg_hardness'] * 0.25
df['hco3'] = df['total_alkalinity'] * 1.22
df['so4_cl_ratio'] = df['so4'] / df['cl']
# Add descriptor from SO4 / Cl Ratio Lookup
set_ratio = [min(SO4CL_RATIO.keys(), key=lambda x: abs(x - r)) for r in df['so4_cl_ratio']]
ratios = [SO4CL_RATIO[value] for value in set_ratio]
df['balance'] = ratios
df['sample_date'] = to_datetime(df['sample_date'], format='%m/%d/%Y').dt.date
df['test_date'] = to_datetime(df['test_date'], format='%m/%d/%Y').dt.date
df['slug'] = [SLUG(x) for x in df['sample_location']]
df = df.sort_values(by='sample_date')
df = df.round(2)
return df
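# Example of the ratio-to-descriptor lookup above: a sample with so4 == 60 and
# cl == 50 has so4_cl_ratio == 1.2, whose nearest key in SO4CL_RATIO is 1.5,
# so its 'balance' column becomes 'Little Bitter'.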
def load_csv(filename):
df = read_csv(filename)
df = add_columns(df)
return df
def load_sheet(key, sheet_id, sheet_tab):
""" Connect to a google spreadsheet using gspread and load it
into a dataframe.
Parameters:
key: json key file from google developer console
sheet_id: ID of the sheet found in the sheet URL
sheet_tab: name of the tab within the worksheet to pull
Return: gspread worksheet object
"""
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(key, scope)
gc = gspread.authorize(credentials)
wks = gc.open_by_key(sheet_id).worksheet(sheet_tab)
# Dump spreadsheet data into dataframe and add calculated columns
df = DataFrame(wks.get_all_records())
df = add_columns(df)
return df
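# Usage sketch (the key file, sheet ID and tab name are placeholders):
#   df = load_sheet('service-account.json', '<spreadsheet-key>', 'TestResults')
#   df = load_csv('water_tests.csv')   # or load a local CSV export instead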
| mit |
nsnam/ns-3-dev-git | src/core/examples/sample-rng-plot.py | 12 | 1430 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
## @file
# @ingroup core-examples
# @ingroup randomvariable
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools.
#
# This is adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalRandomVariable()
rng.SetAttribute("Mean", ns.core.DoubleValue(100.0))
rng.SetAttribute("Variance", ns.core.DoubleValue(225.0))
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
alexvmarch/atomic | exatomic/interfaces/cube.py | 3 | 7161 | #-*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Cube File Support
##########################
Cube files contain an atomic geometry and scalar field values corresponding to
a physical quantity.
"""
import os
import six
import numpy as np
import pandas as pd
from glob import glob
from exa import Series, TypedMeta
from exatomic import __version__, Atom, Editor, AtomicField, Frame, Universe
from exatomic.base import z2sym, sym2z
class Meta(TypedMeta):
atom = Atom
frame = Frame
field = AtomicField
class Cube(six.with_metaclass(Meta, Editor)):
"""
An editor for handling cube files. Assumes scalar field values are arranged
with the x axis as the outer loop and the z axis as the inner loop.
.. code-block:: python
cube = Cube('my.cube')
cube.atom # Displays the atom dataframe
cube.field # Displays the field dataframe
cube.field_values # Displays the list of field values
uni = cube.to_universe() # Converts the cube file editor to a universe
UniverseWidget(uni) # Renders the cube file
Warning:
Be sure your cube is in atomic units.
"""
def parse_atom(self):
"""
Parse the :class:`~exatomic.atom.Atom` object from the cube file in place.
"""
nat = abs(int(self[2].split()[0]))
names = ['Z', 'Zeff', 'x', 'y', 'z']
df = self.pandas_dataframe(6, nat + 6, names)
df['symbol'] = df['Z'].map(z2sym).astype('category')
df['label'] = range(nat)
df['frame'] = 0
self.atom = Atom(df)
def parse_field(self):
"""
Parse the scalar field into an :class:`~exatomic.core.field.AtomicField`.
Note:
The :class:`~exatomic.core.field.AtomicField` tracks both the
field parameters (i.e. information about the discretization and shape of
the field's spatial points) as well as the field values (at each of
those points in space). See :meth:`~exatomic.algorithms.orbital_util.make_fps`
for more details.
"""
self.meta = {'comments': self[:2]}
typs = [int, float, float, float]
nat, ox, oy, oz = [typ(i) for typ, i in zip(typs, self[2].split())]
nx, dxi, dxj, dxk = [typ(i) for typ, i in zip(typs, self[3].split())]
ny, dyi, dyj, dyk = [typ(i) for typ, i in zip(typs, self[4].split())]
nz, dzi, dzj, dzk = [typ(i) for typ, i in zip(typs, self[5].split())]
nat, nx, ny, nz = abs(nat), abs(nx), abs(ny), abs(nz)
volstart = nat + 6
if len(self[volstart].split()) < 5:
if not len(self[volstart + 1].split()) < 5:
volstart += 1
ncol = len(self[volstart].split())
data = self.pandas_dataframe(volstart, len(self), ncol).values.ravel()
df = pd.Series({'ox': ox, 'oy': oy, 'oz': oz,
'nx': nx, 'ny': ny, 'nz': nz,
'dxi': dxi, 'dxj': dxj, 'dxk': dxk,
'dyi': dyi, 'dyj': dyj, 'dyk': dyk,
'dzi': dzi, 'dzj': dzj, 'dzk': dzk,
'frame': 0, 'label': self.label,
'field_type': self.field_type}).to_frame().T
for col in ['nx', 'ny', 'nz']:
df[col] = df[col].astype(np.int64)
for col in ['ox', 'oy', 'oz', 'dxi', 'dxj', 'dxk',
'dyi', 'dyj', 'dyk', 'dzi', 'dzj', 'dzk']:
df[col] = df[col].astype(np.float64)
fields = [Series(data[~np.isnan(data)])]
self.field = AtomicField(df, field_values=fields)
@classmethod
def from_universe(cls, uni, idx, name=None, frame=None):
"""
Make a cube file format Editor from a given field in a
:class:`~exatomic.core.universe.Universe`.
Args:
uni (:class:`~exatomic.core.universe.Universe`): a universe
idx (int): field index in :class:`~exatomic.core.field.AtomicField`
name (str): description for comment line
frame (int): frame index in :class:`~exatomic.core.atom.Atom`
"""
name = '' if name is None else name
frame = uni.atom.nframes - 1 if frame is None else frame
hdr = '{} -- written by exatomic v{}\n\n'
ffmt = ' {:> 12.6f}'
flfmt = ('{:>5}' + ffmt * 3 + '\n').format
if 'Z' not in uni.atom:
uni.atom['Z'] = uni.atom['symbol'].map(sym2z)
if 'Zeff' not in uni.atom:
uni.atom['Zeff'] = uni.atom['Z'].astype(np.float64)
frame = uni.atom[uni.atom['frame'] == frame]
for col in ['nx', 'ny', 'nz']:
uni.field[col] = uni.field[col].astype(np.int64)
field = uni.field.loc[idx]
volum = uni.field.field_values[idx]
orig = len(frame.index), field.ox, field.oy, field.oz
nx, ny, nz = field.nx, field.ny, field.nz
xdim = nx, field.dxi, field.dxj, field.dxk
ydim = ny, field.dyi, field.dyj, field.dyk
zdim = nz, field.dzi, field.dzj, field.dzk
atargs = {'float_format': '%12.6f',
'header': None, 'index': None,
'columns': ['Z', 'Zeff', 'x', 'y', 'z']}
chnk = ''.join(['{}' * 6 + '\n' for i in range(nz // 6)])
if nz % 6: chnk += '{}' * (nz % 6) + '\n'
return cls(hdr.format(name, __version__)
+ flfmt(*orig) + flfmt(*xdim)
+ flfmt(*ydim) + flfmt(*zdim)
+ uni.atom.to_string(**atargs) + '\n'
+ (chnk * nx * ny).format(*volum.apply(
ffmt.replace('f', 'E').format)))
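# Usage sketch for the classmethod above (assumes `uni` is a Universe whose
# AtomicField already holds a field at index 0):
#   cub = Cube.from_universe(uni, 0, name='my field')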
def __init__(self, *args, **kwargs):
label = kwargs.pop("label", None)
field_type = kwargs.pop("field_type", None)
super(Cube, self).__init__(*args, **kwargs)
self.label = label
self.field_type = field_type
def uni_from_cubes(adir, verbose=False, ncubes=None, ext='cube'):
"""Put a bunch of cubes into a universe.
.. code-block:: python
uni = uni_from_cubes('/path/to/files/') # Parse all cubes matching 'files/*cube'
uni = uni_from_cubes('files/', ext='cub') # Parse all cubes matching 'files/*cub'
uni = uni_from_cubes('files/', verbose=True) # Print file names when parsing
uni = uni_from_cubes('files/', ncubes=5) # Only parse the first 5 cubes
# sorted lexicographically by file name
Args:
verbose (bool): print file names when reading cubes
ncubes (int): get only the first ncubes
ext (str): file extension of cube files
Returns:
uni (:class:`exatomic.core.universe.Universe`)
"""
if not adir.endswith(os.sep): adir += os.sep
cubes = sorted(glob(adir + '*' + ext))
if ncubes is not None:
cubes = cubes[:ncubes]
if verbose:
for cub in cubes: print(cub)
uni = Universe(atom=Cube(cubes[0]).atom)
flds = [Cube(cub).field for cub in cubes]
uni.add_field(flds)
return uni
| apache-2.0 |
qiwsir/vincent | tests/test_vega.py | 9 | 32992 | # -*- coding: utf-8 -*-
'''
Test Vincent.vega
-----------------
'''
from datetime import datetime, timedelta
from itertools import product
import time
import json
from vincent.charts import Line
from vincent.core import (grammar, GrammarClass, GrammarDict, KeyedList,
LoadError, ValidationError)
from vincent.visualization import Visualization
from vincent.data import Data
from vincent.transforms import Transform
from vincent.properties import PropertySet
from vincent.scales import DataRef, Scale
from vincent.marks import ValueRef, MarkProperties, MarkRef, Mark
from vincent.axes import AxisProperties, Axis
from vincent.legends import LegendProperties, Legend
import nose.tools as nt
import pandas as pd
import numpy as np
sequences = {
'int': range,
'float': lambda l: list(map(float, list(range(l)))),
'char': lambda l: list(map(chr, list(range(97, 97 + l)))),
'datetime': lambda l: [datetime.now() + timedelta(days=i)
for i in range(l)],
'Timestamp': lambda l: pd.date_range('1/2/2000', periods=l),
'numpy float': lambda l: list(map(np.float32, list(range(l)))),
'numpy int': lambda l: list(map(np.int32, list(range(l))))}
def test_keyed_list():
"""Test keyed list implementation"""
class TestKey(object):
"""Test object for Keyed List"""
def __init__(self, name=None):
self.name = name
key_list = KeyedList(attr_name='name')
# Basic usage
test_key = TestKey(name='test')
key_list.append(test_key)
nt.assert_equal(test_key, key_list['test'])
# Bad key
with nt.assert_raises(KeyError) as err:
key_list['test_1']
nt.assert_equal(err.exception.args[0], ' "test_1" is an invalid key')
# Repeated keys
test_key_1 = TestKey(name='test')
key_list.append(test_key_1)
with nt.assert_raises(ValidationError) as err:
key_list['test']
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0], 'duplicate keys found')
# Setting keys
key_list.pop(-1)
test_key_2 = TestKey(name='test_2')
key_list['test_2'] = test_key_2
nt.assert_equal(key_list['test_2'], test_key_2)
mirror_key_2 = TestKey(name='test_2')
key_list['test_2'] = mirror_key_2
nt.assert_equal(key_list['test_2'], mirror_key_2)
key_list[0] = mirror_key_2
nt.assert_equal(key_list[0], mirror_key_2)
# Keysetting errors
test_key_3 = TestKey(name='test_3')
with nt.assert_raises(ValidationError) as err:
key_list['test_4'] = test_key_3
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0],
"key must be equal to 'name' attribute")
key_list = KeyedList(attr_name='type')
test_key_4 = TestKey(name='test_key_4')
with nt.assert_raises(ValidationError) as err:
key_list['test_key_4'] = test_key_4
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0], 'object must have type attribute')
def test_grammar():
"""Grammar decorator behaves correctly."""
validator_fail = False
class DummyType(object):
pass
class TestGrammarClass(object):
def __init__(self):
self.grammar = GrammarDict()
@grammar
def test_grammar(value):
if validator_fail:
raise ValueError('validator failed')
@grammar(grammar_type=DummyType)
def test_grammar_with_type(value):
if validator_fail:
raise ValueError('validator failed')
@grammar(grammar_name='a name')
def test_grammar_with_name(value):
if validator_fail:
raise ValueError('validator failed')
test = TestGrammarClass()
nt.assert_is_none(test.test_grammar)
nt.assert_dict_equal(test.grammar, {})
test.test_grammar = 'testing'
nt.assert_equal(test.test_grammar, 'testing')
nt.assert_dict_equal(test.grammar, {'test_grammar': 'testing'})
del test.test_grammar
nt.assert_is_none(test.test_grammar)
nt.assert_dict_equal(test.grammar, {})
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar', 'testing')
# grammar with type checking
test = TestGrammarClass()
validator_fail = False
dummy = DummyType()
test.test_grammar_with_type = dummy
nt.assert_equal(test.test_grammar_with_type, dummy)
nt.assert_dict_equal(test.grammar, {'test_grammar_with_type': dummy})
nt.assert_raises_regexp(ValueError, 'must be DummyType', setattr, test,
'test_grammar_with_type', 'testing')
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar_with_type', dummy)
# grammar with field name
test = TestGrammarClass()
validator_fail = False
test.test_grammar_with_name = 'testing'
nt.assert_equal(test.test_grammar_with_name, 'testing')
nt.assert_dict_equal(test.grammar, {'a name': 'testing'})
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar_with_name', 'testing')
def test_grammar_dict():
"""Test Vincent Grammar Dict"""
g_dict = GrammarDict()
test = Visualization()
test_dict = {'axes': [], 'data': [], 'marks': [],
'scales': [], 'legends': []}
test_str = ('{"axes": [], "data": [], "legends": [], '
'"marks": [], "scales": []}')
nt.assert_equal(test.grammar(), test_dict)
print(json.dumps(test.grammar, sort_keys=True))
nt.assert_equal(json.dumps(test.grammar, sort_keys=True),
test_str)
nt.assert_equal(g_dict.encoder(test), test.grammar)
def assert_grammar_typechecking(grammar_types, test_obj):
"""Assert that the grammar fields of a test object are correctly
type-checked.
`grammar_types` should be a list of (name, types) pairs, where `types` is
a list of accepted classes, and `test_obj` should be an instance of the
object to test.
"""
class BadType(object):
pass
for name, objects in grammar_types:
for obj in objects:
tmp_obj = obj()
setattr(test_obj, name, tmp_obj)
nt.assert_equal(getattr(test_obj, name), tmp_obj)
bad_obj = BadType()
nt.assert_raises_regexp(ValueError, name + '.*' + obj.__name__,
setattr, test_obj, name, bad_obj)
nt.assert_equal(getattr(test_obj, name), tmp_obj)
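# Example call, matching how the test classes below use this helper:
#   assert_grammar_typechecking([('name', [str]), ('width', [int])],
#                               Visualization())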
def assert_manual_typechecking(bad_grammar, test_obj):
"""Some attrs use the _assert_is_type func for typechecking"""
for attr, value in bad_grammar:
with nt.assert_raises(ValueError) as err:
setattr(test_obj, attr, value)
nt.assert_equal(err.expected, ValueError)
def assert_grammar_validation(grammar_errors, test_obj):
"""Check grammar methods for validation errors"""
for attr, value, error, message in grammar_errors:
with nt.assert_raises(error) as err:
setattr(test_obj, attr, value)
nt.assert_equal(err.exception.args[0], message)
class TestGrammarClass(object):
"""Test GrammarClass's built-in methods that aren't tested elsewhere"""
def test_bad_init(self):
"""Test bad initialization"""
nt.assert_raises(ValueError, GrammarClass, width=50)
def test_validation(self):
"""Test validation of grammar"""
test = Visualization()
test.axes.append({'bad axes': 'ShouldRaiseError'})
with nt.assert_raises(ValidationError) as err:
test.validate()
nt.assert_equal(err.exception.args[0],
'invalid contents: axes[0] must be Axis')
class TestVisualization(object):
"""Test the Visualization Class"""
def test_grammar_typechecking(self):
"""Visualization fields are correctly type checked"""
grammar_types = [('name', [str]),
('width', [int]),
('height', [int]),
('data', [list, KeyedList]),
('scales', [list, KeyedList]),
('axes', [list, KeyedList]),
('marks', [list, KeyedList])]
assert_grammar_typechecking(grammar_types, Visualization())
def test_validation_checking(self):
"""Visualization fields are grammar-checked"""
grammar_errors = [('width', -1, ValueError,
'width cannot be negative'),
('height', -1, ValueError,
'height cannot be negative'),
('viewport', [1], ValueError,
'viewport must have 2 dimensions'),
('viewport', [-1, -1], ValueError,
'viewport dimensions cannot be negative'),
('padding', {'top': 2}, ValueError,
('Padding must have keys "top", "left", "right",'
' "bottom".')),
('padding',
{'top': 1, 'left': 1, 'right': 1, 'bottom': -1},
ValueError, 'Padding cannot be negative.'),
('padding', -1, ValueError,
'Padding cannot be negative.')]
assert_grammar_validation(grammar_errors, Visualization())
def test_manual_typecheck(self):
"""Test manual typechecking for elements like marks"""
test_attr = [('data', [1]), ('scales', [1]),
('axes', [1]), ('marks', [1]),
('legends', [1])]
assert_manual_typechecking(test_attr, Visualization())
def test_validation(self):
"""Test Visualization validation"""
test_obj = Visualization()
with nt.assert_raises(ValidationError) as err:
test_obj.validate()
nt.assert_equal(err.exception.args[0],
'data must be defined for valid visualization')
test_obj.data = [Data(name='test'), Data(name='test')]
with nt.assert_raises(ValidationError) as err:
test_obj.validate()
nt.assert_equal(err.exception.args[0],
'data has duplicate names')
def test_axis_labeling(self):
"""Test convenience method for axis label setting"""
# With Axes already in place
test_obj = Visualization()
test_obj.axes.extend([Axis(type='x'), Axis(type='y')])
test_obj.axis_titles(x="test1", y="test2")
nt.assert_equals(test_obj.axes['x'].title, 'test1')
nt.assert_equals(test_obj.axes['y'].title, 'test2')
# With no Axes already defined
del test_obj.axes[0]
del test_obj.axes[0]
test_obj.axis_titles(x="test1", y="test2")
nt.assert_equals(test_obj.axes['x'].title, 'test1')
nt.assert_equals(test_obj.axes['y'].title, 'test2')
def test_axis_properties(self):
test_vis = Visualization()
with nt.assert_raises(ValueError) as err:
test_vis.x_axis_properties(title_size=20, label_angle=30)
nt.assert_equals(err.exception.args[0],
'This Visualization has no axes!')
test_vis.axes = [Axis(scale='x'), Axis(scale='y')]
test_vis.x_axis_properties(title_size=20, title_offset=10,
label_angle=30, color='#000')
test_vis.y_axis_properties(title_size=20, title_offset=10,
label_angle=30, color='#000')
def check_axis_colors():
for axis in test_vis.axes:
props = axis.properties
for prop in [props.title.fill, props.labels.fill]:
nt.assert_equals(getattr(prop, 'value'), '#000')
for prop in [props.axis.stroke, props.major_ticks.stroke,
props.minor_ticks.stroke, props.ticks.stroke]:
nt.assert_equals(getattr(prop, 'value'), '#000')
for axis in test_vis.axes:
props = axis.properties
nt.assert_equals(props.labels.angle.value, 30)
nt.assert_equals(props.title.font_size.value, 20)
nt.assert_equals(props.title.dy.value, 10)
check_axis_colors()
test_vis.axes = [Axis(scale='x'), Axis(scale='y')]
test_vis.common_axis_properties(color='#000')
for axis in test_vis.axes:
check_axis_colors()
def test_legends(self):
test_vis = Visualization()
test_vis.legend(title='Test', text_color='#000')
nt.assert_equals(test_vis.legends[0].title, 'Test')
nt.assert_equals(test_vis.legends[0].properties.labels.fill.value,
'#000')
nt.assert_equals(test_vis.legends[0].properties.title.fill.value,
'#000')
def test_colors(self):
test_vis = Line([1, 2, 3])
rng = ['foo', 'bar']
test_vis.colors(range_=rng)
nt.assert_equals(test_vis.scales['color'].range, rng)
def test_to_json(self):
"""Test JSON to string"""
pretty = '''{
"marks": [],
"axes": [],
"data": [],
"scales": [],
"legends": []
}'''
test = Visualization()
actual, tested = json.loads(pretty), json.loads(test.to_json())
nt.assert_dict_equal(actual, tested)
class TestData(object):
"""Test the Data class"""
def test_grammar_typechecking(self):
"""Data fields are correctly type-checked"""
grammar_types = [
('name', [str]),
('url', [str]),
('values', [list]),
('source', [str]),
('transform', [list])]
assert_grammar_typechecking(grammar_types, Data('name'))
def test_validate(self):
"""Test Data name validation"""
test_obj = Data()
del test_obj.name
nt.assert_raises(ValidationError, test_obj.validate)
def test_serialize(self):
"""Objects are serialized to JSON-compatible objects"""
def epoch(obj):
"""Convert to JS Epoch time"""
return int(time.mktime(obj.timetuple())) * 1000
types = [('test', str, 'test'),
(pd.Timestamp('2013-06-08'), int,
epoch(pd.Timestamp('2013-06-08'))),
(datetime.utcnow(), int, epoch(datetime.utcnow())),
(1, int, 1),
(1.0, float, 1.0),
(np.float32(1), float, 1.0),
(np.int32(1), int, 1),
(np.float64(1), float, 1.0),
(np.int64(1), int, 1)]
for puts, pytype, gets in types:
nt.assert_equal(Data.serialize(puts), gets)
class BadType(object):
"""Bad object for type warning"""
test_obj = BadType()
with nt.assert_raises(LoadError) as err:
Data.serialize(test_obj)
nt.assert_equals(err.exception.args[0],
'cannot serialize index of type BadType')
def test_pandas_series_loading(self):
"""Pandas Series objects are correctly loaded"""
# Test valid series types
name = ['_x', ' name']
length = [0, 1, 2]
index_key = [None, 'ix', 1]
index_types = ['int', 'char', 'datetime', 'Timestamp']
value_key = [None, 'x', 1]
value_types = ['int', 'char', 'datetime', 'Timestamp', 'float',
'numpy float', 'numpy int']
series_info = product(name, length, index_key, index_types,
value_key, value_types)
for n, l, ikey, itype, vkey, vtype in series_info:
index = sequences[itype](l)
series = pd.Series(sequences[vtype](l), index=index, name=n,)
vkey = series.name or vkey
expected = [{'idx': Data.serialize(i), 'col': vkey,
'val': Data.serialize(v)}
for i, v in zip(index, series)]
data = Data.from_pandas(series, name=n, series_key=vkey)
nt.assert_list_equal(expected, data.values)
nt.assert_equal(n, data.name)
data.to_json()
# Missing a name
series = pd.Series(np.random.randn(10))
data = Data.from_pandas(series)
nt.assert_equal(data.name, 'table')
def test_pandas_dataframe_loading(self):
# Simple columns/key_on tests
df = pd.DataFrame({'one': [1, 2, 3], 'two': [6, 7, 8],
'three': [11, 12, 13], 'four': [17, 18, 19]})
get_all = [{'col': 'four', 'idx': 0, 'val': 17},
{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'three', 'idx': 0, 'val': 11},
{'col': 'two', 'idx': 0, 'val': 6},
{'col': 'four', 'idx': 1, 'val': 18},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'three', 'idx': 1, 'val': 12},
{'col': 'two', 'idx': 1, 'val': 7},
{'col': 'four', 'idx': 2, 'val': 19},
{'col': 'one', 'idx': 2, 'val': 3},
{'col': 'three', 'idx': 2, 'val': 13},
{'col': 'two', 'idx': 2, 'val': 8}]
get1 = [{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'one', 'idx': 2, 'val': 3}]
get2 = [{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'two', 'idx': 0, 'val': 6},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'two', 'idx': 1, 'val': 7},
{'col': 'one', 'idx': 2, 'val': 3},
{'col': 'two', 'idx': 2, 'val': 8}]
getkey2 = [{'col': 'one', 'idx': 6, 'val': 1},
{'col': 'one', 'idx': 7, 'val': 2},
{'col': 'one', 'idx': 8, 'val': 3}]
getkey3 = [{'col': 'one', 'idx': 11, 'val': 1},
{'col': 'two', 'idx': 11, 'val': 6},
{'col': 'one', 'idx': 12, 'val': 2},
{'col': 'two', 'idx': 12, 'val': 7},
{'col': 'one', 'idx': 13, 'val': 3},
{'col': 'two', 'idx': 13, 'val': 8}]
val_all = Data.from_pandas(df)
val1 = Data.from_pandas(df, columns=['one'])
val2 = Data.from_pandas(df, columns=['one', 'two'])
key2 = Data.from_pandas(df, columns=['one'], key_on='two')
key3 = Data.from_pandas(df, columns=['one', 'two'], key_on='three')
nt.assert_list_equal(val_all.values, get_all)
nt.assert_list_equal(val1.values, get1)
nt.assert_list_equal(val2.values, get2)
nt.assert_list_equal(key2.values, getkey2)
nt.assert_list_equal(key3.values, getkey3)
# Missing a name
dataframe = pd.DataFrame(np.random.randn(10, 3))
data = Data.from_pandas(dataframe)
nt.assert_equal(data.name, 'table')
# Bad obj
nt.assert_raises(ValueError, Data.from_pandas, {})
def test_numpy_loading(self):
"""Numpy ndarray objects are correctly loaded"""
test_data = np.random.randn(6, 3)
index = range(test_data.shape[0])
columns = ['a', 'b', 'c']
data = Data.from_numpy(test_data, name='name', columns=columns)
ikey = Data._default_index_key
expected_values = [
{ikey: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
nt.assert_equal('name', data.name)
index_key = 'akey'
data = Data.from_numpy(test_data, name='name', columns=columns,
index_key=index_key)
expected_values = [
{index_key: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
index = ['a', 'b', 'c', 'd', 'e', 'f']
data = Data.from_numpy(test_data, name='name', index=index,
columns=columns)
expected_values = [
{ikey: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
# Bad loads
with nt.assert_raises(LoadError) as err:
Data.from_numpy(test_data, 'test', columns, index=range(4))
nt.assert_equal(err.expected, LoadError)
columns = ['a', 'b']
with nt.assert_raises(LoadError) as err:
Data.from_numpy(test_data, 'test', columns, index)
nt.assert_equal(err.expected, LoadError)
def test_from_mult_iters(self):
"""Test set of iterables"""
test1 = Data.from_mult_iters(x=[0, 1, 2], y=[3, 4, 5], z=[7, 8, 9],
idx='x')
test2 = Data.from_mult_iters(fruit=['apples', 'oranges', 'grapes'],
count=[12, 16, 54], idx='fruit')
values1 = [{'col': 'y', 'idx': 0, 'val': 3},
{'col': 'y', 'idx': 1, 'val': 4},
{'col': 'y', 'idx': 2, 'val': 5},
{'col': 'z', 'idx': 0, 'val': 7},
{'col': 'z', 'idx': 1, 'val': 8},
{'col': 'z', 'idx': 2, 'val': 9}]
values2 = [{'col': 'count', 'idx': 'apples', 'val': 12},
{'col': 'count', 'idx': 'oranges', 'val': 16},
{'col': 'count', 'idx': 'grapes', 'val': 54}]
nt.assert_list_equal(test1.values, values1)
nt.assert_list_equal(test2.values, values2)
# Iter errors
nt.assert_raises(ValueError, Data.from_mult_iters, x=[0], y=[1, 2])
def test_from_iter(self):
"""Test data from single iterable"""
test_list = Data.from_iter([10, 20, 30])
test_dict = Data.from_iter({
'apples': 10, 'bananas': 20, 'oranges': 30})
get1 = [{'col': 'data', 'idx': 0, 'val': 10},
{'col': 'data', 'idx': 1, 'val': 20},
{'col': 'data', 'idx': 2, 'val': 30}]
get2 = [{'col': 'data', 'idx': 'apples', 'val': 10},
{'col': 'data', 'idx': 'bananas', 'val': 20},
{'col': 'data', 'idx': 'oranges', 'val': 30}]
nt.assert_list_equal(test_list.values, get1)
nt.assert_list_equal(test_dict.values, get2)
def test_serialize_error(self):
"""Test serialization error"""
class badType(object):
"""I am a bad actor"""
broken = badType()
nt.assert_raises(LoadError, Data.serialize, broken)
def test_keypairs(self):
Data.keypairs([0, 10, 20, 30, 40])
Data.keypairs(((0, 1), (0, 2), (0, 3)))
Data.keypairs({'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50})
class TestTransform(object):
"""Test the Transform class"""
def test_grammar_typechecking(self):
"""Transform field typechecking"""
grammar_types = [
('fields', [list]), ('from_', [str]),
('as_', [list]), ('keys', [list]), ('sort', [str]),
('test', [str]), ('field', [str]), ('expr', [str]),
('by', [str, list]), ('value', [str]), ('median', [bool]),
('with_', [str]), ('key', [str]), ('with_key', [str]),
('links', [str]), ('size', [list]), ('iterations', [int]),
('charge', [int, str]), ('link_distance', [int, str]),
('link_strength', [int, str]), ('friction', [int, float]),
('theta', [int, float]), ('gravity', [int, float]),
('alpha', [int, float]), ('point', [str]),
('height', [str])]
assert_grammar_typechecking(grammar_types, Transform())
class TestValueRef(object):
"""Test the ValueRef class"""
def test_grammar_typechecking(self):
"""ValueRef fields are correctly type-checked"""
grammar_types = [
('value', [str]),
('value', [int]),
('value', [float]),
('field', [str]),
('scale', [str]),
('mult', [int]),
('mult', [float]),
('offset', [int]),
('offset', [float]),
('band', [bool])]
assert_grammar_typechecking(grammar_types, ValueRef())
def test_json_serialization(self):
"""ValueRef JSON is correctly serialized"""
vref = ValueRef()
nt.assert_equal(json.dumps({}), vref.to_json(pretty_print=False))
props = {
'value': 'test-value',
'band': True}
vref = ValueRef(**props)
nt.assert_equal(json.dumps(props, sort_keys=True),
vref.to_json(pretty_print=False))
props = {
'value': 'test-value',
'field': 'test-field',
'scale': 'test-scale',
'mult': 1.2,
'offset': 4,
'band': True}
vref = ValueRef(**props)
nt.assert_equal(json.dumps(props, sort_keys=True),
vref.to_json(pretty_print=False))
class TestPropertySet(object):
"""Test the PropertySet Class"""
def test_grammar_typechecking(self):
"""PropertySet fields are correctly type-checked"""
# All fields must be ValueRef for Mark properties
fields = [
'x', 'x2', 'width', 'y', 'y2', 'height', 'opacity', 'fill',
'fill_opacity', 'stroke', 'stroke_width', 'stroke_opacity',
'size', 'shape', 'path', 'inner_radius', 'outer_radius',
'start_angle', 'end_angle', 'interpolate', 'tension', 'url',
'align', 'baseline', 'text', 'dx', 'dy', 'angle', 'font',
'font_size', 'font_weight', 'font_style']
grammar_types = [(f, [ValueRef]) for f in fields]
assert_grammar_typechecking(grammar_types, PropertySet())
def test_validation_checking(self):
"""ValueRef fields are grammar-checked"""
grammar_errors = [('fill_opacity', ValueRef(value=-1), ValueError,
'fill_opacity must be between 0 and 1'),
('fill_opacity', ValueRef(value=2), ValueError,
'fill_opacity must be between 0 and 1'),
('stroke_width', ValueRef(value=-1), ValueError,
'stroke width cannot be negative'),
('stroke_opacity', ValueRef(value=-1), ValueError,
'stroke_opacity must be between 0 and 1'),
('stroke_opacity', ValueRef(value=2), ValueError,
'stroke_opacity must be between 0 and 1'),
('size', ValueRef(value=-1), ValueError,
'size cannot be negative')]
assert_grammar_validation(grammar_errors, PropertySet())
bad_shape = ValueRef(value="BadShape")
nt.assert_raises(ValueError, PropertySet, shape=bad_shape)
def test_manual_typecheck(self):
"""Test manual typechecking for elements like marks"""
test_attr = [('fill', ValueRef(value=1)),
('fill_opacity', ValueRef(value='str')),
('stroke', ValueRef(value=1)),
('stroke_width', ValueRef(value='str')),
('stroke_opacity', ValueRef(value='str')),
('size', ValueRef(value='str')),
('shape', ValueRef(value=1)),
('path', ValueRef(value=1))]
assert_manual_typechecking(test_attr, PropertySet())
class TestMarkProperties(object):
"""Test the MarkProperty Class"""
def test_grammar_typechecking(self):
"""Test grammar of MarkProperty"""
fields = ['enter', 'exit', 'update', 'hover']
grammar_types = [(f, [PropertySet]) for f in fields]
assert_grammar_typechecking(grammar_types, MarkProperties())
class TestMarkRef(object):
"""Test the MarkRef Class"""
def test_grammar_typechecking(self):
"""Test grammar of MarkRef"""
grammar_types = [('data', [str]), ('transform', [list])]
assert_grammar_typechecking(grammar_types, MarkRef())
class TestMark(object):
"""Test Mark Class"""
def test_grammar_typechecking(self):
"""Test grammar of Mark"""
grammar_types = [('name', [str]), ('description', [str]),
('from_', [MarkRef]),
('properties', [MarkProperties]), ('key', [str]),
('key', [str]), ('delay', [ValueRef]),
('ease', [str]), ('marks', [list]),
('scales', [list, KeyedList])]
assert_grammar_typechecking(grammar_types, Mark())
def test_validation_checking(self):
"""Mark fields are grammar checked"""
nt.assert_raises(ValueError, Mark, type='panda')
class TestDataRef(object):
"""Test DataRef class"""
def test_grammar_typechecking(self):
"""Test grammar of DataRef"""
grammar_types = [('data', [str]), ('field', [str])]
assert_grammar_typechecking(grammar_types, DataRef())
class TestScale(object):
"""Test Scale class"""
def test_grammar_typechecking(self):
"""Test grammar of Scale"""
grammar_types = [('name', [str]), ('type', [str]),
('domain', [list, DataRef]),
('domain_min', [float, int, DataRef]),
('domain_max', [float, int, DataRef]),
('range', [list, str]),
('range_min', [float, int, DataRef]),
('range_max', [float, int, DataRef]),
('reverse', [bool]), ('round', [bool]),
('points', [bool]), ('clamp', [bool]),
('nice', [bool, str]),
('exponent', [float, int]),
('zero', [bool])]
assert_grammar_typechecking(grammar_types, Scale())
class TestAxisProperties(object):
"""Test AxisProperties Class"""
def test_grammar_typechecking(self):
"""Test grammar of AxisProperties"""
grammar_types = [('major_ticks', [PropertySet]),
('minor_ticks', [PropertySet]),
('labels', [PropertySet]),
('axis', [PropertySet])]
assert_grammar_typechecking(grammar_types, AxisProperties())
class TestAxis(object):
"""Test Axis Class"""
def test_grammar_typechecking(self):
"""Test grammar of Axis"""
grammar_types = [('title', [str]),
('title_offset', [int]),
('grid', [bool]),
('scale', [str]),
('orient', [str]), ('format', [str]),
('ticks', [int]), ('values', [list]),
('subdivide', [int, float]),
('tick_padding', [int]), ('tick_size', [int]),
('tick_size_major', [int]),
('tick_size_minor', [int]),
('tick_size_end', [int]),
('offset', [int]),
('properties', [AxisProperties])]
assert_grammar_typechecking(grammar_types, Axis())
def test_validation_checking(self):
"""Axis fields are grammar checked"""
nt.assert_raises(ValueError, Axis, type='panda')
class TestLegendProperties(object):
"""Test LegendProperties class"""
def test_grammar_typechecking(self):
"""Test grammar of LegendProperties"""
grammar_types = [('title', [PropertySet]),
('labels', [PropertySet]),
('symbols', [PropertySet]),
('gradient', [PropertySet]),
('legend', [PropertySet])]
assert_grammar_typechecking(grammar_types, LegendProperties())
class TestLegend(object):
"""Test Legend Class"""
def test_grammar_typechecking(self):
"""Test grammar of Legend"""
grammar_types = [('size', [str]),
('shape', [str]),
('fill', [str]),
('stroke', [str]),
('title', [str]),
('format', [str]),
('values', [list]),
('properties', [LegendProperties])]
assert_grammar_typechecking(grammar_types, Legend())
def test_validation_checking(self):
"""Legend fields are grammar checked"""
nt.assert_raises(ValueError, Legend, orient='center')
| mit |
elijah513/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
Here, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
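# Reminder (added note): BIC = -2 * log-likelihood + n_parameters * log(n_samples),
# so lower is better; the loop below keeps the model with the lowest BIC.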
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
madjelan/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 83 | 34544 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated with the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
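# Illustrative example (added, not part of the original module): for
# X = [[0.], [1.], [3.]], l1_cross_distances(X) returns
# D = [[1.], [3.], [2.]] and ij = [[0, 1], [0, 2], [1, 2]],
# i.e. |X[i] - X[j]| for every pair i < j.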
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
        Input X and observations y are centered and scaled with respect to the
        means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        The 'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
        It consists of iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
        y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(int(np.ceil(n_eval / float(batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(int(np.ceil(n_eval / float(batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
        # det(R) equals the squared product of the diagonal elements of its
        # Cholesky factor C; detR below is actually det(R) ** (1 / n_samples),
        # computed from the Cholesky diagonal for numerical stability.
        detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
GrahamDennis/xpdeint | xpdeint/xsil2graphicsParser.py | 1 | 5648 | #!/usr/bin/env python
# encoding: utf-8
"""
xsil2graphicsParser.py
Created by Joe Hope on 2009-01-06.
Modified by Thomas Antioch on 2013-07-18.
Copyright (c) 2009-2012, Joe Hope
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import getopt
from xpdeint.XSILFile import XSILFile
from xpdeint.IndentFilter import IndentFilter
# Hack for Mac OS X so it doesn't import the web rendering
# framework WebKit when Cheetah tries to import the Python
# web application framework WebKit
if sys.platform == 'darwin':
module = type(sys)
sys.modules['WebKit'] = module('WebKit')
from xpdeint.xsil2graphics2.MathematicaImport import MathematicaImport
from xpdeint.xsil2graphics2.MatlabOctaveImport import MatlabOctaveImport
from xpdeint.xsil2graphics2.PythonImport import PythonImport
from xpdeint.xsil2graphics2.RImport import RImport
# The help message printed when --help is used as an argument
help_message = '''
usage: xsil2graphics2 [options] filenames [...]
Options and arguments for xsil2graphics2:
-h : Print this message (also --help)
-o filename : This overrides the name of the output file to be generated (also --outfile)
--debug : Debug mode
Options:
infile(s): required, the input xsil file or files
-h/--help: optional, display this information
-m/--matlab: optional, produce matlab output (default, also supports Octave)
-e/--mathematica: optional, produce mathematica output
-8/--octave: optional, produce octave output (identical to MATLAB output)
-p/--python: optional, produce Python/pylab/matplotlib script (HDF5 requires h5py)
-r/--R: optional, produce R output
-o/--outfile: optional, alternate output file name (one input file only)
--debug: Debug mode
For further help, please see http://www.xmds.org
'''
class Usage(Exception):
"""
Exception class used when an error occurs parsing command
line arguments.
"""
def __init__(self, msg):
self.msg = msg
def main(argv=None):
# Default to not being verbose with error messages
# If debug is true, then when an error occurs during parsing,
# the Python backtrace will be shown in addition to the XML location
# where the error occurred.
debug = False
# Import version information
from Preferences import versionString
from Version import subversionRevisionString
print "xsil2graphics2 from xmds2 version %(versionString)s (%(subversionRevisionString)s)" % locals()
# Attempt to parse command line arguments
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.gnu_getopt(argv[1:], "hm8epro:", ["help", "matlab", "octave", "mathematica", "python", "R", "outfile=", "debug"])
except getopt.error, msg:
raise Usage(msg)
userSpecifiedFilename = None
defaultExtension = None
outputTemplateClass = MatlabOctaveImport
optionList = [
("-m", "--matlab", MatlabOctaveImport),
("-8", "--octave", MatlabOctaveImport),
("-e", "--mathematica", MathematicaImport),
("-p", "--python", PythonImport),
("-r", "--R", RImport),
]
# option processing
for option, value in opts:
if option in ("-h", "--help"):
raise Usage(help_message)
if option in ("-o", "--outfile"):
userSpecifiedFilename = value
if option == '--debug':
debug = True
for shortOpt, longOpt, importClass in optionList:
if option in (shortOpt, longOpt):
outputTemplateClass = importClass
if userSpecifiedFilename and len(args) > 1:
print >> sys.stderr, "The '-o' option cannot be used when processing multiple xsil files."
if not args:
# No xsil files to process
raise Usage(help_message)
except Usage, err:
print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
print >> sys.stderr, "\t for help use --help"
return 2
outputTemplate = outputTemplateClass(filter=IndentFilter)
print "Generating output for %s." % outputTemplate.name
for xsilInputName in args:
# If an output name wasn't specified, construct a default
if not userSpecifiedFilename:
# Strip off the '.xsil' extension
baseName = os.path.splitext(xsilInputName)[0]
# Grab the default extension from the output template
outputFilename = baseName + '.' + outputTemplateClass.defaultExtension
else:
outputFilename = userSpecifiedFilename
print "Writing import script for '%(xsilInputName)s' to '%(outputFilename)s'." % locals()
try:
inputXSILFile = XSILFile(xsilInputName, loadData='ascii')
except IOError, err:
print >> sys.stderr, "Exception raised while trying to read xsil file:", err
if debug:
raise
return
# Now actually write the simulation to disk.
try:
file(outputFilename, 'w').write(outputTemplate.loadXSILFile(inputXSILFile))
except Exception, err:
print >> sys.stderr, 'ERROR:', err
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
rbharath/deepchem | deepchem/molnet/load_function/pdbbind_datasets.py | 1 | 4298 | """
PDBBind dataset loader.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import re
import time
import deepchem
import numpy as np
import pandas as pd
def featurize_pdbbind(data_dir=None, feat="grid", subset="core"):
"""Featurizes pdbbind according to provided featurization"""
tasks = ["-logKd/Ki"]
if "DEEPCHEM_DATA_DIR" in os.environ:
data_dir = os.environ["DEEPCHEM_DATA_DIR"]
else:
data_dir = "/tmp"
data_dir = os.path.join(data_dir, "pdbbind")
dataset_dir = os.path.join(data_dir, "%s_%s" % (subset, feat))
if not os.path.exists(dataset_dir):
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz'
)
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz'
)
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz'
)
os.system('tar -zxvf ' + os.path.join(data_dir, 'core_grid.tar.gz') + ' -C '
+ data_dir)
os.system('tar -zxvf ' + os.path.join(data_dir, 'full_grid.tar.gz') + ' -C '
+ data_dir)
os.system('tar -zxvf ' + os.path.join(data_dir, 'refined_grid.tar.gz') +
' -C ' + data_dir)
return deepchem.data.DiskDataset(dataset_dir), tasks
def load_pdbbind_grid(split="random",
featurizer="grid",
subset="core",
reload=True):
"""Load PDBBind datasets. Does not do train/test split"""
if featurizer == 'grid':
dataset, tasks = featurize_pdbbind(feat=featurizer, subset=subset)
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
transformers = []
for transformer in transformers:
train = transformer.transform(train)
for transformer in transformers:
valid = transformer.transform(valid)
for transformer in transformers:
test = transformer.transform(test)
else:
if "DEEPCHEM_DATA_DIR" in os.environ:
data_dir = os.environ["DEEPCHEM_DATA_DIR"]
else:
data_dir = "/tmp"
if reload:
save_dir = os.path.join(
data_dir, "pdbbind_" + subset + "/" + featurizer + "/" + split)
dataset_file = os.path.join(data_dir, subset + "_smiles_labels.csv")
if not os.path.exists(dataset_file):
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/' +
subset + "_smiles_labels.csv")
tasks = ["-logKd/Ki"]
if reload:
loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(
save_dir)
if loaded:
return tasks, all_dataset, transformers
if featurizer == 'ECFP':
featurizer = deepchem.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = deepchem.feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = deepchem.feat.WeaveFeaturizer()
elif featurizer == 'Raw':
featurizer = deepchem.feat.RawFeaturizer()
loader = deepchem.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
transformers = [
deepchem.trans.NormalizationTransformer(
transform_y=True, dataset=dataset)
]
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'scaffold': deepchem.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
if reload:
deepchem.utils.save.save_dataset_to_disk(save_dir, train, valid, test,
transformers)
return tasks, (train, valid, test), transformers
| mit |
JackKelly/neuralnilm_prototype | scripts/e427.py | 2 | 6384 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
425: FF auto encoder with single appliance (Fridge)
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 2000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 256
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[100, 500, 200, 2500, 2400],
# max_input_power=100,
max_diff=100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.8,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-3,
learning_rate_changes_by_iteration={
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'W': Normal(std=1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
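# Added note: exp_a defines a feed-forward autoencoder over windows of
# SEQ_LENGTH = 256 samples, with a 256 -> 64 -> 32 -> 64 -> 256 dense
# bottleneck (rectified-linear hidden layers and a linear output layer).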
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "rsync -uvzr --progress --exclude '.git' --exclude '.ropeproject' --exclude '*/.ipynb_checkpoints' --exclude '*/flycheck_*.py' /home/jack/workspace/python/neuralnilm/ /mnt/sshfs/imperial/workspace/python/neuralnilm/"
End:
"""
| mit |
myuuuuun/NumericalCalculation | chapter7/chap7.py | 1 | 2708 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2015 @myuuuuun
https://github.com/myuuuuun/NumericalCalculation
This software is released under the MIT License.
"""
from __future__ import division, print_function
import math
import numpy as np
import scipy as sc
import scipy.linalg as scl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ti
EPSIRON = 1.0e-8
def ldl_decomposition(matrix):
p, L, U = scl.lu(matrix)
size = matrix.shape[0]
D = np.zeros((size, size), dtype=np.float64)
divisor = np.ones(size, dtype=np.float64)
for i in range(size):
D[i][i] = U[i][i]
divisor = U[i][i]
U[i] /= divisor
return L, D, U
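# Illustrative sketch (added; assumes the matrix is symmetric positive definite so
# scipy.linalg.lu needs no row pivoting): for A = [[4., 2.], [2., 3.]],
# ldl_decomposition(A) gives L = [[1, 0], [0.5, 1]], D = diag(4, 2),
# U = [[1, 0.5], [0, 1]], and L.dot(D).dot(U) reproduces A.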
def power_method(matrix, initial_vector, repeat):
if repeat == 0:
y = matrix.dot(initial_vector).flatten()
x = initial_vector.flatten()
argmax_x = np.argmax(np.abs(x))
return y[argmax_x] / x[argmax_x]
y = matrix.dot(initial_vector)
x = y / np.linalg.norm(y)
print("*"*30)
print(repeat, "回目")
print(y)
print(x)
return power_method(matrix, x, repeat-1)
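# Added note: power_method estimates the dominant eigenvalue by repeatedly
# applying `matrix` and normalizing; the estimate is the componentwise ratio
# y_i / x_i at the largest-magnitude entry of x. power_method_rayleigh below
# uses the Rayleigh quotient x^T A x / (x^T x) instead.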
def power_method_rayleigh(matrix, initial_vector, repeat):
if repeat == 0:
y = matrix.dot(initial_vector).flatten()
x = initial_vector.flatten()
x_t = x.transpose()
return x_t.dot(matrix).dot(x) / x_t.dot(x)
y = matrix.dot(initial_vector)
x = y / np.linalg.norm(y)
return power_method_rayleigh(matrix, x, repeat-1)
def inverse_iteration(approx, matrix, initial_vector=None, repeat=2):
size = matrix.shape[0]
shifted_matrix = matrix - np.identity(size) * approx
if initial_vector is None:
initial_vector = np.ones((size, 1))
for i in range(repeat):
y = np.linalg.solve(shifted_matrix, initial_vector)
initial_vector = y / np.linalg.norm(y)
y = y.flatten()
x = initial_vector.flatten()
argmax_x = np.argmax(np.abs(x))
print(x[argmax_x])
print(y[argmax_x])
return approx + (x[argmax_x] / y[argmax_x])
if __name__ == '__main__':
"""
A = np.array([[5, 1, -1], [2, 4, -2], [1, -1, 3]])
x = np.array([[1], [1], [1]])
"""
A = np.array([[2, 1, 0, 0],
[1, 2, 1, 0],
[0, 1, 2, 1],
[0, 0, 1, 2]
], dtype=np.float64)
x = np.array([[1, 1, 1, 1]], dtype=np.float64).transpose()
"""
a = power_method(A, x, 3)
b = power_method_rayleigh(A, x, 3)
print(a)
print(b)
"""
#c = inverse_iteration(3.61765, A, x)
#print(c)
L, D, U = ldl_decomposition(A)
print(A)
print(L)
print(D)
print(U)
print(L.dot(D).dot(U))
| mit |
DonRegan/nbodykit | nbodykit/plugins/DataSource/HDF5.py | 1 | 4774 | from nbodykit.plugins import DataSource
from nbodykit.utils.pluginargparse import BoxSizeParser
import numpy
import logging
from nbodykit.utils import selectionlanguage
logger = logging.getLogger('HDF5')
def list_str(value):
return value.split()
class HDF5DataSource(DataSource):
"""
Class to read field data from a HDF5 data file
Notes
-----
* `h5py` must be installed to use this data source.
Parameters
----------
path : str
the path of the file to read the data from
dataset: list of str
For text files, one or more strings specifying the names of the data
columns. Shape must be equal to number of columns
in the field, otherwise, behavior is undefined.
For hdf5 files, the name of the pandas data group.
BoxSize : float or array_like (3,)
the box size, either provided as a single float (isotropic)
or an array of the sizes of the three dimensions
    poscol : str, optional
        name of the column holding the position vector
    velcol : str, optional
        name of the column holding the velocity vector
    masscol : str, optional
        name of the column holding the mass; if None, unit mass is assumed
rsd : [x|y|z], optional
direction to do the redshift space distortion
posf : float, optional
multiply the position data by this factor
velf : float, optional
multiply the velocity data by this factor
select : str, optional
string specifying how to select a subset of data, based
on the column names. For example, if there are columns
`type` and `mass`, you could specify
select= "type == central and mass > 1e14"
"""
field_type = "HDF5"
@classmethod
def register(kls):
h = kls.add_parser()
h.add_argument("path", help="path to file")
h.add_argument("dataset", help="name of dataset in HDF5 file")
h.add_argument("BoxSize", type=BoxSizeParser,
help="the size of the isotropic box, or the sizes of the 3 box dimensions.")
h.add_argument("-poscol", default='Position',
help="name of the position column")
h.add_argument("-velcol", default='Velocity',
help="name of the velocity column")
h.add_argument("-masscol", default=None,
help="name of the mass column, None for unit mass")
h.add_argument("-rsd", choices="xyz",
help="direction to do redshift distortion")
h.add_argument("-posf", default=1., type=float,
help="factor to scale the positions")
h.add_argument("-velf", default=1., type=float,
help="factor to scale the velocities")
h.add_argument("-select", default=None, type=selectionlanguage.Query,
help='row selection based on conditions specified as string')
def read(self, columns, comm, bunchsize):
if comm.rank == 0:
try:
import h5py
except:
raise ImportError("h5py must be installed to use HDF5 reader")
dataset = h5py.File(self.path, mode='r')[self.dataset]
data = dataset[...]
nobj = len(data)
# select based on input conditions
if self.select is not None:
mask = self.select.get_mask(data)
data = data[mask]
logger.info("total number of objects selected is %d / %d" % (len(data), nobj))
# get position and velocity, if we have it
pos = data[self.poscol].astype('f4')
pos *= self.posf
if self.velcol is not None:
vel = data[self.velcol].astype('f4')
vel *= self.velf
else:
vel = numpy.zeros(nobj, dtype=('f4', 3))
if self.masscol is not None:
mass = data[self.masscol]
else:
pos = numpy.empty(0, dtype=('f4', 3))
vel = numpy.empty(0, dtype=('f4', 3))
mass = numpy.empty(0, dtype='f4')
if self.masscol is None:
mass = None
P = {}
if 'Position' in columns:
P['Position'] = pos
if 'Velocity' in columns or self.rsd is not None:
P['Velocity'] = vel
if 'Mass' in columns:
P['Mass'] = mass
if self.rsd is not None:
dir = "xyz".index(self.rsd)
P['Position'][:, dir] += P['Velocity'][:, dir]
P['Position'][:, dir] %= self.BoxSize[dir]
yield [P[key] for key in columns]
| gpl-3.0 |
amaurywalbert/twitter | net_structure/multilayer/unweighted_directed/hashmap_plot_nodes_correlation_v2.py | 1 | 6546 | # -*- coding: latin1 -*-
################################################################################################
#
#
import calc
import sys, time, json, os, os.path
import numpy as np
from math import*
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib as mpl
import pylab
import numpy as np
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
import pandas_datareader
from pandas_datareader import data, wb
from pandas import Series, DataFrame
pd.__version__
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 1 - Plot the data according to the metrics and properties computed on the multilayer networks - Nodes
## Version 2 - Print the standard deviation between the layer pairs to the screen
##
##
## ID_ego a:friends s:followers r:retweets l:likes m:mentions
##
## ID_ego as:data sr:data rl:data lm:data ma:data - TXT
## {ID_ego:{ as:data sr:data rl:data lm:data ma:data} - JSON
######################################################################################################################################################################
######################################################################################################################################################################
#
# Create directories
#
######################################################################################################################################################################
def create_dirs(x):
if not os.path.exists(x):
os.makedirs(x)
#####################################################################################################################################################################
# Color Bar - Correlation Matrix
######################################################################################################################################################################
def color_bar(_aa,_as,_ar,_al,_am,_ss,_sr,_sl,_sm,_rr,_rl,_rm,_ll,_lm,_mm,output):
print ("\nCriando Matriz de Correlação...")
print ("Salvando dados em: "+str(output)+"\n")
_rs=_sr
_ml=_lm
_ma=_am
_la=_al
_sa=_as
_ra=_ar
_ls=_sl
_ms=_sm
_lr=_rl
_mr=_rm
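    # Added note: the assignments above mirror the unique layer-pair values so the
    # 4x4 matrix below (Follow, Retweet, Like, Mention) is symmetric; the follower
    # layer values are accepted as arguments but not included in the plot.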
raw_data = {'Follow': [_aa,_ar,_al,_am],
'Retweet': [_ra,_rr,_rl,_rm],
'Like': [_la,_lr,_ll,_lm],
'Mention': [_ma,_mr,_ml,_mm]
}
df = pd.DataFrame(raw_data, columns = ['Follow','Retweet','Like','Mention'])
print df
plt.matshow(df,cmap=plt.cm.get_cmap('Blues', 10))
# plt.matshow(df,cmap=plt.cm.get_cmap('gray_r', 10)) #10 tonalidades
plt.xticks(range(len(df.columns)), df.columns,rotation=30,size=9)
plt.yticks(range(len(df.columns)), df.columns,rotation=30,size=9)
plt.colorbar()
for (i, j), z in np.ndenumerate(df): #Show values in the grid
plt.text(j, i, '{:0.2f}'.format(z), ha='center', va='center',bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.9'),size=8)
name = "nodes_correlation"
plt.savefig(output+name+".png",bbox_inches='tight',dpi=300)
plt.show()
plt.close()
print (" - OK! Color Bar salvo em: "+str(output))
print
######################################################################################################################################################################
#
# Plot graphs related to the data
#
######################################################################################################################################################################
def prepare(metric,file,output):
with open(file,'r') as f:
data = json.load(f)
print data
color_bar(data['aa']['pearson'],data['as']['pearson'],data['ar']['pearson'],data['al']['pearson'],data['am']['pearson'],data['ss']['pearson'],data['sr']['pearson'],data['sl']['pearson'],data['sm']['pearson'],data['rr']['pearson'],data['rl']['pearson'],data['rm']['pearson'],data['ll']['pearson'],data['lm']['pearson'],data['mm']['pearson'],output_dir)
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
os.system('clear')
print "################################################################################"
print" "
print" Plotar gráficos sobre as métricas e propriedades calculadas - Multilayer "
print" "
print"#################################################################################"
print
metric = "nodes_correlation"
    if not os.path.exists(str(data_dir)+str(metric)+".json"):                       # Check whether the input file exists
        print ("Unable to locate file: "+str(data_dir)+str(metric)+".json")
else:
file = str(data_dir)+str(metric)+".json"
output =str(output_dir)+str(metric)+"/"
create_dirs(output)
prepare(metric,file,output)
print("\n######################################################################\n")
print("Script finalizado!")
print("\n######################################################################\n")
######################################################################################################################################################################
#
# PROGRAM START
#
######################################################################################################################################################################
data_dir = "/home/amaury/Dropbox/net_structure_hashmap/multilayer/graphs_with_ego/unweighted_directed/json/" # Diretório com arquivos JSON com métricas e propriedades Calculadas
output_dir = "/home/amaury/Dropbox/net_structure_hashmap_statistics/multilayer/graphs_with_ego/unweighted_directed/" # Diretório para Salvar os gráficos...
#Executa o método main
if __name__ == "__main__": main() | gpl-3.0 |
xubenben/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
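# Illustrative sketch (not the library implementation): the "balanced" heuristic
# asserted above boils down to n_samples / (n_classes * class_counts); the helper
# below only documents that expectation.
def _balanced_class_weight_sketch(y):
    classes, counts = np.unique(y, return_counts=True)
    # each class is weighted by total_samples / (n_classes * its_count)
    return len(y) / (len(classes) * counts.astype(float))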
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
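# Hedged note: as the "missing class" cases above illustrate, when indices are
# given, samples whose class never occurs inside the subsample end up with a
# weight of 0, for both the deprecated "auto" and the "balanced" presets.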
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
jaantollander/Fourier-Legendre | src/plotting/convergence_plotting.py | 8 | 3846 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.widgets import Button
from src_legacy.analysis.convergence import max_slope
from src_legacy.io.load.load import LoadCsv
from src_legacy.other.settings import timeit
class ConvergencePlot:
"""
Interactive plot for results.
http://bastibe.de/2013-05-30-speeding-up-matplotlib.html
http://stackoverflow.com/questions/29277080/efficient-matplotlib-redrawing
"""
def __init__(self, filename, function):
self.initial = True # Flag
loads = LoadCsv(filename, function)
self.function = function
self.index = loads.errors.index.values
self.errors = loads.errors
self.a = loads.inputs[0]
self.x = loads.inputs[1]
self.fig, self.ax = plt.subplots(figsize=(10, 8))
self.fig.subplots_adjust(bottom=0.2)
self.ax.set(ylim=(10 ** -6, np.max(self.errors.values) + 0.1),
xlim=(self.index.min(), self.index.max()),
xlabel=r'$ p $',
ylabel=r'$ \varepsilon $')
self.line, = self.ax.loglog([], [], lw=1, marker='*')
self.cline, = self.ax.loglog([], [], lw=2, marker='*')
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
# rect = [left, bottom, width, height] in normalized (0, 1) units
xprev = plt.axes([0.44, 0.05, 0.1, 0.075])
xnext = plt.axes([0.56, 0.05, 0.1, 0.075])
bnext = Button(xnext, r'$ x \Rightarrow $')
bprev = Button(xprev, r'$ \Leftarrow x $')
bnext.on_clicked(self.xnext)
bprev.on_clicked(self.xprev)
aprev = plt.axes([0.91, 0.45, 0.08, 0.075])
anext = plt.axes([0.91, 0.55, 0.08, 0.075])
cnext = Button(anext, r'$ a \Rightarrow $')
cprev = Button(aprev, r'$ \Leftarrow a $')
cnext.on_clicked(self.anext)
cprev.on_clicked(self.aprev)
self.index_x = 0
self.index_a = 0
self.line.set_xdata(self.index)
self.draw()
@timeit
def draw(self):
"""
Redraw the axis
"""
a_ = self.a[self.index_a]
x_ = self.x[self.index_x]
data = self.errors[str(a_)]
data = data[str(x_)]
mask = max_slope(data)
convergence = data.iloc[mask]
self.line.set_ydata(data.values)
self.cline.set_data(convergence.index.values, convergence.values)
self.ax.set_title(r'{function}: '.format(function=self.function) +
r'$ a: {}\approx {:.4f} $, '.format(a_, float(a_)) +
r'$ x: {}\approx {:.4f} $ '.format(x_, float(x_)))
if self.initial:
self.fig.canvas.draw()
self.initial = False
plt.show()
else:
self.fig.canvas.restore_region(self.background)
self.ax.draw_artist(self.ax.patch)
self.ax.draw_artist(self.line)
self.ax.draw_artist(self.cline)
# self.fig.canvas.update()
# self.fig.canvas.flush_events()
self.fig.canvas.blit(self.ax.bbox)
def xnext(self, event):
if self.index_x < len(self.x) - 1:
self.index_x += 1
self.draw()
def xprev(self, event):
if self.index_x > -len(self.x):
self.index_x -= 1
self.draw()
def anext(self, event):
if self.index_a < len(self.a) - 1:
self.index_a += 1
self.draw()
def aprev(self, event):
if self.index_a > 0:
self.index_a -= 1
self.draw()
sns.set()
ConvergencePlot('100000_391_1', 'step_function')
| mit |
petebachant/ACSpy | examples/acsc_pvt_test.py | 1 | 1333 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 20:36:34 2013
This file calls functions from the ACS C library wrapper
@author: Pete
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import acsc
import time
axes = {'y':0, 'z':1, 'turbine':4, 'tow':5}
axis = 5
acc = 1
flags = 0
vel = 1
target = 2
hc = acsc.OpenCommDirect()
if hc == acsc.INVALID:
print("Cannot connect to controller, error", acsc.GetLastError())
else:
acsc.Enable(hc, axis)
time.sleep(0.1)
state = acsc.GetMotorState(hc, axis, acsc.SYNCHRONOUS)
acsc.SetVelocity(hc, axis, vel)
acsc.SetAcceleration(hc, axis, acc)
acsc.SetDeceleration(hc, axis, acc)
position = acsc.GetRPosition(hc, axis)
pvec = [position]
tvec = [time.time()]
acsc.ToPoint(hc, flags, axis, target, acsc.SYNCHRONOUS)
while position != target:
time.sleep(0.1)
position = acsc.GetRPosition(hc, axis, acsc.SYNCHRONOUS)
pvec.append(position)
tvec.append(time.time())
print("Axis", axis, "is", acsc.GetAxisState(hc, axis))
pvec = np.asarray(pvec)
tvec = np.asarray(tvec) - tvec[0]
print("Generating plot")
plt.close('all')
plt.plot(tvec, pvec)
acsc.CloseComm(hc)
| mit |
MTgeophysics/mtpy | mtpy/gui/tstools/tsscene.py | 1 | 15263 |
from PyQt5.QtWidgets import QGraphicsScene
from PyQt5.QtGui import QMouseEvent
from PyQt5.QtGui import QPen
from PyQt5 import QtGui
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtWidgets
import matplotlib
matplotlib.use('Agg')
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt5agg import FigureCanvas
import matplotlib.pyplot as plt
from .tsdata import TSData
from obspy.core.trace import Trace
from obspy.core.stream import Stream
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QTimeLine
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QListWidget
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QVBoxLayout
from obspy.core.utcdatetime import UTCDateTime
from datetime import datetime
from multiprocessing import Queue
import numpy as np
import time
import re
class TSScene(QGraphicsScene):
starttimechanged = pyqtSignal(str)
endtimechanged = pyqtSignal(str)
def __init__(self, parent, width=14, height=12, numofchannel=6):
super(TSScene, self).__init__(parent)
# set waveform windows
figure = Figure()
figure.set_size_inches(width, height)
self.graphwidth = figure.dpi * width
self.canvas = FigureCanvas(figure)
self.addWidget(self.canvas)
self.canvas.mpl_connect('button_press_event',self.button_press_event)
self.canvas.mpl_connect('button_release_event', self.button_release_event)
self.canvas.mpl_connect('motion_notify_event', self.motion_notify_event)
self.canvas.mpl_connect('scroll_event', self.scroll_event)
self.axesavailability = [True for i in range(numofchannel)]
self.axes = []
for i in range(numofchannel):
self.axes.append(figure.add_subplot(str(numofchannel)+'1'+str(i+1)))
# set backend data model
self.data = TSData()
self.visibleWave = {}
self.starttime = None
self.endtime = None
# prepare for user input
self.downxcoord = None
self.wheelactive = False
self.rect = None
self.installEventFilter(self)
self.showgap = False
self.downbutton = None
self.currentxdata = None
self.count = 0
self.state = 'ready'
self.timeline = QTimeLine(1)
self.timeline.setCurrentTime(0)
self.timeline.setUpdateInterval(1)
self.timeline.finished.connect(self.timeshift)
self.timeline.finished.connect(self.animfinished)
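    # Hedged note: the 1 ms QTimeLine configured above acts as a simple throttle
    # for click-and-drag panning: motion_notify_event only starts it when it is
    # idle (currentTime() == 0), and its finished signal runs timeshift() followed
    # by animfinished(), which rewinds the timeline so the next drag event can
    # schedule another redraw.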
def animfinished(self):
self.state = 'ready'
self.timeline.setCurrentTime(0)
def togglegap(self):
        self.showgap = not self.showgap
tmplist = self.visibleWave.copy()
for wave in tmplist:
self.refreshwave(wave,tmplist[wave][1])
# self.togglewave(wave)
# self.togglewave(wave, tmplist[wave][1])
def applytime(self, start: str, end: str):
if self.data is None:
return
for wave in self.visibleWave:
if start<self.visibleWave[wave][3]:
start = self.visibleWave[wave][3]
if end>self.visibleWave[wave][4]:
end = self.visibleWave[wave][4]
self.starttime = UTCDateTime(start)
self.endtime = UTCDateTime(end)
print((self.starttime, self.endtime, '-----------------'))
tmplist = self.visibleWave.copy()
for wave in tmplist:
self.refreshwave(wave, tmplist[wave][1])
# self.togglewave(wave)
# self.togglewave(wave, tmplist[wave][2])
def loadfile(self, filename: str):
self.data.loadFile(filename)
def getlist(self):
return self.data.getlist()
def getsegments(self, item: object):
waves = self.data.getsegments(item.text(0))
wavelist = QListWidget()
for w in waves:
wavelist.addItem(w)
# print(w)
wavelist.itemDoubleClicked.connect(self.segmentselected)
wavelistwindowlayout = QVBoxLayout()
wavelistwindowlayout.addWidget(wavelist)
self.wavelistwindow = QDialog(self.parent())
self.wavelistwindow.setWindowTitle('segments')
self.wavelistwindow.setLayout(wavelistwindowlayout)
self.wavelistwindow.resize(800,600)
self.wavelistwindow.show()
self.segmentsource = item.text(0)
self.currentitem = item
def segmentselected(self, segment: str):
matches = re.match(r'[^ ]+ \| ([^ ]+) - ([^ ]+) \| .*', segment.text(), flags=0)
start = UTCDateTime(matches.group(1))
end = UTCDateTime(matches.group(2))
print(start)
print(end)
if self.segmentsource in self.visibleWave:
self.applytime(start, end)
else:
self.starttime = start
self.endtime = end
print((self.segmentsource))
self.togglewave(self.segmentsource)
self.currentitem.setSelected(True)
def refreshwave(self, wave: str, colorcode:int=0):
if wave in self.visibleWave:
axes, lines, _, _, _, _ = self.visibleWave[wave]
self.removewave(axes, lines)
self.visibleWave.pop(wave, None)
channelid = self.axes.index(axes)
self.axesavailability[channelid] = True
waveform, wavename, starttime, endtime, gaps = self.data.getwaveform(wave, self.starttime, self.endtime)
axes, lines = self.displaywave(wavename, waveform, gaps)
if axes is not None:
self.visibleWave[wave] = (axes, lines, colorcode, starttime, endtime, gaps)
def hidewave(self, wave: str, colorcode:int=0):
if wave in self.visibleWave:
axes, lines, _, _, _, _ = self.visibleWave[wave]
self.removewave(axes, lines)
self.visibleWave.pop(wave, None)
channelid = self.axes.index(axes)
self.axesavailability[channelid] = True
if len(self.visibleWave)==0:
self.starttime = None
self.endtime = None
return True
def showwave(self, wave: str, starttime=None, endtime=None):
if starttime is None or endtime is None:
if wave in self.visibleWave:
pass
else:
self.togglewave(wave)
else:
self.starttime = starttime
self.endtime = endtime
tmplist = self.visibleWave.copy()
for wave in tmplist:
self.refreshwave(wave, tmplist[wave][1])
if wave not in self.visibleWave:
self.togglewave(wave)
def togglewave(self, wave: str, colorcode:int=0):
if wave in self.visibleWave:
axes, lines, _, _, _, _ = self.visibleWave[wave]
self.removewave(axes, lines)
self.visibleWave.pop(wave, None)
channelid = self.axes.index(axes)
self.axesavailability[channelid] = True
if len(self.visibleWave)==0:
self.starttime = None
self.endtime = None
else:
# print(wave)
waveform, wavename, starttime, endtime, gaps = self.data.getwaveform(wave, self.starttime, self.endtime)
print((starttime, endtime))
axes, lines = self.displaywave(wavename, waveform, gaps)
if axes is not None:
self.visibleWave[wave] = (axes, lines, colorcode, starttime, endtime, gaps)
#print("togglewave:", starttime, endtime)
def displaywave(self, wavename: str, waveform: np.array, gaps, colorcode: int=None):
if True not in self.axesavailability:
return None, None
else:
location = self.axesavailability.index(True)
axes = self.axes[location]
self.axesavailability[location] = False
if wavename is not None and waveform is not None:
if colorcode is None:
colorcode = 'C'+str(location%10)
times = waveform[0,:]
span = round(len(times)/4)
if span<1:
span = 1
axes.set_xticks(times[::span])
axes.set_xticklabels([UTCDateTime(t).strftime("%Y-%m-%d %H:%M:%S") for t in times[::span]])
lines = axes.plot(times, waveform[1,:],linestyle="-", label=wavename, color=colorcode)
if self.showgap:
for g in gaps:
if g[4].timestamp>=times[0] and g[5].timestamp<times[-1]:
axes.axvspan(g[4],g[5],facecolor='0.2',alpha=0.5)
axes.legend()
self.canvas.draw()
if self.endtime is not None and self.starttime is not None and len(times)>0:
timewindow = self.endtime-self.starttime
if abs(times[0]-times[-1]-timewindow)/timewindow<0.1:
self.starttime = UTCDateTime(times[0])
self.endtime = self.starttime + timewindow
elif len(times)>0:
self.starttime = UTCDateTime(times[0])
self.endtime = UTCDateTime(times[-1])
self.starttimechanged.emit(self.starttime.strftime("%Y-%m-%d %H:%M:%S"))
self.endtimechanged.emit(self.endtime.strftime("%Y-%m-%d %H:%M:%S"))
return axes, lines
else:
lines = None
axes.legend([wavename])
return axes, lines
def removewave(self, axes: Axes, lines: Line2D):
if lines is not None:
lines.pop(0).remove()
axes.relim()
axes.autoscale_view(True, True, True)
axes.clear()
self.canvas.draw()
def timeshift(self):
if self.downxcoord is None or self.currentxdata is None:
return
shift = self.downxcoord-self.currentxdata
if shift == 0:
print('skipped')
return
if self.starttime is None:
return
starttime = self.starttime + shift
endtime = self.endtime + shift
for wave in self.visibleWave:
if starttime<self.visibleWave[wave][3]:
starttime = self.visibleWave[wave][3]
if endtime>self.visibleWave[wave][4]:
endtime = self.visibleWave[wave][4]
if starttime!=self.starttime and endtime!=self.endtime:
self.starttime = starttime
self.endtime = endtime
tmplist = self.visibleWave.copy()
for wave in tmplist:
self.refreshwave(wave, tmplist[wave][1])
# self.togglewave(wave)
# self.togglewave(wave, tmplist[wave][2])
return
def timescale(self, delta: float):
if self.starttime is None:
return
shift = (self.endtime - self.starttime) * -delta*0.1
starttime = self.starttime + shift
endtime = self.endtime - shift
for wave in self.visibleWave:
if starttime<self.visibleWave[wave][3]:
starttime = self.starttime
if endtime>self.visibleWave[wave][4]:
endtime = self.endtime
if endtime-starttime<0.1:
pass
elif starttime==self.starttime and endtime==self.endtime:
pass
else:
self.starttime = starttime
self.endtime = endtime
tmplist = self.visibleWave.copy()
for wave in tmplist:
self.refreshwave(wave, tmplist[wave][1])
# self.togglewave(wave)
# self.togglewave(wave, tmplist[wave][1])
def button_press_event(self, event):
if self.starttime is None:
return
self.downxcoord = event.xdata
self.downx = event.x
self.downbutton = event.button
self.count = 0
def motion_notify_event(self, event):
# print(event.button, self.starttime, self.downbutton, self.downxcoord, event.xdata)
self.count += 1
self.currentxdata = event.xdata
#print(self.currentxdata,"+" * 10)
if self.starttime is None:
return
elif self.downxcoord is not None:
if self.downbutton == 1 and self.timeline.currentTime()==0:
self.state = 'busy'
self.timeline.start()
elif self.downbutton == 1:
pass
elif self.downbutton == 3:
if self.rect is not None:
self.removeItem(self.rect)
if self.downx < event.x:
self.rect = self.addRect(self.downx, 0, event.x - self.downx, self.height(), pen=QPen(Qt.red))
else:
self.rect = self.addRect(event.x, 0, self.downx - event.x, self.height(), pen=QPen(Qt.red))
def button_release_event(self, event):
if self.starttime is None:
return
if event.button == 3:
left = 225
right = 1215
if self.downxcoord < event.xdata:
start = self.downxcoord
end = event.xdata
else:
start = event.xdata
end = self.downxcoord
start = UTCDateTime(start)
end = UTCDateTime(end)
print((start,end,'================'))
self.applytime(start, end)
# self.downx = None
self.downbutton = None
self.removeItem(self.rect)
self.rect = None
self.downxcoord = None
self.currentxdata = None
#print(self.count,'count!!!!!!!!')
self.count=0
def scroll_event(self, event):
delta = -event.step
if self.wheelactive==False and event.xdata>= self.starttime and event.xdata<= self.endtime:
self.wheelactive = True
self.timescale(delta)
self.wheelactive = False
def exportmetadata(self, filename: tuple):
wavelist = self.getlist()
outfile = open(filename[0]+'.txt','w')
for network in wavelist:
for station in wavelist[network]:
for wave in wavelist[network][station]:
for w in wavelist[network][station][wave]:
outfile.write("%s\n\n" % w)
outfile.close()
def exportwaveform(self, filename: tuple):
traces = []
for wave in self.visibleWave:
fill_value = 'last'
waveform, wavename, starttime, endtime, gaps = self.data.readdisc(wave, self.starttime, self.endtime, resample=False, fill_value=fill_value)
traces.append(waveform)
stream = Stream(traces=traces)
if 'MSEED' in filename[1]:
stream.write(filename[0] + ".mseed", format='MSEED')
elif 'txt' in filename[1]:
stream.write(filename[0] + ".txt", format='TSPAIR')
def gettimeboundary(self):
return self.starttime, self.endtime
return False | gpl-3.0 |
vibhorag/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 56 | 37976 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
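# Illustrative sketch (not an upstream test): for the default 'deviance' loss the
# positive-class probability is assumed to be the logistic transform of the
# decision function; the helper below only documents that relationship.
def _deviance_proba_sketch():
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1).fit(X, y)
    score = clf.decision_function(T).ravel()
    # proba[:, 1] should match the sigmoid of the raw decision scores
    return np.allclose(clf.predict_proba(T)[:, 1], 1.0 / (1.0 + np.exp(-score)))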
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
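# Hedged note: oob_improvement_[i] records the improvement in the loss on the
# out-of-bag samples relative to the previous iteration, so the attribute is only
# available when subsample < 1.0 (see test_oob_improvement_raise below).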
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
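# Hedged note: with warm_start=True a later fit() keeps the already fitted
# estimators_ and only adds stages up to the new n_estimators, which is why the
# 100 -> 200 warm-started model above matches a fresh 200-stage fit.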
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
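# Hedged note: fit() invokes the monitor as monitor(i, self, locals()) after each
# stage; returning True (here, on the 10th iteration) stops training early, which
# is what test_monitor_early_stopping below relies on.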
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/discrete/tests/test_discrete.py | 8 | 55883 | """
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
from statsmodels.compat.python import range
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
import statsmodels.formula.api as smf
from nose import SkipTest
from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
"""
res2 should be the test results from RModelWrap
or the results as defined in model_results_data
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)
def pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
# def test_cov_params(self):
# assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
# DECIMAL_4)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
def test_llnull(self):
assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)
def test_llr_pvalue(self):
assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
DECIMAL_4)
def test_normalized_cov_params(self):
pass
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_dof(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.model.predict(self.res1.params),
self.res2.phat, DECIMAL_4)
def test_predict_xb(self):
assert_almost_equal(self.res1.model.predict(self.res1.params,
linear=True),
self.res2.yhat, DECIMAL_4)
def test_loglikeobs(self):
#basic cross check
llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)
def test_jac(self):
#basic cross check
jacsum = self.res1.model.score_obs(self.res1.params).sum(0)
score = self.res1.model.score(self.res1.params)
assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
def test_pred_table(self):
assert_array_equal(self.res1.pred_table(), self.res2.pred_table)
def test_resid_dev(self):
assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
DECIMAL_4)
def test_resid_generalized(self):
assert_almost_equal(self.res1.resid_generalized,
self.res2.resid_generalized, DECIMAL_4)
def smoke_test_resid_response(self):
self.res1.resid_response
class CheckMargEff(object):
"""
Test marginal effects (margeff) and its options
"""
def test_nodummy_dydxoverall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydx_se, DECIMAL_4)
def test_nodummy_dydxmean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)
def test_nodummy_dydxmedian(self):
me = self.res1.get_margeff(at='median')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)
def test_nodummy_dydxzero(self):
me = self.res1.get_margeff(at='zero')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
                self.res2.margeff_nodummy_dydxzero_se, DECIMAL_4)
def test_nodummy_dyexoverall(self):
me = self.res1.get_margeff(method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyex_se, DECIMAL_4)
def test_nodummy_dyexmean(self):
me = self.res1.get_margeff(at='mean', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)
def test_nodummy_dyexmedian(self):
me = self.res1.get_margeff(at='median', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)
def test_nodummy_dyexzero(self):
me = self.res1.get_margeff(at='zero', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)
def test_nodummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydx_se, DECIMAL_4)
def test_nodummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)
def test_nodummy_eydxmedian(self):
me = self.res1.get_margeff(at='median', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)
def test_nodummy_eydxzero(self):
me = self.res1.get_margeff(at='zero', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)
def test_nodummy_eyexoverall(self):
me = self.res1.get_margeff(method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyex_se, DECIMAL_4)
def test_nodummy_eyexmean(self):
me = self.res1.get_margeff(at='mean', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)
def test_nodummy_eyexmedian(self):
me = self.res1.get_margeff(at='median', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)
def test_nodummy_eyexzero(self):
me = self.res1.get_margeff(at='zero', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)
def test_dummy_dydxoverall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydx_se, DECIMAL_4)
def test_dummy_dydxmean(self):
me = self.res1.get_margeff(at='mean', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)
def test_dummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydx_se, DECIMAL_4)
def test_dummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)
def test_count_dydxoverall(self):
me = self.res1.get_margeff(count=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydx_se, DECIMAL_4)
def test_count_dydxmean(self):
me = self.res1.get_margeff(count=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydxmean_se, DECIMAL_4)
def test_count_dummy_dydxoverall(self):
me = self.res1.get_margeff(count=True, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)
def test_count_dummy_dydxmean(self):
me = self.res1.get_margeff(count=True, dummy=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
class TestProbitNewton(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
#def test_predict(self):
# assert_almost_equal(self.res1.model.predict(self.res1.params),
# self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
# fmin_cg fails to converge on some machines - reparameterize
from statsmodels.tools.transform_model import StandardizeTransform
transf = StandardizeTransform(data.exog)
exog_st = transf(data.exog)
res1_st = Probit(data.endog,
exog_st).fit(method="cg", disp=0, maxiter=1000,
gtol=1e-08)
start_params = transf.transform_params(res1_st.params)
assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)
cls.res1 = Probit(data.endog,
data.exog).fit(start_params=start_params,
method="cg", maxiter=1000,
gtol=1e-05, disp=0)
assert_array_less(cls.res1.mle_retvals['fcalls'], 100)
class TestProbitNCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
disp=0, avextol=1e-8,
warn_convergence=False)
# converges close enough but warnflag is 2 for precision loss
class TestProbitBasinhopping(CheckBinaryResults):
@classmethod
def setupClass(cls):
if not has_basinhopping:
raise SkipTest("Skipped TestProbitBasinhopping since"
" basinhopping solver is not available")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
fit = Probit(data.endog, data.exog).fit
cls.res1 = fit(method="basinhopping", disp=0, niter=5,
minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
"""
For testing results generated with L1 regularization
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(
self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_nnz_params(self):
assert_almost_equal(
self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)
def test_aic(self):
assert_almost_equal(
self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(
self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.probit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10, disp=0)
res2 = DiscreteL1()
res2.mnlogit()
cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.logit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
@classmethod
def setupClass(self):
self.data = sm.datasets.spector.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
        #Compares results from cvxopt to the standard slsqp
if has_cvxopt:
self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
else:
raise SkipTest("Skipped test_cvxopt since cvxopt is not available")
class TestSweepAlphaL1(object):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5],
[0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
def test_sweep_alpha(self):
for i in range(3):
alpha = self.alphas[i, :]
res2 = self.model.fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-10,
trim_mode='off', maxiter=1000)
assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
"""
    Tests compatibility between l1 and unregularized by setting alpha such
that certain parameters should be effectively unregularized, and others
should be ignored by the model.
"""
def test_params(self):
m = self.m
assert_almost_equal(
self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)
# The last entry should be close to zero
# handle extra parameter of NegativeBinomial
kvars = self.res_reg.model.exog.shape[1]
assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)
def test_cov_params(self):
m = self.m
# The restricted cov_params should be equal
assert_almost_equal(
self.res_unreg.cov_params()[:m, :m],
self.res_reg.cov_params()[:m, :m],
DECIMAL_1)
def test_df(self):
assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)
def test_t_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))
t_reg = self.res_reg.t_test(np.eye(kvars + extra))
assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)
assert_almost_equal(np.nan, t_reg.tvalue[m])
def test_f_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])
f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])
assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)
assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
def test_bad_r_matrix(self):
kvars = self.kvars
assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
class TestPoissonL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
class TestNegativeBinomialL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)
rand_exog = sm.add_constant(rand_exog_st, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)
alpha[:cls.m] = 0
alpha[-1] = 0 # don't penalize alpha
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
cls.k_extra = 1 # 1 extra parameter in nb2
class TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,
loglike_method='geometric')
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,
loglike_method='geometric')
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
assert_equal(mod_reg.loglike_method, 'geometric')
class TestLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
disp=0, tol=1e-15, method='bfgs', maxiter=1000)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
def test_f_test(self):
raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
"""
For checking results for l1 regularization.
Assumes self.res1 and self.res2 are two legitimate models to be compared.
"""
def test_basic_results(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
class CompareL11D(CompareL1):
"""
Check t and f tests. This only works for 1-d results
"""
def test_tests(self):
restrictmat = np.eye(len(self.res1.params.ravel()))
assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15,
method='bfgs',
maxiter=1000)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.logit()
cls.res2 = res2
def test_resid_pearson(self):
assert_almost_equal(self.res1.resid_pearson,
self.res2.resid_pearson, 5)
def test_nodummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
def test_nodummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
def test_dummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
def test_dummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.logit()
cls.res2 = res2
cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0)
class TestPoissonNewton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
res2 = RandHIE()
res2.poisson()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_overall_se, DECIMAL_4)
def test_margeff_dummy_overall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_overall_se, DECIMAL_4)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, 2)
def test_predict_prob(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
probs_res = np.loadtxt(os.path.join(cur_dir, "results",
"predict_prob_poisson.csv"), delimiter=",")
# just check the first 100 obs. vs R to save memory
probs = self.res1.predict_prob()[:100]
assert_almost_equal(probs, probs_res, 8)
class TestNegativeBinomialNB2Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
method="newton",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
method='bfgs', disp=0,
maxiter=1000)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
"""
    Cannot find another implementation of the geometric model to cross-check
    results; we only test fitted values because geometric has fewer parameters
    than nb1 and nb2, and we want to make sure that predict()
    (np.dot(exog, params)) works
"""
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_geometric_bfgs()
cls.res2 = res2
# the following are regression tests, could be inherited instead
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_jac(self):
pass
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def no_info(self):
pass
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
test_jac = no_info
class CheckMNLogitBaseZero(CheckModelResults):
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
def test_margeff_mean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
def test_margeff_dummy(self):
data = self.data
vote = data.data['vote']
exog = np.column_stack((data.exog, vote))
exog = sm.add_constant(exog, prepend=False)
res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
me = res.get_margeff(dummy=True)
assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
6)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dydx_dummy_overall_se, 6)
me = res.get_margeff(dummy=True, method="eydx")
assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
5)
assert_almost_equal(me.margeff_se,
self.res2.margeff_eydx_dummy_overall_se, 6)
def test_j(self):
assert_equal(self.res1.model.J, self.res2.J)
def test_k(self):
assert_equal(self.res1.model.K, self.res2.K)
def test_endog_names(self):
assert_equal(self.res1._get_endog_name(None,None)[1],
['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
def test_pred_table(self):
# fitted results taken from gretl
pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
assert_array_equal(self.res1.predict().argmax(1), pred)
# the rows should add up for pred table
assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
# note this is just a regression test, gretl doesn't have a prediction
# table
pred = [[ 126., 41., 2., 0., 0., 12., 19.],
[ 77., 73., 3., 0., 0., 15., 12.],
[ 37., 43., 2., 0., 0., 19., 7.],
[ 12., 9., 1., 0., 0., 9., 6.],
[ 19., 10., 2., 0., 0., 20., 43.],
[ 22., 25., 1., 0., 0., 31., 71.],
[ 9., 7., 1., 0., 0., 18., 140.]]
assert_array_equal(self.res1.pred_table(), pred)
def test_resid(self):
assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
mymodel = MNLogit(data.endog, exog)
cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000,
#m=12, pgtol=1e-7, factr=1e3, # 5 failures
#m=20, pgtol=1e-8, factr=1e2, # 3 failures
#m=30, pgtol=1e-9, factr=1e1, # 1 failure
m=40, pgtol=1e-10, factr=5e0,
loglike_and_score=mymodel.loglike_and_score)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
def test_perfect_prediction():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
iris_dir = os.path.abspath(iris_dir)
iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = sm.add_constant(X, prepend=True)
mod = Logit(y,X)
assert_raises(PerfectSeparationError, mod.fit, maxiter=1000)
#turn off raise PerfectSeparationError
mod.raise_on_perfect_prediction = False
# this will raise if you set maxiter high enough with a singular matrix
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
mod.fit(disp=False, maxiter=50) # should not raise but does warn
def test_poisson_predict():
#GH: 175, make sure poisson predict works without offset and exposure
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=True)
res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
pred1 = res.predict()
pred2 = res.predict(exog)
assert_almost_equal(pred1, pred2)
    #extra options
pred3 = res.predict(exog, offset=0, exposure=1)
assert_almost_equal(pred1, pred3)
pred3 = res.predict(exog, offset=0, exposure=2)
assert_almost_equal(2*pred1, pred3)
pred3 = res.predict(exog, offset=np.log(2), exposure=1)
assert_almost_equal(2*pred1, pred3)
def test_poisson_newton():
#GH: 24, Newton doesn't work well sometimes
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y_count = np.random.poisson(np.exp(x.sum(1)))
mod = sm.Poisson(y_count, x)
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
assert_(not res.mle_retvals['converged'])
def test_issue_339():
# make sure MNLogit summary works for J != K.
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
# strip the header from the test
smry = "\n".join(res1.summary().as_text().split('\n')[9:])
cur_dir = os.path.dirname(os.path.abspath(__file__))
test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
test_case = open(test_case_file, 'r').read()
np.testing.assert_(smry == test_case[:-1])
def test_issue_341():
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
x = exog[0]
np.testing.assert_equal(res1.predict(x).shape, (1,7))
np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(1, 10, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
    X[:,1] = np.random.randint(-10, 10, size=50) # negative values, not a count column
count_ind = _iscount(X)
assert_equal(count_ind, [2, 6])
def test_isdummy():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(0, 2, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
    X[:,1] = np.random.randint(-10, 10, size=50) # negative values, not a dummy column
count_ind = _isdummy(X)
assert_equal(count_ind, [4, 6])
def test_non_binary():
y = [1, 2, 1, 2, 1, 2]
X = np.random.randn(6, 2)
np.testing.assert_raises(ValueError, Logit, y, X)
def test_mnlogit_factor():
dta = sm.datasets.anes96.load_pandas()
dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))
dta.exog['constant'] = 1
mod = sm.MNLogit(dta.endog, dta.exog)
res = mod.fit(disp=0)
# smoke tests
params = res.params
summary = res.summary()
# with patsy
del dta.exog['constant']
mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)
res2 = mod.fit(disp=0)
res2.params
summary = res2.summary()
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
import pandas as pd
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
# should work
mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure'])
assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')
# make sure this raises
exposure = pd.Series(np.random.randn(5))
assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
exposure=exposure)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
| bsd-3-clause |
gbrammer/grizli | grizli/fitting.py | 1 | 179632 | """
Tools for fitting spectra with templates.
"""
import os
import time
import glob
import inspect
from collections import OrderedDict
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import Planck15
import astropy.constants as const
from . import utils, model
#from .model import BeamCutout
from .utils import GRISM_COLORS
# Minimum redshift where IGM is applied
IGM_MINZ = 3.4 # blue edge of G800L
# Default parameters for drizzled line map
PLINE = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
# Default arguments for optional bounded least-squares fits
BOUNDED_DEFAULTS = {'method': 'bvls', 'tol': 1.e-8, 'verbose': 0}
LINE_BOUNDS = [-1.e-16, 1.e-13] # erg/s/cm2
# IGM from eazy-py
try:
import eazy.igm
IGM = eazy.igm.Inoue14()
except:
IGM = None
def run_all_parallel(id, get_output_data=False, args_file='fit_args.npy', protect=True, **kwargs):
"""
Wrapper function for `grizli.fitting.run_all` that preloads all
keyword options from a stored file.
Parameters
----------
id : int
Object id
get_output_data : bool
Return the data produced by `~grizli.fitting.run_all` rather than just
a simple status indicator
args_file : str
        Name of the `numpy` file containing the fit keywords. These include
`root` and `group_name` used for finding the "beams.fits" files for
the given `id` (see `~grizli.fitting.run_all`).
Any additional keywords passed to this function will override the
defaults from `args_file`.
protect : bool
Run the fitter in a ``try/except`` clause so that it doesn't kill
the runtime execution for e.g. a list of `id`. However, with this
set it's much harder to figure out where a given fit failed, so turn
it off to get the full exception traceback
Returns
-------
id, status, t1-t0 : int, bool, float
The input `id`, status flag and execution time if
``get_output_data=False``.
If ``get_output_data==True``, then return everything output by
`~grizli.fitting.run_all` (beams files, tables, etc.)
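    Examples
    --------
    A minimal sketch of intended usage, assuming a ``fit_args.npy`` file and
    the matching ``{group_name}_{id:05d}.beams.fits`` file already exist in
    the working directory; the object id ``42`` is hypothetical:
    >>> from grizli import fitting
    >>> result = fitting.run_all_parallel(42, args_file='fit_args.npy')  # doctest: +SKIP
    >>> obj_id, status, elapsed = result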
"""
import numpy as np
from grizli.fitting import run_all
from grizli import multifit
import traceback
t0 = time.time()
print('Run id={0} with {1}'.format(id, args_file))
try:
args = np.load(args_file)[0]
except:
args = np.load(args_file, allow_pickle=True)[0]
args['verbose'] = False
for k in kwargs:
args[k] = kwargs[k]
fp = open('{0}_{1:05d}.log_par'.format(args['group_name'], id), 'w')
fp.write('{0}_{1:05d}: {2}\n'.format(args['group_name'], id, time.ctime()))
fp.close()
if protect:
try:
#args['zr'] = [0.7, 1.0]
#mb = multifit.MultiBeam('j100025+021651_{0:05d}.beams.fits'.format(id))
out = run_all(id, **args)
if get_output_data:
return out
status = 1
except:
status = -1
trace = traceback.format_exc(limit=2) # , file=fp)
if args['verbose']:
print(trace)
else:
out = run_all(id, **args)
if get_output_data:
return out
status = 1
t1 = time.time()
return id, status, t1-t0
DEFAULT_LINELIST = ['Lya', 'OII', 'Hb', 'OIII', 'Ha',
'Ha+NII', 'SII', 'SIII']
def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], fitter=['nnls', 'bounded'], group_name='grism', fit_stacks=True, only_stacks=False, prior=None, fcontam=0.2, pline=PLINE, min_line_sn=4, mask_sn_limit=np.inf, fit_only_beams=False, fit_beams=True, root='*', fit_trace_shift=False, phot=None, use_phot_obj=True, phot_obj=None, verbose=True, scale_photometry=False, show_beams=True, scale_on_stacked_1d=True, use_cached_templates=True, loglam_1d=True, overlap_threshold=5, MW_EBV=0., sys_err=0.03, huber_delta=4, get_student_logpdf=False, get_dict=False, bad_pa_threshold=1.6, units1d='flam', redshift_only=False, line_size=1.6, use_psf=False, get_line_width=False, sed_args={'bin': 1, 'xlim': [0.3, 9]}, get_ir_psfs=True, min_mask=0.01, min_sens=0.02, mask_resid=True, save_stack=True, full_line_list=DEFAULT_LINELIST, get_line_deviations=True, bounded_kwargs=BOUNDED_DEFAULTS, write_fits_files=True, save_figures=True, fig_type='png', **kwargs):
"""Run the full template-fitting procedure
1) Load MultiBeam and stack files
2) ... tbd
Parameters
----------
id : int
Object ID in the internal catalogs. This is generally an `int`, but
in principle could be a `str` or something else.
t0 : dict
Dictionary of `~grizli.utils.SpectrumTemplate` objects used for the
redshift fits. Generally these will have fixed line ratios to avoid
unphysical line degeneracies (e.g., very strong [SII] without
H-alpha).
If ``None``, then the templates are generated with
>>> t0 = grizli.utils.load_templates(line_complexes=True, fsps_templates=True, fwhm=fwhm)
t1 : dict
Dictionary of `~grizli.utils.SpectrumTemplate` objects used for the
final fit at the best-fit redshift. Generally these will be
separate continuum and individual line templates so that the line
fluxes are determined freely (which are then also needed if you
want to make the drizzled narrowband emission line maps).
If ``None``, then the templates are generated with
>>> t1 = grizli.utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
.. note:: As of `66a3ec5 <https://github.com/gbrammer/grizli/commit/588ad9e174aac5eb5607b78ae0268e3193e0d1f1>`_ all templates can be `eazy.templates.Template` objects.
fwhm : float
Line FWHM passed to `~grizli.utils.load_templates` if `t0` or `t1` not
specified.
zr : [float, float], [float], or 0
Redshift range to fit.
- [z1, z2] - fit on a logarithmic grid between ``z1`` and ``z2`` with
steps specified in `dz`
- [zfix] - fit templates at a specified value
- 0 - fit stellar templates only
dz : [float, float]
Logarithmic step size (1+z) of redshift grid. See
`~grizli.utils.log_zgrid`.
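        For example, the first-pass grid for the default ``zr`` and the coarse
        ``dz`` step could be generated with something like the following
        (a sketch, assuming the `~grizli.utils.log_zgrid` signature takes
        ``zr`` and ``dz``):
        >>> zgrid = grizli.utils.log_zgrid(zr=[0.65, 1.6], dz=0.004)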
fitter : [str, str]
Least squares optimization method ('nnls','lstsq','bounded').
The first option is used for the redshift fit with the `t0` templates
and the second is used for the final fit with the `t1` templates.
- nnls: Generally SPS continuum templates should be fit with ``nnls``
to enforce physical template combinations.
- bounded: Enforces non-negative continuum templates but allows line
templates (with a name starting with ``line [space]``) to be
negative. The bounded fits are controlled with `bounded_kwargs`
and the flux limits set in the global parameter
``grizli.fitting.LINE_BOUNDS``.
- lstsq: Fit with regular least squares, e.g., for PCA templates that
can have negative coefficients (e.g.,
`~grizli.utils.load_sdss_pca_templates`).
bounded_kwargs : dict
Keywords passed to `scipy.optimize.lsq_linear` for 'bounded' fits.
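        The default is the module-level ``BOUNDED_DEFAULTS`` dictionary:
        >>> bounded_kwargs = {'method': 'bvls', 'tol': 1.e-8, 'verbose': 0}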
group_name : str
Passed to `~grizli.multifit.MultiBeam` on initialization
root : str
Basename `~grizli.multifit.MultiBeam` FITS
filenames to search for, e.g., to concatenate separate G141 and G102
files of a single object:
>>> mb_files = glob.glob(f'{root}_{id:05d}.beams.fits')
fit_stacks : bool
Fit redshifts on the stacked spectra, which can be much faster than
for the separate "beams" fits, but where the model generation isn't
as robust. *This is generally deprecated, but should still run*.
only_stacks : bool
Only fit the stacks.
prior : None, (array, array)
Redshift prior (z, pz) passed to
`~grizli.fitting.GroupFitter.xfit_redshift`.
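        For example, a flat prior over an explicit redshift grid could be
        passed as the following (an illustrative sketch, not a recommended
        prior):
        >>> import numpy as np
        >>> zg = np.arange(0.1, 3.0, 0.001)
        >>> prior = (zg, np.ones_like(zg))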
fcontam, min_mask, min_sens, mask_resid : float, float, float, bool
Contamination weighting passed to `~grizli.multifit.MultiBeam`
pline : dict
Parameters for drizzled line maps.
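        The default is the module-level ``PLINE`` dictionary:
        >>> pline = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1,
        ...          'size': 8, 'wcs': None}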
min_line_sn : float
If finite, then pass to `~grizli.multifit.MultiBeam.drizzle_fit_lines`
to determine which line maps to create.
mask_sn_limit : float
SN limit to pass to `~grizli.multifit.MultiBeam.drizzle_fit_lines`
fit_only_beams : bool
If True, only fit with `~grizli.multifit.MultiBeam` objects.
fit_beams : bool
Fit with `~grizli.multifit.MultiBeam` objects.
fit_trace_shift : bool
Fit for shifts of the traces for each group of beams.
phot : None, dict
Photometry dictionary passed to
`~grizli.fitting.GroupFitter.set_photometry`
use_phot_obj : bool
Use `phot_obj` if it is available.
phot_obj : None, `~grizli.pipeline.photoz.EazyPhot`
Catalog object for automatically generating `phot` dictionaries
verbose : bool
Some control over the runtime verbosity
scale_photometry : bool
If photometry is available, try to normalize the spectra and
photometry.
show_beams, scale_on_stacked_1d, loglam_1d : bool, bool, bool
Passed to `~grizli.fitting.GroupFitter.xmake_fit_plot` for the final
redshift fit plot.
use_cached_templates : bool
Passed to `~grizli.fitting.GroupFitter.xfit_at_z`
overlap_threshold : float
Parameter for `~grizli.stack.StackFitter` when fitting on stacks.
MW_EBV : float
Galactic extinction E(B-V) (mag)
sys_err : float
Systematic error used for the spectra and photometry, multiplied to
the flux densities and added in quadrature to the nominal
uncertainties.
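Schematically, the uncertainties used in the fits become
>>> full_err = np.sqrt(nominal_err**2 + (sys_err*flux)**2)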
huber_delta : float
Passed to `~grizli.fitting.GroupFitter.xfit_at_z` for using a Huber
loss function.
get_student_logpdf : bool
Use Student-t likelihood on `~grizli.fitting.GroupFitter.redshift_fit`
get_dict : bool
Don't actually run anything, just return a dictionary with all of
the keyword parameters passed to the function
bad_pa_threshold : float
Threshold for identifying bad PAs when using
`~grizli.stack.StackFitter` objects (not beams)
units1d : str
Not used
redshift_only : bool
Just run the redshift fit, don't drizzle the line maps
line_size : float
Cutout size in arcsec of the line map figures.
use_psf : bool
Initialize the `~grizli.multifit.MultiBeam` objects with ``psf=True``
to fit the morphology using the `~grizli.utils.EffectivePSF` models.
get_line_width : bool
Try to fit for emission line velocity widths (developmental)
sed_args : dict
Keyword arguments passed to `~grizli.fitting.full_sed_plot` when
photometry + spectra are available
get_ir_psfs : bool
Include PSF extensions in the drizzled line maps derived from
the `~grizli.utils.EffectivePSF` models.
save_stack : bool
Generate a ``stack.fits`` file from the beams fit
full_line_list : list
Line list passed to `~grizli.fitting.show_drizzled_lines` to determine
which lines are always included in the drizzled line maps.
get_line_deviations : bool
Check plausibility of fit coefficients with
`~grizli.fitting.GroupFitter.check_tfit_coeffs`
write_fits_files : bool
Save 'full.fits' and 'stack.fits' files
save_figures, fig_type : bool, str
Save diagnostic figure files with extension `fig_type`
Returns
-------
mb : `~grizli.multifit.MultiBeam`
The beams object used for the redshift / template fits
st : `~grizli.stack.StackFitter`
The stacked spectrum object generated from the 'beams'
fit : `astropy.table.Table`
Table with the fit results
tfit : dict
Various parameters of the template fit at the final redshift
line_hdu : `~astropy.io.fits.HDUList`
Drizzled line maps
"""
import glob
import matplotlib.pyplot as plt
import grizli.multifit
from grizli.stack import StackFitter
from grizli.multifit import MultiBeam
#from . import __version__ as grizli__version
from .version import __long_version__ as grizli__version
from .pipeline import summary
if get_dict:
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
for k in ['id', 'get_dict', 'frame', 'glob', 'plt', 'grizli', 'summary', 'StackFitter', 'MultiBeam']:
if k in args:
args.pop(k)
return args
mb_files = glob.glob('{0}_{1:05d}.beams.fits'.format(root, id))
st_files = glob.glob('{0}_{1:05d}.stack.fits'.format(root, id))
# Allow for fitter to be a string, or a 2-list with different
# values for the redshift and final fits
if isinstance(fitter, str):
fitter = [fitter, fitter]
if not only_stacks:
mb = MultiBeam(mb_files, fcontam=fcontam, group_name=group_name, MW_EBV=MW_EBV, sys_err=sys_err, verbose=verbose, psf=use_psf, min_mask=min_mask, min_sens=min_sens, mask_resid=mask_resid)
if bad_pa_threshold > 0:
# Check for PAs with unflagged contamination or otherwise
# discrepant fit
out = mb.check_for_bad_PAs(chi2_threshold=bad_pa_threshold,
poly_order=1, reinit=True,
fit_background=True)
fit_log, keep_dict, has_bad = out
if has_bad:
if verbose:
msg = '\nHas bad PA! Final list: {0}\n{1}'
print(msg.format(keep_dict, fit_log))
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam,
flambda=False, kernel='point',
size=32, diff=False)
if save_figures:
fig.savefig('{0}_{1:05d}.fix.stack.{2}'.format(group_name,
id, fig_type))
else:
plt.close(fig)
good_PAs = []
for k in keep_dict:
good_PAs.extend(keep_dict[k])
else:
good_PAs = None # All good
else:
good_PAs = None
else:
good_PAs = None # All good
redshift_only = True # can't drizzle line maps from stacks
if fit_only_beams:
st = None
else:
st = StackFitter(st_files, fit_stacks=fit_stacks, group_name=group_name, fcontam=fcontam, overlap_threshold=overlap_threshold, MW_EBV=MW_EBV, verbose=verbose, sys_err=sys_err, PAs=good_PAs, chi2_threshold=bad_pa_threshold)
st.initialize_masked_arrays()
if only_stacks:
mb = st
if not only_stacks:
if fit_trace_shift:
b = mb.beams[0]
b.compute_model()
sn_lim = fit_trace_shift*1
if (np.max((b.model/b.grism['ERR'])[b.fit_mask.reshape(b.sh)]) > sn_lim) | (sn_lim > 100):
if verbose:
print('Trace shift\n')
shift, _ = mb.fit_trace_shift(tol=1.e-3, verbose=verbose,
split_groups=True)
mb.initialize_masked_arrays()
# Get photometry from phot_obj
zspec = None
if (phot is None) & (phot_obj is not None) & (use_phot_obj):
phot_i, ii, dd = phot_obj.get_phot_dict(mb.ra, mb.dec)
if dd < 0.5*u.arcsec:
if verbose:
print('Match photometry object ix={0}, dr={1:.1f}'.format(ii, dd))
if phot_i['flam'] is not None:
phot = phot_i
else:
if 'pz' in phot_i:
if phot_i['pz'] is not None:
prior = phot_i['pz']
sed_args['photometry_pz'] = phot_i['pz']
if 'z_spec' in phot_i:
if phot_i['z_spec'] >= 0:
sed_args['zspec'] = phot_i['z_spec']*1
zspec = sed_args['zspec']
if verbose:
print('zspec = {0:.4f}'.format(zspec))
if prior is not None:
if verbose:
zpr = prior[0][np.argmax(prior[1])]
print('Use supplied prior, z[max(pz)] = {0:.3f}'.format(zpr))
if phot is not None:
if phot == 'vizier':
# Get photometry from Vizier catalogs
vizier_catalog = list(utils.VIZIER_BANDS.keys())
phot = utils.get_Vizier_photometry(mb.ra, mb.dec, verbose=verbose,
vizier_catalog=vizier_catalog)
if phot is not None:
zgrid = utils.log_zgrid(zr=zr, dz=0.005)
phot['tempfilt'] = utils.generate_tempfilt(t0,
phot['filters'],
zgrid=zgrid,
MW_EBV=MW_EBV)
if phot is not None:
if st is not None:
st.set_photometry(min_err=sys_err, **phot)
mb.set_photometry(min_err=sys_err, **phot)
if t0 is None:
t0 = utils.load_templates(line_complexes=True, fsps_templates=True, fwhm=fwhm)
if t1 is None:
t1 = utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
# Fit on stacked spectra or individual beams
if fit_only_beams:
fit_obj = mb
else:
fit_obj = st
# Do scaling now with direct spectrum function
if (scale_photometry > 0) & (phot is not None):
try:
scl = mb.scale_to_photometry(z=0, method='lm', order=scale_photometry*1-1, tol=1.e-4, init=None, fit_background=True, Rspline=50, use_fit=True)
# tfit=None, tol=1.e-4, order=0, init=None, fit_background=True, Rspline=50, use_fit=True
except:
scl = [10.]
if hasattr(scl, 'status'):
if scl.status > 0:
print('scale_to_photometry: [{0}]'.format(', '.join(['{0:.2f}'.format(x_i) for x_i in scl.x])))
mb.pscale = scl.x
if st is not None:
st.pscale = scl.x
# First pass
fit_obj.Asave = {}
fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter[0], verbose=verbose, bounded_kwargs=bounded_kwargs, huber_delta=huber_delta, get_student_logpdf=get_student_logpdf)
fit_hdu = pyfits.table_to_hdu(fit)
fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
if hasattr(fit_obj, 'pscale'):
fit_hdu.header['PSCALEN'] = (len(fit_obj.pscale)-1, 'PSCALE order')
for i, p in enumerate(fit_obj.pscale):
fit_hdu.header['PSCALE{0}'.format(i)] = (p, 'PSCALE parameter {0}'.format(i))
# Add photometry information
if (fit_obj.Nphot > 0) & hasattr(fit_obj, 'photom_filters'):
h = fit_hdu.header
h['NPHOT'] = fit_obj.Nphot, 'Number of photometry filters'
h['PHOTSRC'] = fit_obj.photom_source, 'Source of the photometry'
for i in range(len(fit_obj.photom_filters)):
h['PHOTN{0:03d}'.format(i)] = fit_obj.photom_filters[i].name.split()[0], 'Filter {0} name'.format(i)
h['PHOTL{0:03d}'.format(i)] = fit_obj.photom_pivot[i], 'Filter {0} pivot wavelength'.format(i)
h['PHOTF{0:03d}'.format(i)] = fit_obj.photom_flam[i], 'Filter {0} flux flam'.format(i)
h['PHOTE{0:03d}'.format(i)] = fit_obj.photom_eflam[i], 'Filter {0} err flam'.format(i)
# # Second pass if rescaling spectrum to photometry
# if scale_photometry:
# scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1-1)
# if scl.status > 0:
# mb.pscale = scl.x
# if st is not None:
# st.pscale = scl.x
#
# fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
# fit_hdu = pyfits.table_to_hdu(fit)
# fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
# Zoom-in fit with individual beams
if fit_beams:
#z0 = fit.meta['Z50'][0]
z0 = fit.meta['z_map'][0]
#width = np.maximum(3*fit.meta['ZWIDTH1'][0], 3*0.001*(1+z0))
width = 20*0.001*(1+z0)
mb_zr = z0 + width*np.array([-1, 1])
mb.Asave = {}
mb_fit = mb.xfit_redshift(templates=t0, zr=mb_zr, dz=[0.001, 0.0002],
prior=prior, fitter=fitter[0],
verbose=verbose, huber_delta=huber_delta,
get_student_logpdf=get_student_logpdf,
bounded_kwargs=bounded_kwargs)
mb_fit_hdu = pyfits.table_to_hdu(mb_fit)
mb_fit_hdu.header['EXTNAME'] = 'ZFIT_BEAM'
else:
mb_fit = fit
# Get best-fit template
mb.Asave = {}
tfit = mb.template_at_z(z=mb_fit.meta['z_map'][0], templates=t1,
fit_background=True, fitter=fitter[-1],
bounded_kwargs=bounded_kwargs,
use_cached_templates=True)
has_spline = False
for t in t1:
if ' spline' in t:
has_spline = True
if has_spline:
tfit = mb.template_at_z(z=mb_fit.meta['z_map'][0], templates=t1,
fit_background=True, fitter=fitter[-1],
bounded_kwargs=bounded_kwargs,
use_cached_templates=True)
fit_hdu.header['CHI2_MAP'] = tfit['chi2'], 'Chi2 at z=z_map'
# Redrizzle? ... testing
if False:
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam,
flambda=False,
size=48, scale=1.,
kernel='point', pixfrac=0.1,
tfit=tfit)
# Fit covariance
cov_hdu = pyfits.ImageHDU(data=tfit['covar'], name='COVAR')
Next = mb_fit.meta['N']
cov_hdu.header['N'] = Next
# Get line deviations if multiple PAs/Grisms
# max_line, max_line_diff, compare = tfit_coeffs_res
if get_line_deviations:
tfit_coeffs_res = mb.check_tfit_coeffs(tfit, t1, fitter=fitter[1],
fit_background=True,
bounded_kwargs=bounded_kwargs,
refit_others=True)
cov_hdu.header['DLINEID'] = (tfit_coeffs_res[0], 'Line with maximum deviation')
cov_hdu.header['DLINESN'] = (tfit_coeffs_res[1], 'Maximum line deviation, sigmas')
# Line EWs & fluxes
coeffs_clip = tfit['coeffs'][mb.N:]
covar_clip = tfit['covar'][mb.N:, mb.N:]
lineEW = utils.compute_equivalent_widths(t1, coeffs_clip, covar_clip, max_R=5000, Ndraw=1000, z=tfit['z'])
for ik, key in enumerate(lineEW):
for j in range(3):
if not np.isfinite(lineEW[key][j]):
lineEW[key][j] = -1.e30
cov_hdu.header['FLUX_{0:03d}'.format(ik)] = tfit['cfit'][key][0], '{0} line flux; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['ERR_{0:03d}'.format(ik)] = tfit['cfit'][key][1], '{0} line uncertainty; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['EW16_{0:03d}'.format(ik)] = lineEW[key][0], 'Rest-frame {0} EW, 16th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW50_{0:03d}'.format(ik)] = lineEW[key][1], 'Rest-frame {0} EW, 50th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW84_{0:03d}'.format(ik)] = lineEW[key][2], 'Rest-frame {0} EW, 84th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EWHW_{0:03d}'.format(ik)] = (lineEW[key][2]-lineEW[key][0])/2, 'Rest-frame {0} EW, 1-sigma half-width; Angstrom'.format(key.strip('line '))
# Velocity width
if get_line_width:
if phot is not None:
mb.unset_photometry()
vel_width_res = mb.fit_line_width(z0=tfit['z'], bl=1.2, nl=1.2)
if verbose:
print('Velocity width: BL/NL = {0:.0f}/{1:.0f}, z={2:.4f}\n'.format(vel_width_res[0]*1000, vel_width_res[1]*1000, vel_width_res[2]))
fit_hdu.header['VEL_BL'] = vel_width_res[0]*1000, 'Broad line FWHM'
fit_hdu.header['VEL_NL'] = vel_width_res[1]*1000, 'Narrow line FWHM'
fit_hdu.header['VEL_Z'] = vel_width_res[2], 'Line width, best redshift'
fit_hdu.header['VEL_NFEV'] = vel_width_res[3], 'Line width, NFEV'
fit_hdu.header['VEL_FLAG'] = vel_width_res[4], 'Line width, fit flag'
if phot is not None:
mb.set_photometry(**phot)
# D4000
if (3700*(1+tfit['z']) > mb.wave_mask.min()) & (4200*(1+tfit['z']) < mb.wave_mask.max()):
if phot is not None:
mb.unset_photometry()
# D4000
res = mb.compute_D4000(tfit['z'], fit_background=True,
fit_type='D4000', fitter='lstsq')
_, _, _, d4000, d4000_sigma = res
fit_hdu.header['D4000'] = (d4000, 'Derived D4000 at Z_MAP')
fit_hdu.header['D4000_E'] = (d4000_sigma, 'Derived D4000 uncertainty')
res = mb.compute_D4000(tfit['z'], fit_background=True,
fit_type='Dn4000', fitter='lstsq')
_, _, _, dn4000, dn4000_sigma = res
fit_hdu.header['DN4000'] = (dn4000, 'Derived Dn4000 at Z_MAP')
fit_hdu.header['DN4000_E'] = (dn4000_sigma, 'Derived Dn4000 uncertainty')
if phot is not None:
mb.set_photometry(**phot)
else:
fit_hdu.header['D4000'] = (-99, 'Derived D4000 at Z_MAP')
fit_hdu.header['D4000_E'] = (-99, 'Derived D4000 uncertainty')
fit_hdu.header['DN4000'] = (-99, 'Derived Dn4000 at Z_MAP')
fit_hdu.header['DN4000_E'] = (-99, 'Derived Dn4000 uncertainty')
# Best-fit template itself
tfit_sp = utils.GTable()
for ik, key in enumerate(tfit['cfit']):
for save in [tfit_sp.meta]:
save['CVAL{0:03d}'.format(ik)] = tfit['cfit'][key][0], 'Coefficient for {0}'.format(key)
save['CERR{0:03d}'.format(ik)] = tfit['cfit'][key][1], 'Uncertainty for {0}'.format(key)
save['CNAME{0:03d}'.format(ik)] = key, 'Template name'
tfit_sp['wave'] = tfit['cont1d'].wave
tfit_sp['continuum'] = tfit['cont1d'].flux
tfit_sp['full'] = tfit['line1d'].flux
tfit_sp['wave'].unit = tfit['cont1d'].waveunits
tfit_sp['continuum'].unit = tfit['cont1d'].fluxunits
tfit_sp['full'].unit = tfit['line1d'].fluxunits
tfit_hdu = pyfits.table_to_hdu(tfit_sp)
tfit_hdu.header['EXTNAME'] = 'TEMPL'
# Make the plot
fig = mb.xmake_fit_plot(mb_fit, tfit, show_beams=show_beams, scale_on_stacked_1d=scale_on_stacked_1d, loglam_1d=loglam_1d, zspec=zspec)
# Add prior
if prior is not None:
fig.axes[0].plot(prior[0], np.log10(prior[1]), color='#1f77b4', alpha=0.5)
# Add stack fit to the existing plot
# fig.axes[0].plot(fit['zgrid'], np.log10(fit['pdf']), color='0.5', alpha=0.5)
# fig.axes[0].set_xlim(fit['zgrid'].min(), fit['zgrid'].max())
axz = fig.axes[0]
zmi, zma = fit['zgrid'].min(), fit['zgrid'].max()
if (zma-zmi) > 5:
ticks = np.arange(np.ceil(zmi), np.floor(zma), 1)
lz = np.log(1+fit['zgrid'])
axz.plot(lz, np.log10(fit['pdf']), color='0.5', alpha=0.5)
axz.set_xticks(np.log(1+ticks))
axz.set_xticklabels(np.cast[int](ticks))
axz.set_xlim(lz.min(), lz.max())
else:
axz.plot(fit['zgrid'], np.log10(fit['pdf']), color='0.5', alpha=0.5)
axz.set_xlim(zmi, zma)
if phot is not None:
fig.axes[1].errorbar(mb.photom_pivot/1.e4, mb.photom_flam/1.e-19, mb.photom_eflam/1.e-19, marker='s', alpha=0.5, color='k', linestyle='None')
#fig.axes[1].plot(tfit['line1d'].wave/1.e4, tfit['line1d'].flux/1.e-19, color='k', alpha=0.2, zorder=100)
# Save the figure
if save_figures:
fig.savefig('{0}_{1:05d}.full.{2}'.format(group_name, id, fig_type))
if only_stacks:
# Need to make output with just the stack results
line_hdu = pyfits.HDUList([pyfits.PrimaryHDU(header=st.h0)])
line_hdu.insert(1, fit_hdu)
line_hdu.insert(2, cov_hdu)
if fit_beams:
line_hdu.insert(2, mb_fit_hdu)
line_hdu.insert(3, tfit_hdu)
if write_fits_files:
line_hdu.writeto('{0}_{1:05d}.sfull.fits'.format(group_name, id),
overwrite=True, output_verify='fix')
else:
line_hdu = None
if redshift_only:
return mb, st, fit, tfit, line_hdu
# Make the line maps
if pline is None:
pzfit, pspec2, pline = grizli.multifit.get_redshift_fit_defaults()
if np.isfinite(min_line_sn):
line_hdu = mb.drizzle_fit_lines(tfit, pline,
force_line=utils.DEFAULT_LINE_LIST,
save_fits=False, mask_lines=True,
min_line_sn=min_line_sn,
mask_sn_limit=mask_sn_limit,
verbose=verbose, get_ir_psfs=get_ir_psfs)
else:
line_hdu = mb.make_simple_hdulist()
# Add beam exposure times
nexposures, exptime = mb.compute_exptime()
line_hdu[0].header['GRIZLIV'] = (grizli__version, 'Grizli version')
for k in exptime:
line_hdu[0].header['T_{0}'.format(k)] = (exptime[k], 'Total exposure time [s]')
line_hdu[0].header['N_{0}'.format(k)] = (nexposures[k], 'Number of individual exposures')
for gr in mb.PA:
line_hdu[0].header['P_{0}'.format(gr)] = (len(mb.PA[gr]), 'Number of PAs')
line_hdu.insert(1, fit_hdu)
line_hdu.insert(2, cov_hdu)
if fit_beams:
line_hdu.insert(2, mb_fit_hdu)
line_hdu.insert(3, tfit_hdu)
if write_fits_files:
full_file = '{0}_{1:05d}.full.fits'.format(group_name, id)
line_hdu.writeto(full_file, overwrite=True, output_verify='fix')
# Row for summary table
info = summary.summary_catalog(dzbin=None, filter_bandpasses=[],
files=[full_file])
info['grizli_version'] = grizli__version
row_file = '{0}_{1:05d}.row.fits'.format(group_name, id)
info.write(row_file, overwrite=True)
# 1D spectrum
oned_hdul = mb.oned_spectrum_to_hdu(tfit=tfit, bin=1, outputfile='{0}_{1:05d}.1D.fits'.format(group_name, id), loglam=loglam_1d) # , units=units1d)
oned_hdul[0].header['GRIZLIV'] = (grizli__version, 'Grizli version')
if save_stack:
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam, flambda=False,
kernel='point', size=32,
tfit=tfit, diff=False)
hdu[0].header['GRIZLIV'] = (grizli__version, 'Grizli version')
if save_figures:
fig.savefig('{0}_{1:05d}.stack.{2}'.format(group_name, id,
fig_type))
if write_fits_files:
hdu.writeto('{0}_{1:05d}.stack.fits'.format(group_name, id),
overwrite=True)
hdu_stack = hdu
else:
hdu_stack = None
######
# Show the drizzled lines and direct image cutout, which are
# extensions `DSCI`, `LINE`, etc.
if 'DSCI' in line_hdu:
s, si = 1, line_size
s = 4.e-19/np.max([beam.beam.total_flux for beam in mb.beams])
s = np.clip(s, 0.25, 4)
s /= (pline['pixscale']/0.1)**2
fig = show_drizzled_lines(line_hdu, size_arcsec=si, cmap='plasma_r',
scale=s, dscale=s,
full_line_list=full_line_list)
if save_figures:
fig.savefig('{0}_{1:05d}.line.{2}'.format(group_name, id,
fig_type))
if phot is not None:
out = mb, st, fit, tfit, line_hdu
if 'pz' in phot:
full_sed_plot(mb, tfit, zfit=fit, photometry_pz=phot['pz'],
save=fig_type, **sed_args)
else:
full_sed_plot(mb, tfit, zfit=fit, save=fig_type, **sed_args)
return mb, st, fit, tfit, line_hdu
###################################
def full_sed_plot(mb, tfit, zfit=None, bin=1, minor=0.1, save='png', sed_resolution=180, photometry_pz=None, zspec=None, spectrum_steps=False, xlim=[0.3, 9], **kwargs):
"""
Make a separate plot showing photometry and the spectrum
Parameters
----------
mb : `~grizli.multifit.MultiBeam`
Object containing the beams spectra.
tfit : dict
Dictionary of fit results (templates, coefficients, etc) from
`~grizli.fitting.GroupFitter.template_at_z`
zfit : `~astropy.table.Table`
Redshift fit information used to draw p(z) panel (this is `fit`
as output by `~grizli.fitting.run_all`)
bin : float
Binning factor relative to nominal spectral sampling of each grism
minor : float
Ticks on wavelength axis (microns)
save : str
Extension of figure file to save
sed_resolution : float
Smooth the 1D template before plotting with resolution R = lam/dlam
photometry_pz : (float, float)
p(z) for the photometry fit alone, as output by, e.g., `eazy`.
zspec : float
(external) spectroscopic redshift that will be indicated on the plot
spectrum_steps : bool
Plot grism spectra as steps rather than scatter points
xlim : (float, float)
Wavelength limits (microns)
Returns
-------
fig : `matplotlib.figure.Figure`
The figure object
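Example usage with the outputs of `~grizli.fitting.run_all` (schematic):
>>> mb, st, fit, tfit, line_hdu = run_all(id, phot=phot, **kwargs)
>>> fig = full_sed_plot(mb, tfit, zfit=fit)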
"""
#import seaborn as sns
try:
import prospect.utils.smoothing
has_prospect = True
except:
has_prospect = False
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.gridspec as gridspec
#mpl_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
mpl_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# sns_colors = colors = sns.color_palette("cubehelix", 8)
# seaborn cubehelix colors
sns_colors = colors = [(0.1036, 0.094, 0.206),
(0.0825, 0.272, 0.307),
(0.1700, 0.436, 0.223),
(0.4587, 0.480, 0.199),
(0.7576, 0.476, 0.437),
(0.8299, 0.563, 0.776),
(0.7638, 0.757, 0.949),
(0.8106, 0.921, 0.937)]
# Best-fit
#mb = out[0]
#zfit = out[2]
#tfit = out[3]
t1 = tfit['templates']
best_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux])
flat_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux*0+1])
bg = mb.get_flat_background(tfit['coeffs'])
sp = mb.optimal_extract(mb.scif[mb.fit_mask][:-mb.Nphot] - bg, bin=bin) # ['G141']
spm = mb.optimal_extract(best_model, bin=bin) # ['G141']
spf = mb.optimal_extract(flat_model, bin=bin) # ['G141']
# Photometry
A_phot = mb._interpolate_photometry(z=tfit['z'], templates=t1)
A_model = A_phot.T.dot(tfit['coeffs'])
photom_mask = mb.photom_eflam > -98
A_model /= mb.photom_ext_corr[photom_mask]
##########
# Figure
if True:
if zfit is not None:
fig = plt.figure(figsize=[11, 9./3])
gs = gridspec.GridSpec(1, 3, width_ratios=[1, 1.5, 1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
else:
fig = plt.figure(figsize=[9, 9./3])
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1.5])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
else:
gs = None
fig = plt.figure(figsize=[9, 9./3])
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Photometry SED
ax1.errorbar(np.log10(mb.photom_pivot[photom_mask]/1.e4), (mb.photom_flam/mb.photom_ext_corr)[photom_mask]/1.e-19, (mb.photom_eflam/mb.photom_ext_corr)[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=30)
if has_prospect:
sm = prospect.utils.smoothing.smoothspec(tfit['line1d'].wave, tfit['line1d'].flux, resolution=sed_resolution, smoothtype='R') # nsigma=10, inres=10)
else:
sm = tfit['line1d']
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=11)
yl1 = ax1.get_ylim()
ax1.plot(np.log10(tfit['line1d'].wave/1.e4), sm/1.e-19, color=sns_colors[4], linewidth=1, zorder=0)
# ax1.grid()
ax1.set_xlabel(r'$\lambda$ / $\mu$m')
ax2.set_xlabel(r'$\lambda$ / $\mu$m')
# Spectrum
ymax, ymin = -1e30, 1e30
for g in sp:
sn = sp[g]['flux']/sp[g]['err']
clip = sn > 3
clip = spf[g]['flux'] > 0.2*spf[g]['flux'].max()
try:
scale = mb.compute_scale_array(mb.pscale, sp[g]['wave'])
except:
scale = 1
ax2.errorbar(sp[g]['wave'][clip]/1.e4, (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.5, linestyle='None', elinewidth=0.5, zorder=11)
if spectrum_steps:
try:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, linestyle='steps-mid')
except:
ax2.step(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10)
else:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, marker='.')
ymax = np.maximum(ymax, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].max())
ymin = np.minimum(ymin, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].min())
ax1.errorbar(np.log10(sp[g]['wave'][clip]/1.e4), (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.2, linestyle='None', elinewidth=0.5, zorder=-100)
xl, yl = ax2.get_xlim(), ax2.get_ylim()
yl = (ymin-0.3*ymax, 1.3*ymax)
# SED x range
if xlim is None:
okphot = (mb.photom_eflam > 0)
xlim = [np.minimum(xl[0]*0.7, 0.7*mb.photom_pivot[okphot].min()/1.e4), np.maximum(xl[1]/0.7, mb.photom_pivot[okphot].max()/1.e4/0.7)]
ax1.set_xlim(np.log10(xlim[0]), np.log10(xlim[1]))
ticks = np.array([0.5, 1, 2, 4, 8])
ticks = ticks[(ticks >= xlim[0]) & (ticks <= xlim[1])]
ax1.set_xticks(np.log10(ticks))
ax1.set_xticklabels(ticks)
# Back to spectrum
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=11)
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=12)
ax2.errorbar(mb.photom_pivot[photom_mask]/1.e4, mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=20)
ax2.set_xlim(xl)
ax2.set_ylim(yl)
ax2.set_yticklabels([])
#ax2.set_xticks(np.arange(1.1, 1.8, 0.1))
#ax2.set_xticklabels([1.1, '', 1.3, '', 1.5, '', 1.7])
ax2.xaxis.set_minor_locator(MultipleLocator(minor))
ax2.xaxis.set_major_locator(MultipleLocator(minor*2))
# Show spectrum range on SED panel
xb, yb = np.array([0, 1, 1, 0, 0]), np.array([0, 0, 1, 1, 0])
ax1.plot(np.log10(xl[0]+xb*(xl[1]-xl[0])), yl[0]+yb*(yl[1]-yl[0]), linestyle=':', color='k', alpha=0.4)
ymax = np.maximum(yl1[1], yl[1]+0.02*(yl[1]-yl[0]))
ax1.set_ylim(-0.1*ymax, ymax)
tick_diff = np.diff(ax1.get_yticks())[0]
ax2.yaxis.set_major_locator(MultipleLocator(tick_diff))
# ax2.set_yticklabels([])
for ax in [ax1, ax2]:
if ax.get_ylim()[0] < 0:
ax.hlines(0, ax.get_xlim()[0], ax.get_xlim()[1], color='k', zorder=-100, alpha=0.3, linestyle='--')
##########
# P(z)
if zfit is not None:
if photometry_pz is not None:
ax3.plot(photometry_pz[0], np.log10(photometry_pz[1]), color=mpl_colors[0])
ax3.plot(zfit['zgrid'], np.log10(zfit['pdf']), color=sns_colors[0])
ax3.fill_between(zfit['zgrid'], np.log10(zfit['pdf']), np.log10(zfit['pdf'])*0-100, color=sns_colors[0], alpha=0.3)
ax3.set_xlim(zfit['zgrid'].min(), zfit['zgrid'].max())
ax3.set_ylim(-3, 2.9) # np.log10(zfit['pdf']).max())
ax3.set_ylabel(r'log $p(z)$')
ax3.set_xlabel(r'$z$')
ax3.grid()
ax1.set_ylabel(r'$f_\lambda$ [$10^{-19}$ erg/s/cm2/A]')
axt = ax2
axt.text(0.95, 0.95, r'$z_\mathrm{grism}$='+'{0:.3f}'.format(tfit['z']), ha='right', va='top', transform=axt.transAxes, color=sns_colors[0], size=10) # , backgroundcolor='w')
if zspec is not None:
axt.text(0.95, 0.89, r'$z_\mathrm{spec}$='+'{0:.3f}'.format(zspec), ha='right', va='top', transform=axt.transAxes, color='r', size=10)
if zfit is not None:
ax3.scatter(zspec, 2.7, color='r', marker='v', zorder=100)
axt.text(0.05, 0.95, '{0}: {1:>6d}'.format(mb.group_name, mb.id), ha='left', va='top', transform=axt.transAxes, color='k', size=10) # , backgroundcolor='w')
# axt.text(0.05, 0.89, '{0:>6d}'.format(mb.id), ha='left', va='top', transform=axt.transAxes, color='k', size=10)#, backgroundcolor='w')
if gs is None:
fig.tight_layout(pad=0.1)
else:
if zfit is not None:
fig.tight_layout(pad=0.1)
else:
fig.tight_layout(pad=0.5)
if save:
fig.savefig('{0}_{1:05d}.sed.{2}'.format(mb.group_name, mb.id, save))
return fig
CDF_SIGMAS = np.linspace(-5, 5, 51)
def compute_cdf_percentiles(fit, cdf_sigmas=CDF_SIGMAS):
"""
Compute tabulated percentiles of the CDF for a (lossy) compressed version
of the redshift PDF.
The `pdf` values from the `fit` table are interpolated onto a fine
(``dz/(1+z) = 0.0001``) redshift grid before the full `cdf` is calculated
and interpolated.
The following shows an example including how to reconstruct the PDF
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from grizli import utils
from grizli.fitting import compute_cdf_percentiles, CDF_SIGMAS
# logarithmic redshift grid, but doesn't matter
zgrid = utils.log_zgrid([0.01, 3.4], 0.001)
# Fake PDF from some Gaussians
peaks = [[1, 0.1], [1.5, 0.4]]
pdf = np.zeros_like(zgrid)
for p in peaks:
pdf += norm.pdf(zgrid, loc=p[0], scale=p[1])/len(peaks)
# Put it in a table
fit = utils.GTable()
fit['zgrid'], fit['pdf'] = zgrid, pdf
cdf_x, cdf_y = compute_cdf_percentiles(fit, cdf_sigmas=CDF_SIGMAS)
# PDF is derivative of CDF
pdf_y = np.gradient(cdf_y)/np.gradient(cdf_x)
fig, ax = plt.subplots(1,1,figsize=(6,4))
ax.plot(zgrid, pdf, label='input PDF')
ax.step(cdf_x, pdf_y, label='compressed from CDF', where='mid', color='0.5')
ax.grid()
ax.legend()
ax.set_xlabel('z')
ax.set_ylabel('p(z)')
Parameters
----------
fit : `~astropy.table.Table`
Table that contains, at a minimum, columns of ``zgrid`` and ``pdf``,
e.g., as output from `grizli.fitting.GroupFitter.xfit_redshift`
cdf_sigmas : array-like
Places to evaluate the CDF, in terms of "sigma" of a Normal (Gaussian)
distribution, i.e.,
>>> import scipy.stats
>>> cdf_y = scipy.stats.norm.cdf(cdf_sigmas)
Returns
-------
cdf_x : array-like, size of `cdf_sigmas`
Redshifts where the CDF values correspond to the values `cdf_y` from
`cdf_sigmas` of a Normal distribution.
cdf_y : array-like
CDF values at `cdf_sigmas`
"""
from scipy.interpolate import Akima1DInterpolator
from scipy.integrate import cumtrapz
import scipy.stats
if cdf_sigmas is None:
cdf_sigmas = CDF_SIGMAS
cdf_y = scipy.stats.norm.cdf(cdf_sigmas)
if len(fit['zgrid']) == 1:
return np.ones_like(cdf_y)*fit['zgrid'][0], cdf_y
spl = Akima1DInterpolator(fit['zgrid'], np.log(fit['pdf']), axis=1)
zrfine = [fit['zgrid'].min(), fit['zgrid'].max()]
zfine = utils.log_zgrid(zr=zrfine, dz=0.0001)
ok = np.isfinite(spl(zfine))
pz_fine = np.exp(spl(zfine))
pz_fine[~ok] = 0
cdf_fine = cumtrapz(pz_fine, x=zfine)
cdf_x = np.interp(cdf_y, cdf_fine/cdf_fine[-1], zfine[1:])
return cdf_x, cdf_y
def make_summary_catalog(target='pg0117+213', sextractor='pg0117+213-f140w.cat', verbose=True, files=None, filter_bandpasses=[], get_sps=False, write_table=True, cdf_sigmas=CDF_SIGMAS):
"""
Build a summary catalog (``{target}.info.fits``) by collecting header
keywords, emission-line fluxes and redshift CDF percentiles from a set
of ``*full.fits`` fit products, optionally matching to a SExtractor
catalog.
"""
import glob
import os
from collections import OrderedDict
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.io.fits as pyfits
import numpy as np
import grizli
from grizli import utils
keys = OrderedDict()
keys['PRIMARY'] = ['ID', 'RA', 'DEC', 'NINPUT', 'REDSHIFT', 'T_G102', 'T_G141', 'T_G800L', 'N_G102', 'N_G141', 'N_G800L', 'P_G102', 'P_G141', 'P_G800L', 'NUMLINES', 'HASLINES']
keys['ZFIT_STACK'] = ['CHI2POLY', 'CHI2SPL', 'SPLF01', 'SPLE01',
'SPLF02', 'SPLE02', 'SPLF03', 'SPLE03', 'SPLF04', 'SPLE04',
'HUBERDEL', 'ST_DF', 'ST_LOC', 'ST_SCL', 'AS_EPSF',
'DOF', 'CHIMIN', 'CHIMAX', 'BIC_POLY', 'BIC_SPL', 'BIC_TEMP',
'Z02', 'Z16', 'Z50', 'Z84', 'Z97', 'ZWIDTH1', 'ZWIDTH2',
'ZRMIN', 'ZRMAX', 'Z_MAP', 'Z_RISK', 'MIN_RISK',
'VEL_BL', 'VEL_NL', 'VEL_Z', 'VEL_NFEV', 'VEL_FLAG',
'D4000', 'D4000_E', 'DN4000', 'DN4000_E']
keys['ZFIT_BEAM'] = keys['ZFIT_STACK'].copy()
keys['COVAR'] = ['DLINEID', 'DLINESN']
keys['COVAR'] += ' '.join(['FLUX_{0:03d} ERR_{0:03d} EW50_{0:03d} EWHW_{0:03d}'.format(i) for i in range(64)]).split()
lines = []
pdf_max = []
if files is None:
files = glob.glob('{0}*full.fits'.format(target))
files.sort()
roots = ['_'.join(os.path.basename(file).split('_')[:-1]) for file in files]
template_mags = []
sps_params = []
cdf_array = None
for ii, file in enumerate(files):
print(utils.NO_NEWLINE+file)
line = []
full = pyfits.open(file)
if 'ZFIT_STACK' not in full:
continue
tab = utils.GTable.read(full['ZFIT_STACK'])
pdf_max.append(tab['pdf'].max())
for ext in keys:
if ext not in full:
for k in keys[ext]:
line.append(np.nan)
continue
h = full[ext].header
for k in keys[ext]:
if k in h:
line.append(h[k])
else:
line.append(np.nan)
# SPS params, stellar mass, etc.
if get_sps:
try:
sps = compute_sps_params(full)
except:
sps = {'Lv': np.nan*u.solLum, 'MLv': np.nan*u.solMass/u.solLum, 'MLv_rms': np.nan*u.solMass/u.solLum, 'SFRv': np.nan*u.solMass/u.year, 'SFRv_rms': np.nan*u.solMass/u.year, 'templ': np.nan}
else:
sps = {'Lv': np.nan*u.solLum, 'MLv': np.nan*u.solMass/u.solLum, 'MLv_rms': np.nan*u.solMass/u.solLum, 'SFRv': np.nan*u.solMass/u.year, 'SFRv_rms': np.nan*u.solMass/u.year, 'templ': np.nan}
sps_params.append(sps)
cdf_x, cdf_y = compute_cdf_percentiles(tab,
cdf_sigmas=np.linspace(-5, 5, 51))
if cdf_array is None:
cdf_array = np.zeros((len(files), len(cdf_x)), dtype=np.float32)
cdf_array[ii, :] = cdf_x
lines.append(line)
# Integrate best-fit template through filter bandpasses
if filter_bandpasses:
tfit = utils.GTable.gread(full['TEMPL'])
sp = utils.SpectrumTemplate(wave=tfit['wave'], flux=tfit['full'])
mags = [sp.integrate_filter(bp, abmag=True)
for bp in filter_bandpasses]
template_mags.append(mags)
columns = []
for ext in keys:
if ext == 'ZFIT_BEAM':
columns.extend(['beam_{0}'.format(k) for k in keys[ext]])
else:
columns.extend(keys[ext])
info = utils.GTable(rows=lines, names=columns)
info['PDF_MAX'] = pdf_max
info['CDF_Z'] = cdf_array
info.meta['NCDF'] = cdf_array.shape[1], 'cdf_sigmas = np.linspace(-5, 5, 51)'
root_col = utils.GTable.Column(name='root', data=roots)
info.add_column(root_col, index=0)
for k in ['Lv', 'MLv', 'MLv_rms', 'SFRv', 'SFRv_rms']:
datak = [sps[k].value for sps in sps_params]
info[k] = datak
info[k].unit = sps[k].unit
info['sSFR'] = info['SFRv']/info['MLv']
info['stellar_mass'] = info['Lv']*info['MLv']
info['Lv'].format = '.1e'
info['MLv'].format = '.2f'
info['MLv_rms'].format = '.2f'
info['SFRv'].format = '.1f'
info['SFRv_rms'].format = '.1f'
info['sSFR'].format = '.1e'
info['stellar_mass'].format = '.1e'
if filter_bandpasses:
arr = np.array(template_mags)
for i, bp in enumerate(filter_bandpasses):
info['mag_{0}'.format(bp.name)] = arr[:, i]
info['mag_{0}'.format(bp.name)].format = '.3f'
for c in info.colnames:
info.rename_column(c, c.lower())
# Emission line names
# files=glob.glob('{0}*full.fits'.format(target))
im = pyfits.open(files[0])
h = im['COVAR'].header
for i in range(64):
key = 'FLUX_{0:03d}'.format(i)
if key not in h:
continue
line = h.comments[key].split()[0]
for root in ['flux', 'err', 'ew50', 'ewhw']:
col = '{0}_{1}'.format(root, line)
old_col = '{0}_{1:03d}'.format(root, i)
if old_col in info.colnames:
info.rename_column(old_col, col)
if col not in info.colnames:
continue
info[col].format = '.1f'
if 'err_'+line in info.colnames:
info['sn_{0}'.format(line)] = info['flux_'+line]/info['err_'+line]
info['sn_{0}'.format(line)][info['err_'+line] == 0] = -99
#info['sn_{0}'.format(line)].format = '.1f'
info['chinu'] = info['chimin']/info['dof']
info['chinu'].format = '.2f'
info['bic_diff'] = info['bic_poly'] - info['bic_temp']
info['bic_diff'].format = '.1f'
info['log_risk'] = np.log10(info['min_risk'])
info['log_risk'].format = '.2f'
info['log_pdf_max'] = np.log10(info['pdf_max'])
info['log_pdf_max'].format = '.2f'
info['zq'] = info['log_risk'] - info['log_pdf_max']
info['zq'].format = '.2f'
info['beam_chinu'] = info['beam_chimin']/info['beam_dof']
info['beam_chinu'].format = '.2f'
info['beam_bic_diff'] = info['beam_bic_poly'] - info['beam_bic_temp']
info['beam_bic_diff'].format = '.1f'
info['beam_log_risk'] = np.log10(info['beam_min_risk'])
info['beam_log_risk'].format = '.2f'
info['log_mass'] = np.log10(info['stellar_mass'])
info['log_mass'].format = '.2f'
# ID with link to CDS
idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">#{2:05d}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
info['idx'] = idx
# PNG columns
for ext in ['stack', 'full', 'line']:
png = ['{0}_{1:05d}.{2}.png'.format(root, id, ext) for root, id in zip(info['root'], info['id'])]
info['png_{0}'.format(ext)] = ['<a href={0}><img src={0} height=200></a>'.format(p) for p in png]
# Thumbnails
png = ['../Thumbnails/{0}_{1:05d}.{2}.png'.format(root, id, 'rgb') for root, id in zip(info['root'], info['id'])]
#info['png_{0}'.format('rgb')] = ['<a href={1}><img src={0} height=200></a>'.format(p, p.replace('.rgb.png', '.thumb.fits')) for p in png]
#info['png_{0}'.format('rgb')] = ['<a href={1}><img src={0} onmouseover="this.src=\'{2}\'" onmouseout="this.src=\'{0}\'" height=200></a>'.format(p, p.replace('.rgb.png', '.thumb.png'), p.replace('.rgb.png', '.seg.png')) for p in png]
info['png_{0}'.format('rgb')] = ['<a href={1}><img src={0} onmouseover="this.src = this.src.replace(\'rgb.pn\', \'seg.pn\')" onmouseout="this.src = this.src.replace(\'seg.pn\', \'rgb.pn\')" height=200></a>'.format(p, p.replace('.rgb.png', '.thumb.png'), p.replace('.rgb.png', '.seg.png')) for p in png]
# Column formats
for col in info.colnames:
if col.strip('beam_').startswith('z'):
info[col].format = '.4f'
if col in ['ra', 'dec']:
info[col].format = '.6f'
if ('d4000' in col.lower()) | ('dn4000' in col.lower()):
info[col].format = '.2f'
# Sextractor catalog
if sextractor is None:
if write_table:
info.write('{0}.info.fits'.format(target), overwrite=True)
return info
#sextractor = glob.glob('{0}-f*cat'.format(target))[0]
try:
hcat = grizli.utils.GTable.gread(sextractor) # , format='ascii.sextractor')
except:
hcat = grizli.utils.GTable.gread(sextractor, sextractor=True)
for c in hcat.colnames:
hcat.rename_column(c, c.lower())
idx, dr = hcat.match_to_catalog_sky(info, self_radec=('x_world', 'y_world'), other_radec=None)
for c in hcat.colnames:
info.add_column(hcat[c][idx])
if write_table:
info.write('{0}.info.fits'.format(target), overwrite=True)
return info
def compute_sps_params(full='j021820-051015_01276.full.fits', cosmology=Planck15):
"""
Compute integrated stellar-population parameters (V-band luminosity,
mass-to-light ratio and SFR, with uncertainties) from the ``fsps``
template coefficients stored in a ``full.fits`` product.
"""
import numpy as np
from astropy.io import fits as pyfits
from astropy.table import Table
import astropy.units as u
from grizli import utils
import pysynphot as S
if isinstance(full, str):
im = pyfits.open(full)
else:
im = full
h = im['TEMPL'].header
templ = Table(im['TEMPL'].data)
z = im['ZFIT_STACK'].header['Z_MAP']
# Get coefffs
coeffs, keys, ix = [], [], []
count = 0
for k in h:
if k.startswith('CNAME'):
if h[k].startswith('fsps'):
ix.append(count)
keys.append(h[k])
coeffs.append(h[k.replace('CNAME', 'CVAL')])
count += 1
cov = im['COVAR'].data[np.array(ix), :][:, np.array(ix)]
covd = cov.diagonal()
# Normalize to V band, fsps_QSF_12_v3
normV = np.array([3.75473763e-15, 2.73797790e-15, 1.89469588e-15,
1.32683449e-15, 9.16760812e-16, 2.43922395e-16, 4.76835746e-15,
3.55616962e-15, 2.43745972e-15, 1.61394625e-15, 1.05358710e-15,
5.23733297e-16])
coeffsV = np.array(coeffs)*normV
rmsV = np.sqrt(covd)*normV
rms_norm = rmsV/coeffsV.sum()
coeffs_norm = coeffsV/coeffsV.sum()
param_file = os.path.join(os.path.dirname(__file__), 'data/templates/fsps/fsps_QSF_12_v3.param.fits')
tab_temp = Table.read(param_file)
temp_MLv = tab_temp['mass']/tab_temp['Lv']
temp_SFRv = tab_temp['sfr']
mass_norm = (coeffs_norm*tab_temp['mass']).sum()*u.solMass
Lv_norm = (coeffs_norm*tab_temp['Lv']).sum()*u.solLum
MLv = mass_norm / Lv_norm
SFR_norm = (coeffs_norm*tab_temp['sfr']).sum()*u.solMass/u.yr
SFRv = SFR_norm / Lv_norm
mass_var = ((rms_norm*tab_temp['mass'])**2).sum()
Lv_var = ((rms_norm*tab_temp['Lv'])**2).sum()
SFR_var = ((rms_norm*tab_temp['sfr'])**2).sum()
MLv_var = MLv**2 * (mass_var/mass_norm.value**2 + Lv_var/Lv_norm.value**2)
MLv_rms = np.sqrt(MLv_var)
SFRv_var = SFRv**2 * (SFR_var/SFR_norm.value**2 + Lv_var/Lv_norm.value**2)
SFRv_rms = np.sqrt(SFRv_var)
vband = S.ObsBandpass('v')
vbandz = S.ArrayBandpass(vband.wave*(1+z), vband.throughput)
best_templ = utils.SpectrumTemplate(templ['wave'], templ['full'])
fnu = best_templ.integrate_filter(vbandz)*(u.erg/u.s/u.cm**2/u.Hz)
dL = cosmology.luminosity_distance(z).to(u.cm)
Lnu = fnu*4*np.pi*dL**2
pivotV = vbandz.pivot()*u.Angstrom
nuV = (const.c/pivotV).to(u.Hz)
Lv = (nuV*Lnu).to(u.L_sun)
mass = MLv*Lv
SFR = SFRv*Lv
sps = {'Lv': Lv, 'MLv': MLv, 'MLv_rms': MLv_rms, 'SFRv': SFRv, 'SFRv_rms': SFRv_rms, 'templ': best_templ}
return sps
def _loss(dz, gamma=0.15):
"""Risk / Loss function, Tanaka et al. (https://arxiv.org/abs/1704.05988)
Parameters
----------
dz : float or array-like
Redshift difference(s), typically normalized as ``(z - z_ref)/(1 + z_ref)``
gamma : float
Width parameter of the loss function
Returns
-------
loss : float
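For example (illustrative):
>>> _loss(0.)
0.0
>>> _loss(0.15, gamma=0.15) # loss = 0.5 where dz equals gamma
0.5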
"""
return 1-1/(1+(dz/gamma)**2)
def refit_beams(root='j012017+213343', append='x', id=708, keep_dict={'G141': [201, 291]}, poly_order=3, make_products=True, run_fit=True, **kwargs):
"""
Regenerate a MultiBeam object selecting only certain PAs
Parameters
----------
root : str
Root of the "beams.fits" file to load.
append : str
String to append to the rootname of the updated products.
id : int
Object ID. The input filename is built like
>>> beams_file = f'{root}_{id:05d}.beams.fits'
keep_dict : dict
Dictionary of the PAs/grisms to keep. (See the
`grizli.multifit.MultiBeam.PA` attribute.)
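For example, to keep only the 201 and 291 degree G141 PAs (illustrative):
>>> keep_dict = {'G141': [201, 291]}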
poly_order : int
Order of the polynomial to fit.
make_products : bool
Make stacked spectra and diagnostic figures.
run_fit : bool
Run the redshift fit on the new products
kwargs : dict
Optional keywords passed to `~grizli.fitting.run_all_parallel`.
Returns
-------
mb : `~grizli.multifit.MultiBeam`
New beam object.
"""
import numpy as np
try:
from grizli import utils, fitting, multifit
except:
from . import utils, fitting, multifit
MultiBeam = multifit.MultiBeam
mb = MultiBeam('{0}_{1:05d}.beams.fits'.format(root, id), group_name=root)
keep_beams = []
for g in keep_dict:
if g not in mb.PA:
continue
for pa in keep_dict[g]:
if float(pa) in mb.PA[g]:
keep_beams.extend([mb.beams[i] for i in mb.PA[g][float(pa)]])
mb = MultiBeam(keep_beams, group_name=root+append)
mb.write_master_fits()
if not make_products:
return mb
wave = np.linspace(2000, 2.5e4, 100)
poly_templates = utils.polynomial_templates(wave, order=poly_order)
pfit = mb.template_at_z(z=0, templates=poly_templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
try:
fig1 = mb.oned_figure(figsize=[5, 3], tfit=pfit, loglam_1d=True)
fig1.savefig('{0}_{1:05d}.1D.png'.format(root+append, id))
except:
pass
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32, zfit=pfit)
fig.savefig('{0}_{1:05d}.stack.png'.format(root+append, id))
if run_fit:
fitting.run_all_parallel(id, group_name=root+append, root=root+'x', verbose=True, **kwargs)
return mb
class GroupFitter(object):
"""Base class for `~grizli.stack.StackFitter` and `~grizli.multifit.MultiBeam` spectrum fitting objects
"""
def _get_slices(self, masked=False):
"""Precompute array slices for how the individual components map into the single combined arrays.
Parameters
----------
masked : bool
Return indices of masked arrays rather than simple slices of the
full beams.
Returns
-------
slices : list
List of slices.
"""
x = 0
slices = []
# use masked index arrays rather than slices
if masked:
for i in range(self.N):
beam = self.beams[i]
if beam.fit_mask.sum() == 0:
slices.append(None)
continue
idx = np.arange(beam.fit_mask.sum())+x
slices.append(idx) # [slice(x+0, x+beam.size)][beam.fit_mask])
x = idx[-1]+1
else:
for i in range(self.N):
slices.append(slice(x+0, x+self.beams[i].size))
x += self.beams[i].size
return slices
def _update_beam_mask(self):
"""
Compute versions of the masked arrays
"""
for ib, b in enumerate(self.beams):
b.fit_mask &= self.fit_mask[self.slices[ib]]
self.mslices = self._get_slices(masked=True)
self.Nmask = self.fit_mask.sum()
if hasattr(self, 'Nphot'):
self.Nspec = self.Nmask - self.Nphot
else:
self.Nspec = self.Nmask
def _init_background(self, masked=True):
"""Initialize the (flat) background model components
Parameters
----------
masked : bool
If True, the output array spans only the valid (unmasked) pixels,
``Nmask``; otherwise it spans the full arrays, ``Ntot``
Returns
-------
A_bg : `~numpy.ndarray`
Array with dimensions (``N``, ``Nmask``) (masked=True) or
(``N``, ``Ntot``) (masked=False) for fitting a background
component
"""
if masked:
A_bg = np.zeros((self.N, self.Nmask))
for i in range(self.N):
A_bg[i, self.mslices[i]] = 1.
else:
A_bg = np.zeros((self.N, self.Ntot))
for i in range(self.N):
A_bg[i, self.slices[i]] = 1.
return A_bg
def get_SDSS_photometry(self, bands='ugriz', templ=None, radius=2, SDSS_CATALOG='V/147/sdss12', get_panstarrs=False):
"""
Try to get SDSS photometry from `astroquery`
(developmental)
"""
#from astroquery.sdss import SDSS
#from astropy import coordinates as coords
import astropy.units as u
from astroquery.vizier import Vizier
import astropy.coordinates as coord
import pysynphot as S
from eazy.templates import Template
from eazy.filters import FilterFile
from eazy.photoz import TemplateGrid
from eazy.filters import FilterDefinition
if get_panstarrs:
SDSS_CATALOG = 'II/349'
bands = 'grizy'
from astroquery.vizier import Vizier
import astropy.units as u
import astropy.coordinates as coord
coo = coord.SkyCoord(ra=self.ra, dec=self.dec, unit=(u.deg, u.deg),
frame='icrs')
v = Vizier(catalog=SDSS_CATALOG, columns=['+_r', '*'])
try:
tab = v.query_region(coo, radius="{0}s".format(radius),
catalog=SDSS_CATALOG)[0]
ix = np.argmin(tab['rmag'])
tab = tab[ix]
except:
return None
filters = [FilterDefinition(bp=S.ObsBandpass('sdss,{0}'.format(b))) for b in bands]
pivot = {}
for ib, b in enumerate(bands):
pivot[b] = filters[ib].pivot
to_flam = 10**(-0.4*(48.6))*3.e18 # / pivot(Ang)**2
flam = np.array([10**(-0.4*(tab[b+'mag']))*to_flam/pivot[b]**2 for ib, b in enumerate(bands)])
eflam = np.array([tab['e_{0}mag'.format(b)]*np.log(10)/2.5*flam[ib] for ib, b in enumerate(bands)])
phot = {'flam': flam, 'eflam': eflam, 'filters': filters, 'tempfilt': None}
if templ is None:
return phot
# Make fast SDSS template grid
templates = [Template(arrays=[templ[t].wave, templ[t].flux], name=t) for t in templ]
zgrid = utils.log_zgrid(zr=[0.01, 3.4], dz=0.005)
tempfilt = TemplateGrid(zgrid, templates, filters=filters, add_igm=True, galactic_ebv=0, Eb=0, n_proc=0)
#filters = [all_filters.filters[f-1] for f in [156,157,158,159,160]]
phot = {'flam': flam, 'eflam': eflam, 'filters': filters, 'tempfilt': tempfilt}
return phot
# Vizier
def set_photometry(self, flam=[], eflam=[], filters=[], ext_corr=1, lc=None, force=False, tempfilt=None, min_err=0.02, TEF=None, pz=None, source='unknown', **kwargs):
"""
Set photometry attributes
Parameters
----------
flam, eflam : array-like
Flux densities and uncertainties in f-lambda cgs units
filters : list
List of `~eazy.filters.FilterDefinition` objects
ext_corr : float or array-like
MW extinction correction
lc : array-like
Precomputed filter central wavelengths. Will automatically
be computed from `filters` if not specified
force : bool
Don't try to set if already specified (`Nphot` > 0)
tempfilt : `eazy.photoz.TemplateGrid`
Precomputed grid of templates integrated through the `filters`
bandpasses
min_err : float
minimum or systematic error to add in quadrature to `eflam`
TEF : `eazy.templates.TemplateError`
Template error function
pz : None, (array, array)
Precomputed (z, pz) pdf from, e.g., `eazy`
source : str
String to indicate the provenance of the photometry
Returns
-------
photom_flam : array_like
Flux densities from `flam`
photom_eflam : array-like
Uncertainties including `min_err`
photom_filters : list
`filters`
Nphot : int
Number of photometry bandpasses
The quantities listed above are not actually returned; they are set as
attributes of the object. This method also updates the `sivarf`,
`weightf`, and `fit_mask` attributes to include both the spectra and
the photometry.
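Schematically, given a photometry dictionary `phot` with the entries
above:
>>> mb.set_photometry(min_err=0.03, **phot)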
"""
if (self.Nphot > 0) & (not force):
print('Photometry already set (Nphot={0})'.format(self.Nphot))
return True
okphot = (eflam > 0) & np.isfinite(eflam) & np.isfinite(flam)
self.Nphot = okphot.sum() # len(flam)
self.Nphotbands = len(eflam)
if self.Nphot == 0:
return True
if (len(flam) != len(eflam)) | (len(flam) != len(filters)):
print('flam/eflam/filters dimensions don\'t match')
return False
self.photom_flam = flam*1
self.photom_eflam = np.sqrt(eflam**2+(min_err*flam)**2)
self.photom_flam[~okphot] = -99
self.photom_eflam[~okphot] = -99
self.photom_filters = filters
self.photom_source = source
self.photom_ext_corr = ext_corr
self.sivarf = np.hstack([self.sivarf, 1/self.photom_eflam])
self.weightf = np.hstack([self.weightf, np.ones_like(self.photom_eflam)])
self.fit_mask = np.hstack([self.fit_mask, okphot])
self.fit_mask &= self.weightf > 0
#self.flat_flam = np.hstack((self.flat_flam, self.photom_eflam*0.))
# Mask for just spectra
self.fit_mask_spec = self.fit_mask & True
self.fit_mask_spec[-self.Nphotbands:] = False
self.Nmask = self.fit_mask.sum()
self.Nspec = self.Nmask - self.Nphot
self.scif = np.hstack((self.scif, flam))
self.idf = np.hstack((self.idf, flam*0-1))
self.DoF = int((self.weightf*self.fit_mask).sum())
self.is_spec = np.isfinite(self.scif)
self.is_spec[-len(flam):] = False
self.photom_pivot = np.array([filter.pivot for filter in filters])
self.wavef = np.hstack((self.wavef, self.photom_pivot))
# eazypy tempfilt for faster interpolation
self.tempfilt = tempfilt
self.TEF = TEF
def unset_photometry(self):
"""
Unset photometry-related attributes
"""
if self.Nphot == 0:
return True
Nbands = self.Nphotbands
self.sivarf = self.sivarf[:-Nbands]
self.weightf = self.weightf[:-Nbands]
#self.flat_flam = self.flat_flam[:-Nbands]
self.fit_mask = self.fit_mask[:-Nbands]
self.fit_mask &= self.weightf > 0
self.fit_mask_spec = self.fit_mask & True
self.scif = self.scif[:-Nbands]
self.idf = self.idf[:-Nbands]
self.wavef = self.wavef[:-Nbands]
self.DoF = int((self.weightf*self.fit_mask).sum())
self.is_spec = 1
self.Nphot = 0
self.Nphotbands = 0
self.Nmask = self.fit_mask.sum()
self.Nspec = self.Nmask - self.Nphot
self.tempfilt = None
def _interpolate_photometry(self, z=0., templates=[]):
"""
Interpolate templates through photometric filters
xx: TBD - better handling of emission line templates and use of the
eazy-py `tempfilt` object for a large speedup
"""
NTEMP = len(templates)
A_phot = np.zeros((NTEMP+self.N, len(self.photom_flam)))
mask = self.photom_eflam > 0
if (self.tempfilt is not None):
if (self.tempfilt.NTEMP == NTEMP):
A_phot[self.N:, :] = self.tempfilt(z)
A_phot *= 3.e18/self.photom_pivot**2*(1+z)
A_phot[~np.isfinite(A_phot)] = 0
return A_phot[:, mask]
for it, key in enumerate(templates):
tz = templates[key].zscale(z, scalar=1)
for ifilt, filt in enumerate(self.photom_filters):
A_phot[self.N+it, ifilt] = tz.integrate_filter(filt)*3.e18/self.photom_pivot[ifilt]**2
return A_phot[:, mask]
def xfit_at_z(self, z=0, templates=[], fitter='nnls', fit_background=True, get_uncertainties=False, get_design_matrix=False, pscale=None, COEFF_SCALE=1.e-19, get_components=False, huber_delta=4, get_residuals=False, include_photometry=True, use_cached_templates=False, bounded_kwargs=BOUNDED_DEFAULTS, apply_sensitivity=True):
"""Fit the 2D spectra with a set of templates at a specified redshift.
Parameters
----------
z : float
Redshift.
templates : list
List of templates to fit.
fitter : str
Minimization algorithm to compute template coefficients.
Available options are:
- 'nnls', Non-negative least squares (`scipy.optimize.nnls`)
- 'lstsq', Standard least squares (`numpy.linalg.lstsq`)
- 'bounded', Bounded least squares (`scipy.optimize.lsq_linear`)
For the last option, the line flux limits are set by the limits in
the global `grizli.fitting.LINE_BOUNDS` list and `bounded_kwargs`
are passed to `~scipy.optimize.lsq_linear`.
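Schematically, the bounded fit reduces to a call like
>>> lsq_out = scipy.optimize.lsq_linear(AxT, data,
... bounds=(lower_bound, upper_bound),
... **bounded_kwargs)
>>> coeffs = lsq_out.x
where ``AxT`` and ``data`` are the weighted design matrix and data
vector (the arrays returned when ``get_design_matrix=True``).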
fit_background : bool
Fit additive pedestal background offset.
get_uncertainties : bool, int
Compute coefficient uncertainties from the covariance matrix.
If specified as an int > 1, then the covariance matrix is
computed only for templates with non-zero coefficients
get_design_matrix : bool
Return design matrix and data, rather than nominal outputs.
huber_delta : float
Use the Huber loss function (`scipy.special.huber`) rather than
direct chi-squared. If ``huber_delta < 0``, then fall back to
chi-squared.
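Schematically, the reported statistic is computed as
>>> from scipy.special import huber
>>> chi2 = np.sum(huber(huber_delta, norm_resid)*2)
where ``norm_resid`` are the residuals normalized by the uncertainties.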
Returns
-------
chi2 : float
Chi-squared of the fit
coeffs, coeffs_err : `numpy.ndarray`
Template coefficients and uncertainties.
covariance : `numpy.ndarray`
Full covariance
"""
import scipy.optimize
#import scipy.sparse
from scipy.special import huber
NTEMP = len(templates)
if (self.Nphot > 0) & include_photometry:
A = np.zeros((self.N+NTEMP, self.Nmask))
else:
A = np.zeros((self.N+NTEMP, self.Nspec))
if fit_background:
A[:self.N, :self.Nspec] = self.A_bgm
lower_bound = np.zeros(self.N+NTEMP)
upper_bound = np.ones(self.N+NTEMP)*np.inf
# Background limits
lower_bound[:self.N] = -0.05
upper_bound[:self.N] = 0.05
# A = scipy.sparse.csr_matrix((self.N+NTEMP, self.Ntot))
# bg_sp = scipy.sparse.csc_matrix(self.A_bg)
try:
obj_IGM_MINZ = np.maximum(IGM_MINZ,
(self.wave_mask.min()-200)/1216.-1)
except:
obj_IGM_MINZ = np.maximum(IGM_MINZ,
(self.wavef.min()-200)/1216.-1)
# compute IGM directly for spectrum wavelengths
if use_cached_templates & ('spline' not in fitter):
if z > obj_IGM_MINZ:
if IGM is None:
wigmz = 1.
else:
wavem = self.wavef[self.fit_mask]
lylim = wavem/(1+z) < 1250
wigmz = np.ones_like(wavem)
wigmz[lylim] = IGM.full_IGM(z, wavem[lylim])
#print('Use z-igm')
else:
wigmz = 1.
else:
wigmz = 1.
# Cached first
for i, t in enumerate(templates):
if use_cached_templates:
if t in self.Asave:
#print('\n\nUse saved: ',t)
A[self.N+i, :] += self.Asave[t]*wigmz
for i, t in enumerate(templates):
if use_cached_templates:
if t in self.Asave:
continue
if t.startswith('line'):
lower_bound[self.N+i] = LINE_BOUNDS[0]/COEFF_SCALE
upper_bound[self.N+i] = LINE_BOUNDS[1]/COEFF_SCALE
ti = templates[t]
rest_template = ti.name.split()[0] in ['bspl', 'step', 'poly']
if z > obj_IGM_MINZ:
if IGM is None:
igmz = 1.
else:
lylim = ti.wave < 1250
igmz = np.ones_like(ti.wave)
igmz[lylim] = IGM.full_IGM(z, ti.wave[lylim]*(1+z))
else:
igmz = 1.
# Don't redshift spline templates
if rest_template:
s = [ti.wave, ti.flux]
else:
if hasattr(ti, 'flux_flam'):
# eazy-py Template object
s = [ti.wave*(1+z), ti.flux_flam(z=z)/(1+z)*igmz]
else:
s = [ti.wave*(1+z), ti.flux/(1+z)*igmz]
for j, beam in enumerate(self.beams):
mask_i = beam.fit_mask.reshape(beam.sh)
clip = mask_i.sum(axis=0) > 0
if clip.sum() == 0:
continue
lam_beam = beam.wave[clip]
if ((s[0].min() > lam_beam.max()) |
(s[0].max() < lam_beam.min())):
continue
sl = self.mslices[j]
if t in beam.thumbs:
#print('Use thumbnail!', t)
A[self.N+i, sl] = beam.compute_model(thumb=beam.thumbs[t], spectrum_1d=s, in_place=False, is_cgs=True, apply_sensitivity=apply_sensitivity)[beam.fit_mask]*COEFF_SCALE
else:
A[self.N+i, sl] = beam.compute_model(spectrum_1d=s, in_place=False, is_cgs=True, apply_sensitivity=apply_sensitivity)[beam.fit_mask]*COEFF_SCALE
# Multiply spline templates by single continuum template
if ('spline' in t) & ('spline' in fitter):
ma = None
for k, t_i in enumerate(templates):
if t_i in self.Asave:
ma = A[self.N+k, :].sum()
ma = ma if ma > 0 else 1
ma = 1
try:
A[self.N+k, :] *= A[self.N+i, :]/self._A*COEFF_SCALE # COEFF_SCALE
print('Mult _A')
except:
A[self.N+k, :] *= A[self.N+i, :]/ma # COEFF_SCALE
templates[t_i].max_norm = ma
# print('spline, set to zero: ', t)
if ma is not None:
A[self.N+i, :] *= 0
# Save step templates for faster computation
if rest_template and use_cached_templates:
print('Cache rest-frame template: ', t)
self.Asave[t] = A[self.N+i, :]*1
# if j == 0:
# m = beam.compute_model(spectrum_1d=s, in_place=False, is_cgs=True)
# ds9.frame(i)
# ds9.view(m.reshape(beam.sh))
if fit_background:
if fitter.split()[0] in ['nnls', 'lstsq']:
pedestal = 0.04
else:
pedestal = 0.
else:
pedestal = 0
#oktemp = (A*self.fit_mask).sum(axis=1) != 0
oktemp = A.sum(axis=1) != 0
# Photometry
if (self.Nphot > 0):
if include_photometry:
A_phot = self._interpolate_photometry(z=z,
templates=templates)
A[:, -self.Nphot:] = A_phot*COEFF_SCALE # np.hstack((A, A_phot))
full_fit_mask = self.fit_mask
else:
full_fit_mask = self.fit_mask_spec
else:
full_fit_mask = self.fit_mask
# Weight design matrix and data by 1/sigma
#Ax = A[oktemp,:]*self.sivarf[full_fit_mask]
# Include `weight` variable to account for contamination
sivarf = self.sivarf*np.sqrt(self.weightf)
Ax = A[oktemp, :]*sivarf[full_fit_mask]
#AxT = Ax[:,full_fit_mask].T
# Scale photometry
if hasattr(self, 'pscale'):
if (self.pscale is not None):
scale = self.compute_scale_array(self.pscale, self.wavef[full_fit_mask])
if self.Nphot > 0:
scale[-self.Nphot:] = 1.
Ax *= scale
if fit_background:
for i in range(self.N):
Ax[i, :] /= scale
# Need transpose
AxT = Ax.T
# Masked data array, including background pedestal
data = ((self.scif+pedestal*self.is_spec)*sivarf)[full_fit_mask]
if get_design_matrix:
return AxT, data
# Run the minimization
if fitter.split()[0] == 'nnls':
coeffs_i, rnorm = scipy.optimize.nnls(AxT, data)
elif fitter.split()[0] == 'lstsq':
coeffs_i, residuals, rank, s = np.linalg.lstsq(AxT, data,
rcond=utils.LSTSQ_RCOND)
else:
# Bounded Least Squares
func = scipy.optimize.lsq_linear
bounds = (lower_bound[oktemp], upper_bound[oktemp])
lsq_out = func(AxT, data, bounds=bounds, **bounded_kwargs)
coeffs_i = lsq_out.x
if False:
r = AxT.dot(coeffs_i) - data
# Compute background array
if fit_background:
background = np.dot(coeffs_i[:self.N], A[:self.N, :]) - pedestal
if self.Nphot > 0:
background[-self.Nphot:] = 0.
coeffs_i[:self.N] -= pedestal
else:
background = self.scif[full_fit_mask]*0.
# Full model
if fit_background:
model = np.dot(coeffs_i[self.N:], Ax[self.N:, :]/sivarf[full_fit_mask])
else:
model = np.dot(coeffs_i, Ax/sivarf[full_fit_mask])
# Model photometry
if self.Nphot > 0:
self.photom_model = model[-self.Nphot:]*1
# Residuals and Chi-squared
resid = self.scif[full_fit_mask] - model - background
if get_components:
return model, background
#chi2 = np.sum(resid[full_fit_mask]**2*self.sivarf[full_fit_mask]**2)
norm_resid = resid*(sivarf)[full_fit_mask]
# Use Huber loss function rather than direct chi2
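# scipy.special.huber(delta, r) = r**2/2 for |r| <= delta and
# delta*(|r| - delta/2) otherwise, so 2*huber matches the usual chi2 for small
# normalized residuals but grows only linearly for outliers.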
if get_residuals:
chi2 = norm_resid
else:
if huber_delta > 0:
chi2 = huber(huber_delta, norm_resid)*2.
else:
chi2 = norm_resid**2
chi2 = np.sum(chi2)
# Uncertainties from covariance matrix
if get_uncertainties:
try:
# Covariance is inverse of AT.A
#covar_i = np.matrix(np.dot(AxT.T, AxT)).I.A
covar_i = utils.safe_invert(np.dot(AxT.T, AxT))
covar = utils.fill_masked_covar(covar_i, oktemp)
covard = np.sqrt(covar.diagonal())
# Compute covariances after masking templates with coeffs = 0
if get_uncertainties == 2:
nonzero = coeffs_i != 0
if nonzero.sum() > 0:
AxTm = AxT[:, nonzero]
#mcoeffs_i, rnorm = scipy.optimize.nnls(AxTm, data)
#mcoeffs_i[:self.N] -= pedestal
#mcovar_i = np.matrix(np.dot(AxTm.T, AxTm)).I.A
mcovar_i = utils.safe_invert(np.dot(AxTm.T, AxTm))
mcovar = utils.fill_masked_covar(mcovar_i, nonzero)
mcovar = utils.fill_masked_covar(mcovar, oktemp)
mcovard = np.sqrt(mcovar.diagonal())
covar = mcovar
covard = mcovard
except:
print('Except: covar!')
covar = np.zeros((self.N+NTEMP, self.N+NTEMP))
covard = np.zeros(self.N+NTEMP) # -1.
mcovard = covard
else:
covar = np.zeros((self.N+NTEMP, self.N+NTEMP))
covard = np.zeros(self.N+NTEMP) # -1.
coeffs = np.zeros(self.N+NTEMP)
coeffs[oktemp] = coeffs_i # [self.N:]] = coeffs[self.N:]
coeffs_err = covard # np.zeros(NTEMP)
#full_coeffs_err[oktemp[self.N:]] = covard[self.N:]
del(A)
del(Ax)
del(AxT)
# if fit_background:
coeffs[self.N:] *= COEFF_SCALE
coeffs_err[self.N:] *= COEFF_SCALE
#covar[self.N:,self.N:] *= COEFF_SCALE**2
covar[self.N:, :] *= COEFF_SCALE
covar[:, self.N:] *= COEFF_SCALE
return chi2, coeffs, coeffs_err, covar
def xfit_redshift(self, prior=None,
templates={},
fwhm=1200, line_complexes=True, fsps_templates=False,
zr=[0.65, 1.6], dz=[0.005, 0.0004], zoom=True,
verbose=True, fit_background=True,
fitter='nnls', bounded_kwargs=BOUNDED_DEFAULTS,
delta_chi2_threshold=0.004,
poly_order=3,
make_figure=True, figsize=[8, 5],
use_cached_templates=True,
get_uncertainties=True,
Rspline=30, huber_delta=4, get_student_logpdf=False):
"""
Two-step procedure for fitting redshifts
1. Fit "uninformative" polynomial and spline templates to establish
baseline chi-squared values for the data
2. Fit the supplied `templates` on a coarse redshift grid and, if
``zoom``, refit on a finer grid around the chi-squared minima
Parameters
----------
prior : None, (array, array)
Redshift prior (z, pz). Will be interpolated to the redshift
fit grid
templates : dict
Dictionary the `~grizli.utils.SpectrumTemplate` objects to use
for the fits
fwhm, line_complexes, fsps_templates : float, bool, bool
Parameters passed to `~grizli.utils.load_templates` if
`templates` is empty.
make_figure, figsize : bool, (float, float)
Make the diagnostic figure with dimensions `figsize`
zr : (float, float)
Redshift limits of the logarithmic (1+z) redshift grid
dz : (float, float)
Step size of the grid. The second value will be used to "zoom in"
on the peaks found in the coarse grid step from the first value.
zoom : bool
Do the second pass with the `dz[1]` step size
verbose : bool
Some verbosity control
fit_background : bool
Include contribution of additive background
fitter, bounded_kwargs : str, dict
Least-squares optimization method. See
`~grizli.fitting.GroupFitter.xfit_at_z`
delta_chi2_threshold : float
*Not used*
poly_order : int
Order of polynomials for the "uninformative" polynomial fit.
The parameters of the polynomial and full template fits are
computed to evaluate the extent to which the galaxy / stellar
templates improve the fit
Rspline : float
Spectral resolution, ``R``, of spline templates for another
"uninformative" fit.
use_cached_templates : bool
Try to used cached versions of dispersed template models for
templates that don't depend on redshift (polynomials, splines)
get_uncertainties : bool
Get template fit coefficient uncertainties from the fit
covariance matrix
huber_delta : float
Parameter for Huber loss function (see
`~grizli.fitting.GroupFitter.xfit_at_z`)
get_student_logpdf : bool
Get logpdf for likelihood assuming Student-t distribution rather
than standard normal assumption
Returns
-------
fit : `~astropy.table.Table`
Table with fit information on the redshift grid and metadata
on some fit characteristics.
**Table metadata**
+----------------+-----------------------------------------------+
| Meta | Description |
+================+===============================================+
| N | Number of spectrum extensions / beams |
+----------------+-----------------------------------------------+
| polyord | Order of the polynomial fit |
+----------------+-----------------------------------------------+
| chi2poly | :math:`\chi^2` of the polynomial fit |
+----------------+-----------------------------------------------+
| chi2spl | :math:`\chi^2` of the spline fit |
+----------------+-----------------------------------------------+
| Rspline | Spectral resolution of the spline templates |
+----------------+-----------------------------------------------+
| kspl | Effective number of parameters of spline fit |
+----------------+-----------------------------------------------+
| huberdel | `huber_delta` |
+----------------+-----------------------------------------------+
| `splf[i]` | Flux of spline fit at fixed wavelengths |
+----------------+-----------------------------------------------+
| `sple[i]` | Unc. of spline fit at fixed wavelengths |
+----------------+-----------------------------------------------+
| NTEMP | Number of `templates` |
+----------------+-----------------------------------------------+
| DoF | Degrees of freedom of the fit |
| | (total number of unmasked pixels in all |
| | 2D beams) |
+----------------+-----------------------------------------------+
| ktempl | N parameters of the template fit |
+----------------+-----------------------------------------------+
| chimin | Minimum :math:`\chi^2` of the template fit |
+----------------+-----------------------------------------------+
| chimax | Maximum :math:`\chi^2` of the template fit |
+----------------+-----------------------------------------------+
| `fitter` | Least squares method |
+----------------+-----------------------------------------------+
| as_epsf | Fit was done as `~grizli.utils.EffectivePSF` |
+----------------+-----------------------------------------------+
| bic_poly | Bayesian Information Criterion (BIC) of |
| | the **polynomial** fit. |
| | ``BIC = log(DoF)*k + min(chi2) + C`` |
+----------------+-----------------------------------------------+
| bic_spl | BIC of the **spline** fit |
+----------------+-----------------------------------------------+
| bic_temp | BIC of the **template** (redshift) fit |
+----------------+-----------------------------------------------+
| st_df | Student-`~scipy.stats.t` df of spline fit |
+----------------+-----------------------------------------------+
| st_loc | Student-`~scipy.stats.t` loc of spline fit |
+----------------+-----------------------------------------------+
| st_scl | Student-`~scipy.stats.t` scale of spline fit |
+----------------+-----------------------------------------------+
| Z02 | Integrated `cdf(<z) = 0.025` |
+----------------+-----------------------------------------------+
| Z16 | Integrated `cdf(<z) = 0.16` |
+----------------+-----------------------------------------------+
| Z50 | Integrated `cdf(<z) = 0.50` |
+----------------+-----------------------------------------------+
| Z84 | Integrated `cdf(<z) = 0.84` |
+----------------+-----------------------------------------------+
| Z97 | Integrated `cdf(<z) = 0.975` |
+----------------+-----------------------------------------------+
| ZWIDTH1 | ``Z84 - Z16`` |
+----------------+-----------------------------------------------+
| ZWIDTH2 | ``Z97 - Z02`` |
+----------------+-----------------------------------------------+
| z_map | Redshift at ``Max(PDF)`` |
+----------------+-----------------------------------------------+
| zrmin | Start of the redshift grid `zr` |
+----------------+-----------------------------------------------+
| zrmax | End of the redshift grid `zr` |
+----------------+-----------------------------------------------+
| z_risk | Redshift at minimum ``risk`` |
+----------------+-----------------------------------------------+
| min_risk | Minimum ``risk`` |
+----------------+-----------------------------------------------+
| gam_loss | ``gamma`` parameter of ``risk`` |
+----------------+-----------------------------------------------+
**Table columns**
+----------------+-----------------------------------------------+
| Column | Description |
+================+===============================================+
| zgrid | Redshift grid ``NZ`` |
+----------------+-----------------------------------------------+
| chi2 | :math:`\chi^2(z)` ``NZ`` |
+----------------+-----------------------------------------------+
| student_logpdf | log PDF of student-t likelihood ``NZ`` |
+----------------+-----------------------------------------------+
| coeffs | Template coefficients ``(NZ,NTEMP)`` |
+----------------+-----------------------------------------------+
| covar | Template covariance ``(NZ,NTEMP,NTEMP)`` |
+----------------+-----------------------------------------------+
| pdf | Full likelihood including optional `prior` |
+----------------+-----------------------------------------------+
| risk | "Risk" parameter from |
| | Tanaka et al. (arXiv/1704.05988) |
+----------------+-----------------------------------------------+
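Examples
--------
A minimal usage sketch; the beams file and `~grizli.multifit.MultiBeam`
construction are illustrative, not part of this method:
>>> from grizli import multifit
>>> mb = multifit.MultiBeam('example_00123.beams.fits', group_name='example')
>>> fit = mb.xfit_redshift(zr=[0.2, 3.0], dz=[0.004, 0.0004])
>>> z_best = fit.meta['z_map'][0]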
"""
from numpy import polyfit, polyval
from scipy.stats import t as student_t
from scipy.special import huber
import peakutils
if isinstance(zr, int):
if zr == 0:
stars = True
zr = [0, 0.01]
fitter = 'nnls'
else:
stars = False
if len(zr) == 1:
zgrid = np.array([zr[0]])
zoom = False
elif len(zr) == 3:
# Grid covering zr[0] +/- zr[1]*zr[2]*(1+zr[0]) with (log) step dz=zr[1]
step = zr[1]*zr[2]*(1+zr[0])
zgrid = utils.log_zgrid(zr[0] + np.array([-1, 1])*step, dz=zr[1])
zoom = False
else:
zgrid = utils.log_zgrid(zr, dz=dz[0])
NZ = len(zgrid)
############
# Polynomial template fit
wpoly = np.linspace(1000, 5.2e4, 1000)
tpoly = utils.polynomial_templates(wpoly, order=poly_order,
line=False)
out = self.xfit_at_z(z=0., templates=tpoly, fitter='lstsq',
fit_background=True, get_uncertainties=False,
include_photometry=False, huber_delta=huber_delta,
use_cached_templates=False)
chi2_poly, coeffs_poly, err_poly, cov = out
###########
# Spline template fit
wspline = np.arange(4200, 2.5e4)
df_spl = len(utils.log_zgrid(zr=[wspline[0], wspline[-1]],
dz=1./Rspline))
tspline = utils.bspline_templates(wspline, df=df_spl+2, log=True,
clip=0.0001)
out = self.xfit_at_z(z=0., templates=tspline, fitter='lstsq',
fit_background=True, get_uncertainties=True,
include_photometry=False, get_residuals=True,
use_cached_templates=False)
spline_resid, coeffs_spline, err_spline, cov = out
if huber_delta > 0:
chi2_spline = (huber(huber_delta, spline_resid)*2.).sum()
else:
chi2_spline = (spline_resid**2).sum()
student_t_pars = student_t.fit(spline_resid)
# Set up for template fit
if templates == {}:
templates = utils.load_templates(fwhm=fwhm, stars=stars,
line_complexes=line_complexes,
fsps_templates=fsps_templates)
else:
if verbose:
print('User templates! N={0} \n'.format(len(templates)))
NTEMP = len(templates)
out = self.xfit_at_z(z=0., templates=templates, fitter=fitter,
fit_background=fit_background,
get_uncertainties=False,
use_cached_templates=use_cached_templates)
chi2, coeffs, coeffs_err, covar = out
# Set up arrays
chi2 = np.zeros(NZ)
logpdf = np.zeros(NZ)
coeffs = np.zeros((NZ, coeffs.shape[0]))
covar = np.zeros((NZ, covar.shape[0], covar.shape[1]))
chi2min = 1e30
iz = 0
# Now run the fit on the redshift grid
for i in range(NZ):
out = self.xfit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=True,
use_cached_templates=use_cached_templates,
bounded_kwargs=bounded_kwargs)
fit_resid, coeffs[i, :], coeffs_err, covar[i, :, :] = out
if huber_delta > 0:
chi2[i] = (huber(huber_delta, fit_resid)*2.).sum()
else:
chi2[i] = (fit_resid**2).sum()
if get_student_logpdf:
logpdf[i] = student_t.logpdf(fit_resid, *student_t_pars).sum()
if chi2[i] < chi2min:
iz = i
chi2min = chi2[i]
if verbose:
line = ' {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'
print(utils.NO_NEWLINE +
line.format(zgrid[i], chi2[i], zgrid[iz], i+1, NZ))
if verbose:
print('First iteration: z_best={0:.4f}\n'.format(zgrid[iz]))
##########
# Find peaks
# Make "negative" chi2 for peak-finding
chi2_test = chi2_spline
# Find peaks including the prior, if specified
if prior is not None:
pzi = np.interp(zgrid, prior[0], prior[1], left=0, right=0)
pzi /= np.maximum(np.trapz(pzi, zgrid), 1.e-10)
logpz = np.log(pzi)
chi2_i = chi2 - 2*logpz
else:
chi2_i = chi2
if chi2_test > (chi2_i.min()+100):
chi2_rev = (chi2_i.min() + 100 - chi2_i)/self.DoF
elif chi2_test < (chi2_i.min() + 9):
chi2_rev = (chi2_i.min() + 16 - chi2_i)/self.DoF
else:
chi2_rev = (chi2_test - chi2_i)/self.DoF
if len(zgrid) > 1:
chi2_rev[chi2_rev < 0] = 0
indexes = peakutils.indexes(chi2_rev, thres=0.4, min_dist=8)
num_peaks = len(indexes)
so = np.argsort(chi2_rev[indexes])
indexes = indexes[so[::-1]]
else:
num_peaks = 1
zoom = False
max_peaks = 3
######
# Now zoom in on the peaks found in the first iteration
# delta_chi2 = (chi2.max()-chi2.min())/self.DoF
# if delta_chi2 > delta_chi2_threshold:
if (num_peaks > 0) & (not stars) & zoom & (len(dz) > 1):
zgrid_zoom = []
for ix in indexes[:max_peaks]:
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zi = -c[1]/(2*c[0])
chi_i = polyval(c, zi)
zgrid_zoom.extend(np.arange(zi-2*dz[0],
zi+2*dz[0]+dz[1]/10., dz[1]))
NZOOM = len(zgrid_zoom)
chi2_zoom = np.zeros(NZOOM)
logpdf_zoom = np.zeros(NZOOM)
coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
covar_zoom = np.zeros((NZOOM, coeffs.shape[1], covar.shape[2]))
iz = 0
chi2min = 1.e30
for i in range(NZOOM):
out = self.xfit_at_z(z=zgrid_zoom[i], templates=templates,
fitter=fitter,
fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=True,
use_cached_templates=use_cached_templates,
bounded_kwargs=bounded_kwargs)
fit_resid, coeffs_zoom[i, :], e, covar_zoom[i, :, :] = out
if huber_delta > 0:
chi2_zoom[i] = (huber(huber_delta, fit_resid)*2.).sum()
else:
chi2_zoom[i] = (fit_resid**2).sum()
if get_student_logpdf:
logpdf_zoom[i] = student_t.logpdf(fit_resid,
*student_t_pars).sum()
#A, coeffs_zoom[i,:], chi2_zoom[i], model_2d = out
if chi2_zoom[i] < chi2min:
chi2min = chi2_zoom[i]
iz = i
if verbose:
line = '- {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'
print(utils.NO_NEWLINE +
line.format(zgrid_zoom[i], chi2_zoom[i],
zgrid_zoom[iz], i+1, NZOOM))
# Concatenate, will resort at the end
zgrid = np.append(zgrid, zgrid_zoom)
chi2 = np.append(chi2, chi2_zoom)
logpdf = np.append(logpdf, logpdf_zoom)
coeffs = np.append(coeffs, coeffs_zoom, axis=0)
covar = np.vstack((covar, covar_zoom))
# Resort the concatenated arrays
so = np.argsort(zgrid)
zgrid = zgrid[so]
chi2 = chi2[so]
logpdf = logpdf[so]
coeffs = coeffs[so, :]
covar = covar[so, :, :]
# Make the output table
fit = utils.GTable()
fit.meta['N'] = (self.N, 'Number of spectrum extensions')
fit.meta['polyord'] = (poly_order, 'Order polynomial fit')
fit.meta['chi2poly'] = (chi2_poly, 'Chi^2 of polynomial fit')
kspl = (coeffs_spline != 0).sum()
fit.meta['chi2spl'] = (chi2_spline, 'Chi^2 of spline fit')
fit.meta['Rspline'] = (Rspline, 'R=lam/dlam of spline fit')
fit.meta['kspl'] = (kspl, 'Parameters, k, of spline fit')
fit.meta['huberdel'] = (huber_delta,
'Huber delta parameter, see scipy.special.huber')
# Evaluate spline at wavelengths for stars
xspline = np.array([6060, 8100, 9000, 1.27e4, 1.4e4])
flux_spline = utils.eval_bspline_templates(xspline, tspline,
coeffs_spline[self.N:])
fluxerr_spline = utils.eval_bspline_templates(xspline, tspline,
err_spline[self.N:])
for i in range(len(xspline)):
fit.meta['splf{0:02d}'.format(i+1)] = (flux_spline[i],
'Spline flux at {0:.2f} um'.format(xspline[i]/1.e4))
fit.meta['sple{0:02d}'.format(i+1)] = (fluxerr_spline[i],
'Spline flux err at {0:.2f} um'.format(xspline[i]/1.e4))
izbest = np.argmin(chi2)
clip = coeffs[izbest,:] != 0
ktempl = clip.sum()
fit.meta['NTEMP'] = (len(templates), 'Number of fitting templates')
fit.meta['DoF'] = (self.DoF, 'Degrees of freedom (number of pixels)')
fit.meta['ktempl'] = (ktempl, 'Parameters, k, of template fit')
fit.meta['chimin'] = (chi2.min(), 'Minimum chi2 of template fit')
fit.meta['chimax'] = (chi2.max(), 'Maximum chi2 of template fit')
fit.meta['fitter'] = (fitter, 'Minimization algorithm')
fit.meta['as_epsf'] = ((self.psf_param_dict is not None)*1,
'Object fit with effective PSF morphology')
# Bayesian information criteria, normalized to template min_chi2
# BIC = log(number of data points)*(number of params) + min(chi2) + C
# https://en.wikipedia.org/wiki/Bayesian_information_criterion
scale_chinu = self.DoF/chi2.min()
scale_chinu = 1 # Don't rescale
bic_poly = (np.log(self.DoF)*(poly_order+1+self.N) +
(chi2_poly-chi2.min())*scale_chinu)
fit.meta['bic_poly'] = (bic_poly, 'BIC of polynomial fit')
bic_spl = np.log(self.DoF)*kspl + (chi2_spline-chi2.min())*scale_chinu
fit.meta['bic_spl'] = (bic_spl, 'BIC of spline fit')
fit.meta['bic_temp'] = np.log(self.DoF)*ktempl, 'BIC of template fit'
# Template info
for i, tname in enumerate(templates):
fit.meta['T{0:03d}NAME'.format(i+1)] = (templates[tname].name,
'Template name')
if tname.startswith('line '):
fit.meta['T{0:03d}FWHM'.format(i+1)] = (templates[tname].fwhm,
'FWHM, if emission line')
dtype = np.float64
fit['zgrid'] = np.cast[dtype](zgrid)
fit['chi2'] = np.cast[dtype](chi2)
if get_student_logpdf:
fit['student_logpdf'] = np.cast[dtype](logpdf)
fit.meta['st_df'] = student_t_pars[0], 'Student-t df of spline fit'
fit.meta['st_loc'] = student_t_pars[1], 'Student-t loc of spline fit'
fit.meta['st_scl'] = (student_t_pars[2],
'Student-t scale of spline fit')
#fit['chi2poly'] = chi2_poly
fit['coeffs'] = np.cast[dtype](coeffs)
fit['covar'] = np.cast[dtype](covar)
fit = self._parse_zfit_output(fit, prior=prior)
return fit
def _parse_zfit_output(self, fit, risk_gamma=0.15, prior=None):
"""Parse best-fit redshift, etc.
Parameters
----------
fit : `~astropy.table.Table`
Result of `~grizli.fitting.GroupFitter.xfit_redshift`
risk_gamma : float
``gamma`` parameter of the redshift "risk"
prior : None, (array, array)
Optional redshift prior
Returns
-------
Adds metadata and `pdf` and `risk` columns to `fit` table
"""
import scipy.interpolate
from scipy.interpolate import Akima1DInterpolator
from scipy.integrate import cumtrapz
# Normalize to min(chi2)/DoF = 1.
scl_nu = fit['chi2'].min()/self.DoF
# PDF
pdf = np.exp(-0.5*(fit['chi2']-fit['chi2'].min())/scl_nu)
if prior is not None:
interp_prior = np.interp(fit['zgrid'], prior[0], prior[1])
pdf *= interp_prior
fit.meta['hasprior'] = True, 'Prior applied to PDF'
fit['prior'] = interp_prior
else:
interp_prior = None
fit.meta['hasprior'] = False, 'Prior applied to PDF'
# Normalize PDF
if len(pdf) > 1:
pdf /= np.trapz(pdf, fit['zgrid'])
pdf = np.maximum(pdf, 1.e-40)
# Interpolate pdf for more continuous measurement
spl = Akima1DInterpolator(fit['zgrid'], np.log(pdf), axis=1)
zrfine = [fit['zgrid'].min(), fit['zgrid'].max()]
zfine = utils.log_zgrid(zr=zrfine, dz=0.0001)
splz = spl(zfine)
ok = np.isfinite(splz)
pz_fine = np.exp(splz)
pz_fine[~ok] = 0
#norm = np.trapz(pzfine, zfine)
# Compute CDF and probability intervals
#dz = np.gradient(zfine[ok])
#cdf = np.cumsum(np.exp(spl(zfine[ok]))*dz/norm)
cdf = cumtrapz(pz_fine, x=zfine)
percentiles = np.array([2.5, 16, 50, 84, 97.5])/100.
pz_percentiles = np.interp(percentiles, cdf/cdf[-1], zfine[1:])
# Risk parameter
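# Risk of adopting redshift z, following Tanaka et al. (arXiv/1704.05988):
#   R(z) = Integral[ p(z') * L((z - z') / (1 + z'), gamma) dz' ]
# evaluated with trapezoid weights `trdz` over the tabulated zgrid.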
#dz = np.gradient(fit['zgrid'])
trdz = utils.trapz_dx(fit['zgrid'])
zsq = np.dot(fit['zgrid'][:, None],
np.ones_like(fit['zgrid'])[None, :])
L = _loss((zsq-fit['zgrid'])/(1+fit['zgrid']), gamma=risk_gamma)
risk = np.dot(pdf*L, trdz)
# Fit a parabola around min(risk)
zi = np.argmin(risk)
if (zi < len(risk)-1) & (zi > 0):
c = np.polyfit(fit['zgrid'][zi-1:zi+2], risk[zi-1:zi+2], 2)
z_risk = -c[1]/(2*c[0])
else:
z_risk = fit['zgrid'][zi]
risk_loss = _loss((z_risk-fit['zgrid'])/(1+fit['zgrid']),
gamma=risk_gamma)
min_risk = np.trapz(pdf*risk_loss, fit['zgrid'])
# MAP, maximum p(z) from parabola fit around tabulated maximum
zi = np.argmax(pdf)
if (zi < len(pdf)-1) & (zi > 0):
c = np.polyfit(fit['zgrid'][zi-1:zi+2], pdf[zi-1:zi+2], 2)
z_map = -c[1]/(2*c[0])
else:
z_map = fit['zgrid'][zi]
else:
risk = np.zeros_like(pdf)-1
pz_percentiles = np.zeros(5)
z_map = fit['zgrid'][0]
min_risk = -1
z_risk = z_map
# Store data in the fit table
fit['pdf'] = pdf
fit['risk'] = risk
fit.meta['Z02'] = pz_percentiles[0], 'Integrated p(z) = 0.025'
fit.meta['Z16'] = pz_percentiles[1], 'Integrated p(z) = 0.16'
fit.meta['Z50'] = pz_percentiles[2], 'Integrated p(z) = 0.5'
fit.meta['Z84'] = pz_percentiles[3], 'Integrated p(z) = 0.84'
fit.meta['Z97'] = pz_percentiles[4], 'Integrated p(z) = 0.975'
fit.meta['ZWIDTH1'] = (pz_percentiles[3]-pz_percentiles[1],
'Width between the 16th and 84th p(z) percentiles')
fit.meta['ZWIDTH2'] = (pz_percentiles[4]-pz_percentiles[0],
'Width between the 2.5th and 97.5th p(z) percentiles')
fit.meta['z_map'] = z_map, 'Redshift at MAX(PDF)'
fit.meta['zrmin'] = fit['zgrid'].min(), 'z grid start'
fit.meta['zrmax'] = fit['zgrid'].max(), 'z grid end'
fit.meta['z_risk'] = z_risk, 'Redshift at minimum risk'
fit.meta['min_risk'] = min_risk, 'Minimum risk'
fit.meta['gam_loss'] = (risk_gamma,
'Gamma factor of the risk/loss function')
return fit
def template_at_z(self, z=0, templates=None, fwhm=1400, get_uncertainties=2, draws=0, **kwargs):
"""
Get the best-fit template at a specified redshift
Parameters
----------
z : float
Redshift
templates : dict
Dictionary of `~grizli.utils.SpectrumTemplate` objects
fwhm : float
FWHM of line templates if `templates` generated in-place
get_uncertainties : int
Get coefficient uncertainties from covariance matrix
draws : int
Number of random draws from covariance matrix
kwargs : dict
Any additional keywords are passed to
`~grizli.fitting.GroupFitter.xfit_at_z`
Returns
-------
tfit : dict
Dictionary of fit results, used in various other places like
`oned_figure`, etc.
+--------------+---------------------------------------------+
| Keyword | Description |
+==============+=============================================+
| cfit | Dict of template normalizations and |
| | uncertainties |
+--------------+---------------------------------------------+
| cont1d | `~grizli.utils.SpectrumTemplate` of |
| | best-fit *continuum* |
+--------------+---------------------------------------------+
| line1d | `~grizli.utils.SpectrumTemplate` of |
| | best-fit *continuum + emission line* |
+--------------+---------------------------------------------+
| coeffs | Array of fit coefficients |
+--------------+---------------------------------------------+
| chi2 | (float) chi-squared of the fit |
+--------------+---------------------------------------------+
| z | (float) The input redshift |
+--------------+---------------------------------------------+
| templates | Copy of the input `templates` dictionary |
+--------------+---------------------------------------------+
| line1d_err | If ``draws > 0``, this will be template |
| | draws with the same dimension as `line1d` |
+--------------+---------------------------------------------+
"""
if templates is None:
templates = utils.load_templates(line_complexes=False,
fsps_templates=True, fwhm=fwhm)
kwargs['z'] = z
kwargs['templates'] = templates
kwargs['get_uncertainties'] = get_uncertainties
out = self.xfit_at_z(**kwargs)
chi2, coeffs, coeffs_err, covar = out
cont1d, line1d = utils.dot_templates(coeffs[self.N:], templates, z=z,
apply_igm=(z > IGM_MINZ))
# if False:
# # Test draws from covariance matrix
# NDRAW = 100
# nonzero = coeffs[self.N:] != 0
# covarx = covar[self.N:, self.N:][nonzero, :][:, nonzero]
# draws = np.random.multivariate_normal(coeffs[self.N:][nonzero],
# covarx, NDRAW)
#
# contarr = np.zeros((NDRAW, len(cont1d.flux)))
# linearr = np.zeros((NDRAW, len(line1d.flux)))
# for i in range(NDRAW):
# print(i)
# coeffs_i = np.zeros(len(nonzero))
# coeffs_i[nonzero] = draws[i, :]
# _out = utils.dot_templates(coeffs_i, templates, z=z,
# apply_igm=(z > IGM_MINZ))
# contarr[i, :], linearr[i, :] = _out[0].flux, _out[1].flux
#
# contrms = np.std(contarr, axis=0)
# linerms = np.std(linearr, axis=0)
# Parse template coeffs
cfit = OrderedDict()
for i in range(self.N):
cfit['bg {0:03d}'.format(i)] = coeffs[i], coeffs_err[i]
for j, key in enumerate(templates):
i = j+self.N
cfit[key] = coeffs[i], coeffs_err[i]
tfit = OrderedDict()
tfit['cont1d'] = cont1d
tfit['line1d'] = line1d
tfit['cfit'] = cfit
tfit['coeffs'] = coeffs
tfit['chi2'] = chi2
tfit['covar'] = covar
tfit['z'] = z
tfit['templates'] = templates
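# Coefficient draws: normalize the covariance to a correlation matrix, draw
# multivariate-normal samples for the coefficients with non-zero errors, and
# take half the 16-84 percentile spread of the resulting model spectra as the
# 1-sigma template uncertainty `line1d_err`.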
if draws > 0:
xte, yte, lte = utils.array_templates(templates, max_R=5000, z=z)
err = np.sqrt(covar.diagonal())
nonzero = err > 0
cov_norm = ((covar/err).T/err)[nonzero, :][:, nonzero]
draw_coeff = np.zeros((draws, len(err)))
draw_coeff[:, nonzero] = np.random.multivariate_normal((coeffs/err)[nonzero], cov_norm, draws)*err[nonzero]
draw_spec = draw_coeff[:, self.N:].dot(yte)
err_spec = np.diff(np.percentile(draw_spec, [16, 84], axis=0), axis=0).flatten()/2.
tfit['line1d_err'] = err_spec
return tfit # cont1d, line1d, cfit, covar
def check_tfit_coeffs(self, tfit, templates, refit_others=True, fit_background=True, fitter='nnls', bounded_kwargs=BOUNDED_DEFAULTS):
"""
Compare emission line fluxes fit at each grism/PA to the combined
value. If ``refit_others=True``, then compare the line fluxes to a fit
from a new object generated *excluding* that grism/PA.
Returns
-------
max_line : str
Line species with the maximum deviation
max_line_diff : float
The maximum deviation for ``max_line`` (sigmas).
compare : dict
The full comparison dictionary
"""
from . import multifit
count_grism_pas = 0
for gr in self.PA:
for pa in self.PA[gr]:
count_grism_pas += 1
if count_grism_pas == 1:
return 'N/A', 0, {}
weightf_orig = self.weightf*1
compare = {}
for t in templates:
compare[t] = 0, ''
for gr in self.PA:
all_grism_ids = []
for pa in self.PA[gr]:
all_grism_ids.extend(self.PA[gr][pa])
for pa in self.PA[gr]:
beams = [self.beams[i] for i in self.PA[gr][pa]]
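# Rather than rebuilding the fitting object for each grism / PA, beams outside
# the current PA are suppressed by scaling `weightf` down to ~1e-10 so the
# same masked arrays can be reused for the per-PA fit.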
this_weight = weightf_orig*0
for i in self.PA[gr][pa]:
this_weight += (self.idf == i)
self.weightf = weightf_orig*(this_weight + 1.e-10)
tfit_i = self.template_at_z(tfit['z'], templates=templates,
fit_background=fit_background,
fitter=fitter,
bounded_kwargs=bounded_kwargs)
tfit_i['dof'] = (self.weightf > 0).sum()
# Others
if (count_grism_pas > 1) and refit_others:
self.weightf = weightf_orig*((this_weight == 0) + 1.e-10)
tfit0 = self.template_at_z(tfit['z'], templates=templates,
fit_background=fit_background,
fitter=fitter,
bounded_kwargs=bounded_kwargs)
tfit0['dof'] = self.DoF
else:
tfit0 = tfit
#beam_tfit[gr][pa] = tfit_i
for t in templates:
cdiff = tfit_i['cfit'][t][0] - tfit0['cfit'][t][0]
cdiff_v = tfit_i['cfit'][t][1]**2 + tfit0['cfit'][t][1]**2
diff_sn = cdiff/np.sqrt(cdiff_v)
if diff_sn > compare[t][0]:
compare[t] = diff_sn, (gr, pa)
max_line_diff, max_line = 0, 'N/A'
for t in compare:
if not t.startswith('line '):
continue
if compare[t][0] > max_line_diff:
max_line_diff = compare[t][0]
max_line = t.strip('line ')
self.weightf = weightf_orig
return max_line, max_line_diff, compare
def compute_D4000(self, z, fit_background=True, fit_type='D4000', fitter='lstsq'):
"""Compute D4000 with step templates
Parameters
----------
z : float
Redshift where to evaluate D4000
fit_background : bool
Include background in step template fit
fit_type : 'D4000', 'Dn4000'
Definition to use:
D4000 = f_nu(4050-4250) / f_nu(3750-3950)
Dn4000 = f_nu(4000-4100) / f_nu(3850-3950)
fitter : str
Least-squares method passed to
`~grizli.fitting.GroupFitter.template_at_z`.
Returns
-------
w_d4000, t_d4000 : `~numpy.ndarray`, dict
Step wavelengths and template dictionary
tfit : dict
Fit dictionary returned by
`~grizli.fitting.GroupFitter.template_at_z`.
d4000, d4000_sigma : float
D4000 estimate and uncertainty from simple error propagation and
step template fit covariance.
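Examples
--------
Usage sketch (the fitted `mb` object is illustrative):
>>> res = mb.compute_D4000(z=0.8, fit_type='Dn4000')
>>> w_d4000, t_d4000, tfit, dn4000, dn4000_sigma = res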
"""
w_d4000, t_d4000 = utils.step_templates(special=fit_type)
tfit = self.template_at_z(z, templates=t_d4000, fitter=fitter,
fit_background=fit_background)
# elements for the D4000 bins
if fit_type == 'D4000':
to_fnu = 3850**2/4150**2
mask = np.array([c in ['rstep 3750-3950 0', 'rstep 4050-4250 0']
for c in tfit['cfit']])
elif fit_type == 'Dn4000':
to_fnu = 3900**2/4050**2
mask = np.array([c in ['rstep 3850-3950 0', 'rstep 4000-4100 0']
for c in tfit['cfit']])
else:
print(f'compute_d4000: fit_type={fit_type} not recognized')
return -np.inf, -np.inf, -np.inf, -np.inf, -np.inf
blue, red = tfit['coeffs'][mask]
cov = tfit['covar'][mask, :][:, mask]
# Error propagation
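# For the ratio D = red/blue, propagating the coefficient covariance gives
#   sigma_D**2 = D**2 * (s_b/blue**2 + s_r/red**2 - 2*s_br/(blue*red))
# with s_b, s_r the variances and s_br the covariance term.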
sb, sr = cov.diagonal()
sbr = cov[0, 1]
d4000 = red/blue
d4000_sigma = d4000*np.sqrt(sb/blue**2+sr/red**2-2*sbr/blue/red)
if (not np.isfinite(d4000)) | (not np.isfinite(d4000_sigma)):
d4000 = -99
d4000_sigma = -99
res = (w_d4000, t_d4000, tfit, d4000, d4000_sigma)
return res
def xmake_fit_plot(self, fit, tfit, show_beams=True, bin=1, minor=0.1,
scale_on_stacked_1d=True, loglam_1d=True, zspec=None):
"""
Make a diagnostic plot of the redshift fit
Parameters
----------
fit : `~astropy.table.Table`
Redshift fit results from
`~grizli.fitting.GroupFitter.xfit_redshift`
tfit : dict
Template fit at best redshift from
`~grizli.fitting.GroupFitter.template_at_z`
show_beams : bool
Show 1D spectra of all individual "beams"
bin : float
Binning factor relative to nominal wavelength resolution (1 pix)
of each grism
minor : float
Minor axis ticks, microns
scale_on_stacked_1d : bool
Set y limits based on stacked spectrum
loglam_1d : bool
Show log wavelengths
zspec : float, None
Spectroscopic redshift that will be indicated on the figure
Returns
-------
fig : `~matplotlib.figure.Figure`
Figure object
"""
import time
import matplotlib.pyplot as plt
import matplotlib.gridspec
from matplotlib.ticker import MultipleLocator
import grizli.model
# Initialize plot window
Ng = len(self.grisms)
gs = matplotlib.gridspec.GridSpec(1, 2,
width_ratios=[1, 1.5+0.5*(Ng > 1)],
hspace=0.)
xsize = 8+4*(Ng > 1)
fig = plt.figure(figsize=[xsize, 3.5])
# p(z)
axz = fig.add_subplot(gs[-1, 0]) # 121)
label = (self.group_name + '\n' +
'ID={0:<5d} z={1:.4f}'.format(self.id, fit.meta['z_map'][0]))
axz.text(0.95, 0.96, label, ha='right', va='top',
transform=axz.transAxes, fontsize=9)
if 'FeII-VC2004' in tfit['cfit']:
# Quasar templates
axz.text(0.04, 0.96, 'quasar templ.', ha='left', va='top',
transform=axz.transAxes, fontsize=5)
zmi, zma = fit['zgrid'].min(), fit['zgrid'].max()
if (zma-zmi) > 5:
ticks = np.arange(np.ceil(zmi), np.floor(zma)+0.5, 1)
lz = np.log(1+fit['zgrid'])
axz.plot(lz, np.log10(fit['pdf']), color='k')
axz.set_xticks(np.log(1+ticks))
axz.set_xticklabels(np.cast[int](ticks))
axz.set_xlim(lz.min(), lz.max())
else:
axz.plot(fit['zgrid'], np.log10(fit['pdf']), color='k')
axz.set_xlim(zmi, zma)
axz.set_xlabel(r'$z$')
axz.set_ylabel(r'$\log\ p(z)$'+' / ' + r'$\chi^2=\frac{{{0:.0f}}}{{{1:d}}}={2:.2f}$'.format(fit.meta['chimin'][0], fit.meta['DoF'][0], fit.meta['chimin'][0]/fit.meta['DoF'][0]))
# axz.set_yticks([1,4,9,16,25])
pzmax = np.log10(fit['pdf'].max())
axz.set_ylim(pzmax-6, pzmax+0.9)
axz.grid()
axz.yaxis.set_major_locator(MultipleLocator(base=1))
if zspec is not None:
label = '\n\n'+r'$z_\mathrm{spec}$='+'{0:.4f}'.format(zspec)
axz.text(0.95, 0.95, label, ha='right', va='top',
transform=axz.transAxes, color='r', fontsize=9)
axz.scatter(zspec, pzmax+0.3, color='r', marker='v', zorder=-100)
# Spectra
axc = fig.add_subplot(gs[-1, 1]) # 224)
self.oned_figure(bin=bin, show_beams=show_beams, minor=minor,
tfit=tfit, axc=axc,
scale_on_stacked=scale_on_stacked_1d,
loglam_1d=loglam_1d)
gs.tight_layout(fig, pad=0.1, w_pad=0.1)
fig.text(1-0.015*8./xsize, 0.02, time.ctime(), ha='right',
va='bottom', transform=fig.transFigure, fontsize=5)
return fig
def scale_to_photometry(self, tfit=None, tol=1.e-4, order=0, init=None, fit_background=True, Rspline=50, use_fit=True, **kwargs):
"""Compute scale factor between spectra and photometry
Parameters
----------
tfit : dict
Template fit info at a specific redshift from
`~grizli.fitting.GroupFitter.template_at_z`. If not specified,
then makes and fits spline templates
tol : float
Fit tolerance passed to the minimizer
order : int
Order of the polynomial scaling to fit
init : None
Initial parameters
fit_background : bool
Include additive background
Rspline : float
Spectral resolution ``R`` of spline templates
use_fit : bool
If True, compute the scaling against the best-fit ``line1d`` template
(with its error draws); otherwise use the binned 1D spectra directly
Returns
-------
res : object
Result from `scipy.optimize.least_squares`. The coefficients
of the linear scaling are in ``res.x``.
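Examples
--------
One possible calling pattern (the `mb` object is illustrative); `pscale`
is the attribute read by `compute_scale_array`:
>>> res = mb.scale_to_photometry(order=0)
>>> if res.status > 0:
...     mb.pscale = res.x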
"""
from scipy.optimize import minimize, least_squares
if self.Nphot == 0:
return np.array([10.])
if (tfit is None) & (fit_background):
wspline = np.arange(4200, 2.5e4)
df_spl = len(utils.log_zgrid(zr=[wspline[0], wspline[-1]],
dz=1./Rspline))
tspline = utils.bspline_templates(wspline, df=df_spl+2, log=True,
clip=0.0001)
tfit = self.template_at_z(z=0, templates=tspline,
include_photometry=False,
fit_background=fit_background,
draws=1000)
if use_fit:
oned = self.oned_spectrum(tfit=tfit, loglam=False)
wmi = np.min([oned[k]['wave'].min() for k in oned])
wma = np.max([oned[k]['wave'].max() for k in oned])
clip = (tfit['line1d'].wave > wmi) & (tfit['line1d'].wave < wma)
clip &= (tfit['line1d_err'] > 0)
spl_temp = utils.SpectrumTemplate(wave=tfit['line1d'].wave[clip],
flux=tfit['line1d'].flux[clip],
err=tfit['line1d_err'][clip])
args = (self, {'spl': spl_temp})
else:
oned = self.oned_spectrum(tfit=tfit, loglam=False)
args = (self, oned)
if init is None:
init = np.zeros(order+1)
init[0] = 10.
scale_fit = least_squares(self._objective_scale_direct, init,
jac='2-point', method='lm', ftol=tol,
xtol=tol, gtol=tol, x_scale=1.0,
loss='linear', f_scale=1.0, diff_step=None,
tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None,
verbose=0, args=args, kwargs={})
# pscale = scale_fit.x
return scale_fit
@staticmethod
def compute_scale_array(pscale, wave):
"""Return the scale array given the coefficients
Parameters
----------
pscale : array-like
Coefficients of the linear model normalized by factors of 10 per
order, i.e, ``pscale = [10]`` is a constant unit scaling. Note
that parameter order is reverse that expected by
`numpy.polyval`.
wave : array-like
Wavelength grid in Angstroms. Scaling is normalized to
``(wave - 1e4)/1000``.
Returns
-------
wscale : array-like
Scale factor
>>> pscale = [10]
>>> N = len(pscale)
>>> rescale = 10**(np.arange(N)+1)
>>> wscale = np.polyval((pscale/rescale)[::-1], (wave-1.e4)/1000.)
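With ``pscale = [10]`` this reduces to ``np.polyval([1.0], x)``, i.e., unit
scaling at every wavelength:
>>> wsc = GroupFitter.compute_scale_array([10], np.arange(1.e4, 1.8e4, 100.))
>>> np.allclose(wsc, 1.0)
True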
"""
N = len(pscale)
rescale = 10**(np.arange(N)+1)
wscale = np.polyval((pscale/rescale)[::-1], (wave-1.e4)/1000.)
return wscale
@staticmethod
def objfun_scale(pscale, AxT, data, self, retval):
"""
Objective function for fitting for a scale term between photometry and
spectra
"""
import scipy.optimize
from numpy import polyval
scale = self.compute_scale_array(pscale, self.wavef[self.fit_mask])
scale[-self.Nphot:] = 1.
Ax = (AxT.T*scale)
# Remove scaling from background component
for i in range(self.N):
Ax[i, :] /= scale
coeffs, rnorm = scipy.optimize.nnls(Ax.T, data)
#coeffs, rnorm, rank, s = np.linalg.lstsq(Ax.T, data)
full = np.dot(coeffs, Ax)
resid = data - full # - background
chi2 = np.sum(resid**2*self.weightf[self.fit_mask])
print('{0} {1:.1f}'.format(' '.join(['{0:6.2f}'.format(p) for p in pscale]), chi2))
if retval == 'resid':
return resid*np.sqrt(self.weightf[self.fit_mask])
if retval == 'coeffs':
return coeffs, full, resid, chi2, AxT
else:
return chi2
@staticmethod
def _objective_scale_direct(pscale, self, oned):
"""
Objective function for scaling spectra to photometry
"""
from eazy.filters import FilterDefinition
flam = []
eflam = []
spec_flux = []
filters = []
for filt in self.photom_filters:
clip = filt.throughput > 0.001*filt.throughput.max()
filters.append(FilterDefinition(name=filt.name,
wave=filt.wave[clip],
throughput=filt.throughput[clip]))
filters = np.array(filters)
lc = self.photom_pivot
for k in oned:
#spec, okfilt, lc = spec1d[k]
# Covered filters
if isinstance(oned[k], utils.SpectrumTemplate):
spec1 = utils.SpectrumTemplate(wave=oned[k].wave,
flux=3.e18/oned[k].wave**2)
else:
spec1 = utils.SpectrumTemplate(wave=oned[k]['wave'],
flux=3.e18/oned[k]['wave']**2)
flux1 = np.array([spec1.integrate_filter(filt, use_wave='filter')
for filt in filters])
okfilt = flux1 > 0.98
if okfilt.sum() == 0:
continue
if isinstance(oned[k], utils.SpectrumTemplate):
scale = 1./self.compute_scale_array(pscale, oned[k].wave)
spec = utils.SpectrumTemplate(wave=oned[k].wave,
flux=oned[k].flux*scale,
err=oned[k].err*scale)
else:
scale = 1./self.compute_scale_array(pscale, oned[k]['wave'])
spec = utils.SpectrumTemplate(wave=oned[k]['wave'],
flux=oned[k]['flux']*scale/np.maximum(oned[k]['flat'], 1),
err=oned[k]['err']*scale/np.maximum(oned[k]['flat'], 1))
filt_fnu = [spec.integrate_filter(filt, use_wave='templ')
for filt in filters[okfilt]]
spec_flux.append((np.array(filt_fnu).T*3.e18/lc[okfilt]**2).T)
flam.append((self.photom_flam/self.photom_ext_corr)[okfilt])
eflam.append((self.photom_eflam/self.photom_ext_corr)[okfilt])
if flam == []:
return [0]
spec_flux = np.vstack(spec_flux)
flam = np.hstack(flam)
eflam = np.hstack(eflam)
chi2 = (flam-spec_flux[:, 0])**2/(eflam**2+spec_flux[:, 1]**2)
#print(pscale, chi2.sum())
return chi2
def xfit_star(self, tstar=None, spline_correction=True, fitter='nnls', fit_background=True, spline_args={'Rspline': 5}, oned_args={}):
"""Fit stellar templates
Parameters
----------
tstar : dict
Dictionary of stellar `~grizli.utils.SpectrumTemplate` objects
spline_correction : bool
Include spline scaling correction for template mismatch
fitter : str
Least-squares method passed to
`~grizli.fitting.GroupFitter.template_at_z`.
fit_background : bool
Fit for additive background component
spline_args : dict
Parameters passed to `~grizli.utils.split_spline_template` for
generating the spline correction arrays
oned_args : dict
Keywords passed to `oned_figure`
Returns
-------
fig : `~matplotlib.figure.Figure`
Figure object
line : str
Line of text describing the best fit
tfit : dict
Fit information from `~grizli.fitting.GroupFitter.template_at_z`
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec
from matplotlib.ticker import MultipleLocator
if tstar is None:
tstar = utils.load_templates(fwhm=1200, line_complexes=True,
fsps_templates=True, stars=True)
NTEMP = len(tstar)
chi2 = np.zeros(NTEMP)
types = np.array(list(tstar.keys()))
split_templates = []
split_fits = []
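# With `spline_correction`, each stellar template is split into spline-weighted
# components (utils.split_spline_template) so that a smooth multiplicative
# correction for template / calibration mismatch is fit along with the stellar
# spectrum; `sfit0` below is the spline-only (flat-spectrum) baseline used for
# the delta-chi2 comparison shown in the figure.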
# Spline only
if spline_correction:
w0 = np.arange(3000, 2.e4, 100)
t0 = utils.SpectrumTemplate(wave=w0, flux=np.ones_like(w0))
ts = utils.split_spline_template(t0, **spline_args)
sfit0 = self.template_at_z(z=0, templates=ts,
fit_background=fit_background,
fitter=fitter, get_uncertainties=2)
else:
sfit0 = None
########
# Loop over templates
for ik, k in enumerate(tstar):
if spline_correction:
ts = utils.split_spline_template(tstar[k], **spline_args)
else:
ts = {k: tstar[k]}
split_templates.append(ts)
print(k)
sfit = self.template_at_z(z=0, templates=ts,
fit_background=fit_background,
fitter=fitter, get_uncertainties=2)
split_fits.append(sfit)
chi2 = np.array([sfit['chi2'] for sfit in split_fits])
ixbest = np.argmin(chi2)
# Initialize plot window
Ng = len(self.grisms)
gs = matplotlib.gridspec.GridSpec(1, 2,
width_ratios=[1, 1.5+0.5*(Ng > 1)],
hspace=0.)
figsize = [8+4*(Ng > 1), 3.5]
fig = plt.figure(figsize=figsize)
# p(z)
axz = fig.add_subplot(gs[-1, 0]) # 121)
if ('_g' in k) & ('_t' in k):
hast = True
teff = np.array([float(k.split('_')[1][1:]) for k in tstar])
logg = np.array([float(k.split('_')[2][1:]) for k in tstar])
met = np.array([float(k.split('_')[3][1:]) for k in tstar])
if 'bt-settl_t04000_g4.5_m-1.0' in tstar:
# Order by metallicity
for g in np.unique(met):
ig = met == g
so = np.argsort(teff[ig])
axz.plot(teff[ig][so], chi2[ig][so]-chi2.min(),
label='m{0:.1f}'.format(g))
else:
# Order by log-g
for g in np.unique(logg):
ig = logg == g
so = np.argsort(teff[ig])
axz.plot(teff[ig][so], chi2[ig][so]-chi2.min(),
label='g{0:.1f}'.format(g))
if logg[ixbest] == 0.:
label = 'carbon'
else:
label = '{0} t{1:.0f} g{2:.1f} m{3:.1f}'
label = label.format(k.split('_')[0], teff[ixbest],
logg[ixbest], met[ixbest])
else:
hast = False
axz.plot(chi2-chi2.min(), marker='.', color='k')
label = types[np.argmin(chi2)].strip('stars/').strip('.txt')
axz.text(0.95, 0.96,
self.group_name + '\n' + f'ID={self.id:<5d} {label:s}',
ha='right', va='top', transform=axz.transAxes, fontsize=9,
bbox=dict(facecolor='w', alpha=0.8))
if hast:
axz.set_xlabel(r'Teff')
axz.legend(fontsize=7, loc='lower right')
else:
axz.set_xlabel(r'Sp. Type')
axz.set_ylabel(r'$\chi^2_\nu$'+' ; ' + r'$\chi^2_\mathrm{{min}}=\frac{{{0:.0f}}}{{{1:d}}}={2:.2f}$'.format(chi2.min(), self.DoF, chi2.min()/self.DoF))
if len(tstar) < 30:
tx = [t.strip('stars/').strip('.txt') for t in types]
axz.set_xticks(np.arange(len(tx)))
tl = axz.set_xticklabels(tx)
for ti in tl:
ti.set_size(8)
axz.set_ylim(-2, 27)
axz.set_yticks([1, 4, 9, 16, 25])
axz.grid()
# axz.yaxis.set_major_locator(MultipleLocator(base=1))
# Spectra
axc = fig.add_subplot(gs[-1, 1]) # 224)
self.oned_figure(tfit=split_fits[ixbest], axc=axc, **oned_args)
if spline_correction:
sfit = split_fits[ixbest]
cspl = np.array([sfit['cfit'][t] for t in sfit['cfit']])
spline_func = sfit['templates'].tspline.dot(cspl[self.N:, 0])
yl = axc.get_ylim()
xl = axc.get_xlim()
y0 = np.interp(np.mean(xl),
sfit['templates'].wspline/1.e4,
spline_func)
spl, = axc.plot(sfit['templates'].wspline/1.e4,
spline_func/y0*yl[1]*0.8, color='k',
linestyle='--', alpha=0.5,
label='Spline correction')
# Spline-only
splt = sfit0['cont1d']
delta_chi = sfit0['chi2'] - sfit['chi2']
label2 = r'Spline only - $\Delta\chi^2$ = '
label2 += '{0:.1f}'.format(delta_chi)
spl2, = axc.plot(splt.wave/1.e4, splt.flux/1.e-19,
color='pink', alpha=0.8, label=label2)
axc.legend([spl, spl2], ['Spline correction', label2],
loc='upper right', fontsize=8)
gs.tight_layout(fig, pad=0.1, w_pad=0.1)
fig.text(1-0.015*12./figsize[0], 0.02, time.ctime(), ha='right',
va='bottom', transform=fig.transFigure, fontsize=5)
best_templ = list(tstar.keys())[ixbest]
if best_templ.startswith('bt-settl_t05000_g0.0'):
best_templ = 'carbon'
tfit = split_fits[ixbest]
# Non-zero templates
if fit_background:
nk = (tfit['coeffs'][self.N:] > 0).sum()
else:
nk = (tfit['coeffs'] > 0).sum()
if sfit0 is not None:
chi2_flat = sfit0['chi2']
else:
chi2_flat = chi2[ixbest]
line = '# root id ra dec chi2 chi2_flat dof nk best_template as_epsf\n'
line += '{0:16} {1:>5d} {2:.6f} {3:.6f} {4:10.3f} {5:10.3f} {6:>10d} {7:>10d} {8:20s} {9:0d}'.format(self.group_name, self.id, self.ra, self.dec, chi2[ixbest], chi2_flat, self.DoF, nk, best_templ, (self.psf_param_dict is not None)*1)
print('\n'+line+'\n')
return fig, line, tfit
def oned_figure(self, bin=1, wave=None, show_beams=True, minor=0.1, tfit=None, show_rest=False, axc=None, figsize=[6, 4], fill=False, units='flam', min_sens_show=0.1, ylim_percentile=2, scale_on_stacked=False, show_individual_templates=False, apply_beam_mask=True, loglam_1d=True, trace_limits=None, show_contam=False, beam_models=None):
"""
Make a figure showing the 1D spectra
Parameters
----------
bin : float
Binning factor relative to nominal resolution (per pix) of each
grism
wave : None, array
Fixed wavelength array for the sampled spectra
show_beams : bool
Show all individual beams
minor : float
Minor axis tick interval (microns)
tfit : dict
Fit information from `~grizli.fitting.GroupFitter.template_at_z`.
If provided, then will include the best-fit models in the figure
show_rest : bool
Show rest-frame wavelengths
axc : `~matplotlib.axes._subplots.AxesSubplot`
If provided, then draw into existing axis without making a new
figure
figsize : (float, float)
Figure size (inches)
fill : bool
plot filled spectra
show_individual_templates : bool
Show each individual template with its scaling along with the
best-fit combination
units : str
Y-axis units
- 'flam' = Scaled f-lambda cgs
- 'nJy' = nanoJansky
- 'uJy' = microJansky
- 'eps' = native detector units of electrons per second
- 'meps' = "milli"-electrons per second
- 'spline[N]' = Divide out a spline continuum
- 'resid' = Residuals w.r.t. model in `tfit`
loglam_1d : bool
Plot as log wavelength
trace_limits : (float, float)
If provided, extract spectra relative to the (tilted) spectral
trace
show_contam : bool
Include curves for contamination model
min_sens_show : float
Minimum sensitivity, relative to its maximum, for pixels included in
the per-beam 1D plots
ylim_percentile : float
Percentile of the plotted flux densities used to set the y-axis limits
Returns
-------
fig : `~matplotlib.figure.Figure`
Figure object
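Examples
--------
Example call (the `mb` and `fit` objects are illustrative):
>>> tfit = mb.template_at_z(z=fit.meta['z_map'][0])
>>> fig = mb.oned_figure(tfit=tfit, units='flam', bin=2, show_beams=True)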
"""
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
if (tfit is None) & (units in ['resid', 'nresid', 'spline']):
print('`tfit` not specified. Can\'t plot units=\'{0}\'.'.format(units))
return False
# Spectra
if axc is None:
fig = plt.figure(figsize=figsize)
axc = fig.add_subplot(111)
newfigure = True
else:
newfigure = False
ymin = 1.e30
ymax = -1.e30
#wmin = 1.e30
#wmax = -1.e30
if not show_beams:
scale_on_stacked = True
if wave is not None:
show_beams = False
if units.startswith('spline'):
ran = ((tfit['cont1d'].wave >= self.wavef.min()) &
(tfit['cont1d'].wave <= self.wavef.max()))
if ran.sum() == 0:
print('No overlap with template')
return False
if True:
try:
df = int(units.split('spline')[1])
except:
df = 21
Aspl = utils.bspline_templates(tfit['cont1d'].wave[ran],
degree=3, df=df,
get_matrix=True, log=True)
cspl, _, _, _ = np.linalg.lstsq(Aspl, tfit['cont1d'].flux[ran], rcond=-1)
yspl = tfit['cont1d'].flux*0.
yspl[ran] = Aspl.dot(cspl)
else:
spl = UnivariateSpline(tfit['cont1d'].wave[ran],
tfit['cont1d'].flux[ran], ext=1)
yspl = spl(tfit['cont1d'].wave)
mspl = (tfit['cont1d'].wave, yspl)
else:
mspl = None
# 1D Model
xlabel, zp1 = r'$\lambda$', 1.
if tfit is not None:
sp = tfit['line1d'].wave, tfit['line1d'].flux
w = sp[0]
if show_rest:
zp1 = (1+tfit['z'])
xlabel = r'$\lambda_\mathrm{rest}$'+' (z={0:.2f})'.format(tfit['z'])
else:
sp = None
w = np.arange(self.wavef.min()-201, self.wavef.max()+201, 100)
spf = w, w*0+1
for i in range(self.N):
beam = self.beams[i]
if apply_beam_mask:
b_mask = beam.fit_mask.reshape(beam.sh)
else:
b_mask = 1
if tfit is not None:
m_i = beam.compute_model(spectrum_1d=sp, is_cgs=True, in_place=False).reshape(beam.sh)
elif beam_models is not None:
m_i = beam_models[i]
else:
m_i = None
if mspl is not None:
mspl_i = beam.compute_model(spectrum_1d=mspl, is_cgs=True, in_place=False).reshape(beam.sh)
try:
f_i = beam.flat_flam.reshape(beam.sh)*1
except:
f_i = beam.compute_model(spectrum_1d=spf, is_cgs=True, in_place=False).reshape(beam.sh)
if hasattr(beam, 'init_epsf'): # grizli.model.BeamCutout
if beam.grism.instrument == 'NIRISS':
grism = beam.grism.pupil
else:
grism = beam.grism.filter
clean = beam.grism['SCI'] - beam.contam
if tfit is not None:
clean -= tfit['cfit']['bg {0:03d}'.format(i)][0]
if m_i is not None:
w, flm, erm = beam.beam.optimal_extract(m_i, bin=bin, ivar=beam.ivar*b_mask)
else:
flm = None
if mspl is not None:
w, flspl, erm = beam.beam.optimal_extract(mspl_i, bin=bin, ivar=beam.ivar*b_mask)
w, fl, er = beam.beam.optimal_extract(clean, bin=bin, ivar=beam.ivar*b_mask)
#w, flc, erc = beam.beam.optimal_extract(beam.contam, bin=bin, ivar=beam.ivar*b_mask)
w, sens, ers = beam.beam.optimal_extract(f_i, bin=bin, ivar=beam.ivar*b_mask)
#sens = beam.beam.sensitivity
else:
grism = beam.grism
clean = beam.sci - beam.contam
if tfit is not None:
clean -= tfit['cfit']['bg {0:03d}'.format(i)][0]
if m_i is not None:
w, flm, erm = beam.optimal_extract(m_i, bin=bin, ivar=beam.ivar*b_mask)
if mspl is not None:
w, flspl, erm = beam.beam.optimal_extract(mspl_i, bin=bin, ivar=beam.ivar*b_mask)
w, fl, er = beam.optimal_extract(clean, bin=bin, ivar=beam.ivar*b_mask)
#w, flc, erc = beam.optimal_extract(beam.contam, bin=bin, ivar=beam.ivar*b_mask)
w, sens, ers = beam.optimal_extract(f_i, bin=bin, ivar=beam.ivar*b_mask)
#sens = beam.sens
sens[~np.isfinite(sens)] = 1
pscale = 1.
if hasattr(self, 'pscale'):
if (self.pscale is not None):
pscale = self.compute_scale_array(self.pscale, w)
if units.lower() == 'njy':
unit_corr = 1./sens*w**2/2.99e18/1.e-23/1.e-9 # /pscale
unit_label = r'$f_\nu$ (nJy)'
elif units.lower() == 'ujy':
unit_corr = 1./sens*w**2/2.99e18/1.e-23/1.e-6 # /pscale
unit_label = r'$f_\nu$ ($\mu$Jy)'
elif units == 'meps':
unit_corr = 1000.
unit_label = 'milli-e/s'
elif units == 'eps':
unit_corr = 1.
unit_label = 'e/s'
elif units == 'resid':
unit_corr = 1./sens*1.e19
unit_label = r'resid ($f_\lambda \times 10^{19}$)'
elif units == 'nresid':
unit_corr = 1./flm
unit_label = 'norm. resid'
elif units.startswith('spline'):
unit_corr = 1./flspl
unit_label = 'spline resid'
else: # 'flam
unit_corr = 1./sens/1.e-19 # /pscale
unit_label = r'$f_\lambda$ [$10^{-19}$ erg/s/cm2/A]'
w = w/1.e4
clip = (sens > min_sens_show*sens.max())
clip &= (er > 0)
if clip.sum() == 0:
continue
fl *= unit_corr/pscale # /1.e-19
# flc *= unit_corr/pscale#/1.e-19
er *= unit_corr/pscale # /1.e-19
if flm is not None:
flm *= unit_corr # /1.e-19
if units == 'resid':
fl -= flm
flm -= flm
f_alpha = 1./(self.Ngrism[grism.upper()])*0.8 # **0.5
# Plot
# pscale = 1.
# if hasattr(self, 'pscale'):
# if (self.pscale is not None):
# pscale = self.compute_scale_array(self.pscale, w[clip]*1.e4)
if show_beams:
if (show_beams == 1) & (f_alpha < 0.09):
axc.errorbar(w[clip]/zp1, fl[clip], er[clip], color='k', alpha=f_alpha, marker='.', linestyle='None', zorder=1)
else:
axc.errorbar(w[clip]/zp1, fl[clip], er[clip], color=GRISM_COLORS[grism], alpha=f_alpha, marker='.', linestyle='None', zorder=1)
if flm is not None:
axc.plot(w[clip]/zp1, flm[clip], color='r', alpha=f_alpha, linewidth=2, zorder=10)
# Plot limits
ep = np.percentile(er[clip], ylim_percentile)
ymax = np.maximum(ymax, np.percentile((flm+ep)[clip],
100-ylim_percentile))
ymin = np.minimum(ymin, np.percentile((flm-er*0.)[clip],
ylim_percentile))
else:
# Plot limits
ep = np.percentile(er[clip], ylim_percentile)
ymax = np.maximum(ymax, np.percentile((fl+ep)[clip], 95))
ymin = np.minimum(ymin, np.percentile((fl-er*0.)[clip], 5))
#wmax = np.maximum(wmax, w[clip].max())
#wmin = np.minimum(wmin, w[clip].min())
lims = [utils.GRISM_LIMITS[g][:2] for g in self.PA]
wmin = np.min(lims) # *1.e4
wmax = np.max(lims) # *1.e4
# Cleanup
axc.set_xlim(wmin/zp1, wmax/zp1)
try:
axc.semilogx(subs=[wmax])
except:
axc.semilogx(subsx=[wmax])
# axc.set_xticklabels([])
axc.set_xlabel(xlabel)
axc.set_ylabel(unit_label)
# axc.xaxis.set_major_locator(MultipleLocator(0.1))
for ax in [axc]: # [axa, axb, axc]:
labels = np.arange(np.ceil(wmin/minor/zp1), np.ceil(wmax/minor/zp1))*minor
ax.set_xticks(labels)
if minor < 0.1:
ax.set_xticklabels(['{0:.2f}'.format(li) for li in labels])
else:
ax.set_xticklabels(['{0:.1f}'.format(li) for li in labels])
# Binned spectrum by grism
if (tfit is None) | (scale_on_stacked) | (not show_beams):
ymin = 1.e30
ymax = -1.e30
if self.Nphot > 0:
sp_flat = self.optimal_extract(self.flat_flam[self.fit_mask[:-self.Nphotbands]], bin=bin, wave=wave, loglam=loglam_1d, trace_limits=trace_limits)
else:
sp_flat = self.optimal_extract(self.flat_flam[self.fit_mask], bin=bin, wave=wave, loglam=loglam_1d, trace_limits=trace_limits)
if tfit is not None:
bg_model = self.get_flat_background(tfit['coeffs'], apply_mask=True)
m2d = self.get_flat_model(sp, apply_mask=True, is_cgs=True)
sp_model = self.optimal_extract(m2d, bin=bin, wave=wave,
loglam=loglam_1d,
trace_limits=trace_limits)
else:
bg_model = 0.
sp_model = 1.
if mspl is not None:
m2d = self.get_flat_model(mspl, apply_mask=True, is_cgs=True)
sp_spline = self.optimal_extract(m2d, bin=bin, wave=wave,
loglam=loglam_1d,
trace_limits=trace_limits)
sp_data = self.optimal_extract(self.scif_mask[:self.Nspec]-bg_model,
bin=bin, wave=wave, loglam=loglam_1d,
trace_limits=trace_limits)
# Contamination
if show_contam:
sp_contam = self.optimal_extract(self.contamf_mask[:self.Nspec],
bin=bin, wave=wave, loglam=loglam_1d,
trace_limits=trace_limits)
for g in sp_data:
clip = (sp_flat[g]['flux'] != 0) & np.isfinite(sp_data[g]['flux']) & np.isfinite(sp_data[g]['err']) & np.isfinite(sp_flat[g]['flux'])
if tfit is not None:
clip &= np.isfinite(sp_model[g]['flux'])
if clip.sum() == 0:
continue
pscale = 1.
if hasattr(self, 'pscale'):
if (self.pscale is not None):
pscale = self.compute_scale_array(self.pscale, sp_data[g]['wave'])
if units.lower() == 'njy':
unit_corr = sp_data[g]['wave']**2/sp_flat[g]['flux']
unit_corr *= 1/2.99e18/1.e-23/1.e-9 # /pscale
elif units.lower() == 'ujy':
unit_corr = sp_data[g]['wave']**2/sp_flat[g]['flux']
unit_corr *= 1/2.99e18/1.e-23/1.e-6 # /pscale
elif units == 'meps':
unit_corr = 1000.
elif units == 'eps':
unit_corr = 1.
elif units == 'resid':
unit_corr = 1./sp_flat[g]['flux']
elif units == 'nresid':
unit_corr = 1./sp_model[g]['flux']
elif units.startswith('spline'):
unit_corr = 1./sp_spline[g]['flux']
else: # 'flam
unit_corr = 1./sp_flat[g]['flux']/1.e-19 # /pscale
flux = (sp_data[g]['flux']*unit_corr/pscale)[clip]
err = (sp_data[g]['err']*unit_corr/pscale)[clip]
if units == 'resid':
flux -= (sp_model[g]['flux']*unit_corr)[clip]
ep = np.percentile(err, ylim_percentile)
if fill:
axc.fill_between(sp_data[g]['wave'][clip]/zp1/1.e4, flux-err, flux+err, color=GRISM_COLORS[g], alpha=0.8, zorder=1, label=g)
else:
axc.errorbar(sp_data[g]['wave'][clip]/zp1/1.e4, flux, err, color=GRISM_COLORS[g], alpha=0.8, marker='.', linestyle='None', zorder=1, label=g)
if show_contam:
contam = (sp_contam[g]['flux']*unit_corr/pscale)[clip]
axc.plot(sp_data[g]['wave'][clip]/zp1/1.e4, contam,
color='brown')
if ((tfit is None) & (clip.sum() > 0)) | (scale_on_stacked):
# Plot limits
ymax = np.maximum(ymax, np.percentile((flux+ep),
100-ylim_percentile))
ymin = np.minimum(ymin, np.percentile((flux-ep),
ylim_percentile))
if (ymin < 0) & (ymax > 0):
ymin = -0.1*ymax
if not np.isfinite(ymin+ymax):
ymin = 0.
ymax = 10.
axc.set_ylim(ymin-0.2*ymax, 1.2*ymax)
axc.grid()
if (ymin-0.2*ymax < 0) & (1.2*ymax > 0):
axc.plot([wmin/zp1, wmax/zp1], [0, 0], color='k', linestyle=':', alpha=0.8)
# Individual templates
if ((tfit is not None) & (show_individual_templates > 0) &
(units.lower() in ['flam', 'njy', 'ujy'])):
xt, yt, mt = utils.array_templates(tfit['templates'], z=tfit['z'],
apply_igm=(tfit['z'] > IGM_MINZ))
cfit = np.array([tfit['cfit'][t][0] for t in tfit['cfit']])
xt *= (1+tfit['z'])
if units.lower() == 'njy':
unit_corr = xt**2/2.99e18/1.e-23/1.e-9 # /pscale
elif units.lower() == 'ujy':
unit_corr = xt**2/2.99e18/1.e-23/1.e-6 # /pscale
else: # 'flam
unit_corr = 1./1.e-19 # /pscale
tscl = (yt.T*cfit[self.N:]).T/(1+tfit['z'])*unit_corr
t_names = np.array(list(tfit['cfit'].keys()))[self.N:]
is_spline = np.array([t.split()[0] in ['bspl', 'step', 'poly'] for t in tfit['cfit']][self.N:])
if is_spline.sum() > 0:
spline_templ = tscl[is_spline,:].sum(axis=1)
axc.plot(xt/1.e4, spline_templ, color='k', alpha=0.5)
for ti in tscl[is_spline,:]:
axc.plot(xt/zp1/1.e4, ti, color='k', alpha=0.1)
for ci, ti, tn in zip(cfit[self.N:][~is_spline], tscl[~is_spline,:], t_names[~is_spline]):
if ci == 0:
continue
if show_individual_templates > 1:
axc.plot(xt/zp1/1.e4, ti, alpha=0.6, label=tn.strip('line '))
else:
axc.plot(xt/zp1/1.e4, ti, alpha=0.6)
if show_individual_templates > 1:
axc.legend(fontsize=6)
# Photometry?
if newfigure:
axc.text(0.95, 0.95, '{0} {1:>5d}'.format(self.group_name, self.id), ha='right', va='top', transform=axc.transAxes)
fig.tight_layout(pad=0.2)
return fig
else:
return True
###
# Generic functions for generating flat model and background arrays
###
def optimal_extract(self, data=None, bin=1, wave=None, ivar=None, trace_limits=None, loglam=True, **kwargs):
"""
Binned optimal extractions by grism with the algorithm from `Horne (1986) <http://adsabs.harvard.edu/full/1986PASP...98..609H>`_
The spatial profile for each beam is the 2D model spectrum generated
using its attached direct image thumbnail. The Horne (1986) algorithm
is essentially a least-squares fit of the spatial model to the
observed 2D spectrum, weighted by the uncertainties.
Along with the optimal extraction, this method also implements an
option to extract an effective "aperture" within a specified region
above and below the spectral trace.
While the traces may not be directly aligned with the `x` axis of the
2D spectra, both the optimal and trace extractions extract along `y`
pixels at a given `x`.
Parameters
----------
data : `~numpy.ndarray`, None
Data array with same dimensions as ``self.scif_mask``
(flattened & masked) 2D spectra of all beams. If ``None``, then
use ``self.scif_mask``.
bin : int
Binning factor relative to the grism-dependent resolution values,
specified in `~grizli.utils.GRISM_LIMITS`.
wave : `~numpy.ndarray`, None
Wavelength bin edges. If `None`, then compute from parameters in
`~grizli.utils.GRISM_LIMITS`.
ivar : `~numpy.ndarray`, None
Inverse variance array with same dimensions as ``self.scif_mask``
(flattened & masked) 2D spectra of all beams. If ``None``, then
use ``self.weighted_sigma2_mask``.
trace_limits : [float, float] or None
If specified, perform a simple sum in cross-dispersion axis
between ``trace_limits`` relative to the central pixel of the
trace rather than the optimally-weighted sum. Similarly, the
output variances are the sum of the input variances in the trace
interval.
Note that the trace interval is evaluated with ``< >``, as opposed
to ``<= >=``, as the center of the trace is a float rather than an
integer pixel index.
loglam : bool
If True and ``wave`` not specified (see above), then output
wavelength grid is log-spaced.
Returns
-------
tab : dict
Dictionary of `~astropy.table.Table` spectra for each available
grism.
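Examples
--------
Illustrative sketch only; ``mb`` stands in for an initialized fitting
object (e.g., a `MultiBeam`) whose masked arrays have been set up, and
the returned dictionary is keyed by the grisms present in the data::
    tab = mb.optimal_extract(bin=2)                 # optimally-weighted sum
    tab = mb.optimal_extract(trace_limits=[-3, 3])  # simple sum around the trace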
"""
import astropy.units as u
if not hasattr(self, 'optimal_profile_mask'):
self.initialize_masked_arrays()
if data is None:
data = self.scif_mask
if data.size not in [self.Nmask, self.Nspec]:
print('`data` has to be sized like masked arrays (self.fit_mask)')
return False
if ivar is None:
#ivar = 1./self.sigma2_mask
ivar = 1./self.weighted_sigma2_mask
if trace_limits is None:
prof = self.optimal_profile_mask
# Horne (1986) optimal extraction
# f_opt = Sum(P*D/V) / Sum(P**2/V)
num = prof[:self.Nspec]*data[:self.Nspec]*ivar[:self.Nspec]
den = prof[:self.Nspec]**2*ivar[:self.Nspec]
else:
# Trace extraction, sum of fluxes and variances
prof = np.isfinite(self.optimal_profile_mask)
trace_mask = ((self.yp_trace_mask > trace_limits[0]) &
(self.yp_trace_mask < trace_limits[1]))[:self.Nspec]
num = data[:self.Nspec]*trace_mask
den = (1/ivar[:self.Nspec])*trace_mask
den[~np.isfinite(den)] = 0
dmask = den > 0
out = {}
for grism in self.Ngrism:
Ng = self.Ngrism[grism]
lim = utils.GRISM_LIMITS[grism]
if wave is None:
if loglam:
ran = np.array(lim[:2])*1.e4
ran[1] += lim[2]*bin
wave_bin = utils.log_zgrid(ran, lim[2]*bin/np.mean(ran))
else:
wave_bin = np.arange(lim[0]*1.e4, lim[1]*1.e4+lim[2]*bin,
lim[2]*bin)
else:
wave_bin = wave
flux_bin = wave_bin[:-1]*0.
var_bin = wave_bin[:-1]*0.
n_bin = wave_bin[:-1]*0.
gmask = self.grism_name_mask == grism
for j in range(len(wave_bin)-1):
#ix = np.abs(self.wave_mask-wave_bin[j]) < lim[2]*bin/2.
# Wavelength bin
ix = (self.wave_mask >= wave_bin[j])
ix &= (self.wave_mask < wave_bin[j+1])
ix &= gmask
if ix.sum() > 0:
n_bin[j] = ix.sum()
if trace_limits is None:
var_bin[j] = 1./den[ix].sum()
flux_bin[j] = num[ix].sum()*var_bin[j]
else:
Nj = len(np.unique(self.exposure_id_mask[ix]))*bin
#_ids, counts = np.unique(self.exposure_id_mask[ix],
# return_counts=True)
#Nj = counts.sum()
#Nj = bin*Ng
var_bin[j] = den[ix].sum()/Nj
flux_bin[j] = num[ix].sum()/Nj
binned_spectrum = utils.GTable()
w_g = (wave_bin[:-1]+np.diff(wave_bin)/2)*u.Angstrom
binned_spectrum['wave'] = w_g
binned_spectrum['flux'] = flux_bin*(u.electron/u.second)
binned_spectrum['err'] = np.sqrt(var_bin)*(u.electron/u.second)
binned_spectrum['npix'] = np.cast[int](n_bin)
binned_spectrum.meta['GRISM'] = (grism, 'Grism name')
binned_spectrum.meta['BIN'] = (bin, 'Spectrum binning factor')
binned_spectrum.meta['NEXP'] = (Ng, 'Number of exposures')
out[grism] = binned_spectrum
return out
def initialize_masked_arrays(self, seg_ids=None):
"""
Initialize flat masked arrays for faster likelihood calculation
"""
if isinstance(self.beams[0], model.BeamCutout):
# MultiBeam
if self.Nphot > 0:
self.contamf_mask = self.contamf[self.fit_mask[:-self.Nphotbands]]
else:
self.contamf_mask = self.contamf[self.fit_mask]
p = []
for beam in self.beams:
for attr in ['xp','xp_mask']:
if hasattr(beam, attr):
delattr(beam, attr)
beam.beam.init_optimal_profile(seg_ids=seg_ids)
p.append(beam.beam.optimal_profile.flatten()[beam.fit_mask])
self.optimal_profile_mask = np.hstack(p)
# trace offset
p = []
for beam in self.beams:
# w.r.t trace
yp, xp = np.indices(beam.sh)
ypt = (yp + 1 - (beam.sh[0]/2.+beam.beam.ytrace))
beam.ypt = ypt
p.append(ypt.flatten()[beam.fit_mask])
self.yp_trace_mask = np.hstack(p)
# Inverse sensitivity
self.sens_mask = np.hstack([np.dot(np.ones(beam.sh[0])[:, None], beam.beam.sensitivity[None, :]).flatten()[beam.fit_mask] for beam in self.beams])
self.grism_name_mask = np.hstack([[beam.grism.pupil]*beam.fit_mask.sum() if beam.grism.instrument == 'NIRISS' else [beam.grism.filter]*beam.fit_mask.sum() for beam in self.beams])
self.exposure_id_mask = np.hstack([[i]*beam.fit_mask.sum() for i, beam in enumerate(self.beams)])
else:
# StackFitter
self.contamf_mask = np.hstack([beam.contamf[beam.fit_mask]
for beam in self.beams])
p = []
for beam in self.beams:
beam.init_optimal_profile()
p.append(beam.optimal_profile.flatten()[beam.fit_mask])
self.optimal_profile_mask = np.hstack(p)
# Inverse sensitivity
self.sens_mask = np.hstack([np.dot(np.ones(beam.sh[0])[:, None], beam.sens[None, :]).flatten()[beam.fit_mask] for beam in self.beams])
self.grism_name_mask = np.hstack([[beam.grism]*beam.fit_mask.sum() for beam in self.beams])
self.exposure_id_mask = np.hstack([[i]*beam.fit_mask.sum() for i, beam in enumerate(self.beams)])
self.wave_mask = np.hstack([np.dot(np.ones(beam.sh[0])[:, None], beam.wave[None, :]).flatten()[beam.fit_mask] for beam in self.beams])
# (scif attribute is already contam subtracted)
self.scif_mask = self.scif[self.fit_mask]
# sigma
self.sigma_mask = 1/self.sivarf[self.fit_mask]
# sigma-squared
self.sigma2_mask = self.sigma_mask**2
#self.sigma2_mask = 1/self.ivarf[self.fit_mask]
# weighted sigma-squared
#self.weighted_sigma2_mask = 1/(self.weightf*self.ivarf)[self.fit_mask]
self.weighted_sigma2_mask = 1/(self.weightf*self.sivarf**2)[self.fit_mask]
self.Nmask = self.fit_mask.sum()
if hasattr(self, 'Nphot'):
self.Nspec = self.Nmask - self.Nphot
else:
self.Nspec = self.Nmask
def get_flat_model(self, spectrum_1d, id=None, apply_mask=True, is_cgs=True):
"""
Generate model array based on the model 1D spectrum in ``spectrum_1d``
Parameters
----------
spectrum_1d : tuple, -1
Tuple of 1D arrays (wavelength, flux). If ``-1``, then use the
in_place ``model`` attributes of each beam.
id : int
Value that identifies pixels in the segmentation thumbnail with
the desired object to model
apply_mask : bool
Return the model pixels applying the `~grizli.model.BeamCutout`
``fit_mask`` attribute
is_cgs : bool
``spectrum_1d`` flux array has CGS f-lambda flux density units.
Returns
-------
model : Array with dimensions ``(self.fit_mask.sum(),)``
Flattened, masked model array.
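Examples
--------
Illustrative sketch only; ``mb`` stands in for an initialized fitting
object and ``wave``/``flam`` for a 1D model spectrum in f-lambda CGS units::
    flat = mb.get_flat_model((wave, flam), is_cgs=True)
    # flat.shape == (mb.fit_mask.sum(),)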
"""
mfull = []
for ib, beam in enumerate(self.beams):
if spectrum_1d == -1:
model_i = beam.model*1
else:
model_i = beam.compute_model(id=id, spectrum_1d=spectrum_1d,
is_cgs=is_cgs, in_place=False)
if apply_mask:
mfull.append(model_i.flatten()[beam.fit_mask])
else:
mfull.append(model_i.flatten())
return np.hstack(mfull)
def get_flat_background(self, bg_params, apply_mask=True):
"""
Generate background array the same size as the flattened total
science array.
Parameters
----------
bg_params : array with shape (self.N) or (self.N, M)
Background parameters for each beam, where the ``M`` axis is
polynomial cofficients in the order expected by
`~astropy.modeling.models.Polynomial2D`. If the array is 1D,
then provide a simple pedestal background.
Returns
-------
bg_model : Array with dimensions ``(self.fit_mask.sum(),)``
Flattened, masked background array.
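Examples
--------
Illustrative sketch only; ``mb`` stands in for an initialized fitting
object with ``mb.N`` beams::
    bg = mb.get_flat_background(np.zeros(mb.N))       # pedestal offset per beam
    bg = mb.get_flat_background(np.zeros((mb.N, 3)))  # order-1 2D polynomial per beam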
"""
from astropy.modeling.models import Polynomial2D
# Initialize beam pixel coordinates
for beam in self.beams:
needs_init = not hasattr(beam, 'xp')
if hasattr(beam, 'xp_mask'):
needs_init |= apply_mask is not beam.xp_mask
if needs_init:
#print('Initialize xp/yp')
yp, xp = np.indices(beam.sh)
xp = (xp - beam.sh[1]/2.)/(beam.sh[1]/2.)
# normalized to center
yp = (yp - beam.sh[0]/2.)/(beam.sh[0]/2.)
if apply_mask:
beam.xp = xp.flatten()[beam.fit_mask]
beam.yp = yp.flatten()[beam.fit_mask]
else:
beam.xp = xp.flatten()
beam.yp = yp.flatten()
beam.xp_mask = apply_mask
if (not hasattr(beam, 'ones')) | needs_init:
if apply_mask:
beam.ones = np.ones(beam.fit_mask.sum())
else:
beam.ones = np.ones(beam.fit_mask.size)
# Initialize 2D polynomial
poly = None
if bg_params.ndim > 1:
if bg_params.shape[1] > 1:
M = bg_params.shape[1]
order = {3: 1, 6: 2, 10: 3}
poly = Polynomial2D(order[M])
#mfull = self.scif[self.fit_mask]
bg_full = []
for ib, beam in enumerate(self.beams):
if poly is not None:
poly.parameters = bg_params[ib, :]
bg_i = poly(beam.xp, beam.yp)
else:
# Order = 0, pedestal offset
bg_i = beam.ones*bg_params[ib]
bg_full.append(bg_i)
return np.hstack(bg_full)
@staticmethod
def _objective_line_width(params, self, verbose):
"""
Objective function for emission line velocity widths
"""
bl, nl, z = params
t0, t1 = utils.load_quasar_templates(uv_line_complex=False, broad_fwhm=bl*1000, narrow_fwhm=nl*1000, t1_only=True)
tfit = self.template_at_z(z=z, templates=t1, fitter='nnls', fit_background=True, get_residuals=True)
if verbose:
print(params, tfit['chi2'].sum())
return tfit['chi2']
def fit_line_width(self, bl=2.5, nl=1.1, z0=1.9367, max_nfev=100, tol=1.e-3, verbose=False):
"""
Fit for emission line width
Returns:
[broad width/(1000 km/s), narrow width/(1000 km/s), z, nfev, (nfev == max_nfev)]
"""
from scipy.optimize import least_squares
init = [bl, nl, z0]
args = (self, verbose)
out = least_squares(self._objective_line_width, init, jac='2-point', method='lm', ftol=tol, xtol=tol, gtol=tol, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=max_nfev, verbose=0, args=args, kwargs={})
params = out.x
res = [out.x[0], out.x[1], out.x[2], out.nfev, out.nfev == max_nfev]
return res
def show_drizzled_lines(line_hdu, full_line_list=['OII', 'Hb', 'OIII', 'Ha+NII', 'Ha', 'SII', 'SIII'], size_arcsec=2, cmap='cubehelix_r', scale=1., dscale=1, direct_filter=['F140W', 'F160W', 'F125W', 'F105W', 'F110W', 'F098M']):
"""Make a figure with the drizzled line maps
Parameters
----------
line_hdu : `~astropy.io.fits.HDUList`
Result from `~grizli.multifit.MultiBeam.drizzle_fit_lines`
full_line_list : list
Line species to always show
size_arcsec : float
Thumbnail size in arcsec
cmap : str
colormap string
scale : float
Scale factor for line panels
dscale : float
Scale factor for direct image panel
direct_filter : list
Filter preference to show in the direct image panel. Step through
and stop if the indicated filter is available.
Returns
-------
fig : `~matplotlib.figure.Figure`
Figure object
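Examples
--------
Illustrative sketch only; assumes ``hdu`` was produced by an upstream
`~grizli.multifit.MultiBeam.drizzle_fit_lines` call::
    fig = show_drizzled_lines(hdu, full_line_list='all')
    fig.savefig('line_maps.png')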
"""
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
show_lines = []
for line in full_line_list:
if line in line_hdu[0].header['HASLINES'].split():
show_lines.append(line)
if full_line_list == 'all':
show_lines = line_hdu[0].header['HASLINES'].split()
#print(line_hdu[0].header['HASLINES'], show_lines)
# Dimensions
pix_size = np.abs(line_hdu['DSCI'].header['CD1_1']*3600)
majorLocator = MultipleLocator(1.) # /pix_size)
N = line_hdu['DSCI'].data.shape[0]/2
crp = line_hdu['DSCI'].header['CRPIX1'], line_hdu['DSCI'].header['CRPIX2']
crv = line_hdu['DSCI'].header['CRVAL1'], line_hdu['DSCI'].header['CRVAL2']
imsize_arcsec = line_hdu['DSCI'].data.shape[0]*pix_size
# Assume square
sh = line_hdu['DSCI'].data.shape
dp = -0.5*pix_size # FITS reference is center of a pixel, array is edge
extent = (-imsize_arcsec/2.-dp, imsize_arcsec/2.-dp,
-imsize_arcsec/2.-dp, imsize_arcsec/2.-dp)
NL = len(show_lines)
xsize = 3*(NL+1)
fig = plt.figure(figsize=[xsize, 3.4])
# Direct
ax = fig.add_subplot(1, NL+1, 1)
dext = 'DSCI'
# Try preference for direct filter
for filt in direct_filter:
if ('DSCI', filt) in line_hdu:
dext = 'DSCI', filt
break
ax.imshow(line_hdu[dext].data*dscale, vmin=-0.02, vmax=0.6, cmap=cmap, origin='lower', extent=extent)
ax.set_title('Direct {0} z={1:.3f}'.format(line_hdu[0].header['ID'], line_hdu[0].header['REDSHIFT']))
if 'FILTER' in line_hdu[dext].header:
ax.text(0.03, 0.97, line_hdu[dext].header['FILTER'],
transform=ax.transAxes, ha='left', va='top', fontsize=8)
ax.set_xlabel('RA')
ax.set_ylabel('Decl.')
# 1" ticks
ax.errorbar(-0.5, -0.9*size_arcsec, yerr=0, xerr=0.5, color='k')
ax.text(-0.5, -0.9*size_arcsec, r'$1^{\prime\prime}$', ha='center', va='bottom', color='k')
# Line maps
for i, line in enumerate(show_lines):
ax = fig.add_subplot(1, NL+1, 2+i)
ax.imshow(line_hdu['LINE', line].data*scale, vmin=-0.02, vmax=0.6, cmap=cmap, origin='lower', extent=extent)
ax.set_title(r'%s %.3f $\mu$m' % (line, line_hdu['LINE', line].header['WAVELEN']/1.e4))
# End things
for ax in fig.axes:
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xlim(np.array([-1, 1])*size_arcsec)
ax.set_ylim(np.array([-1, 1])*size_arcsec)
#x0 = np.mean(ax.get_xlim())
#y0 = np.mean(ax.get_xlim())
ax.scatter(0, 0, marker='+', color='k', zorder=100, alpha=0.5)
ax.scatter(0, 0, marker='+', color='w', zorder=101, alpha=0.5)
ax.xaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_locator(majorLocator)
fig.tight_layout(pad=0.1, w_pad=0.5)
fig.text(1-0.015*12./xsize, 0.02, time.ctime(), ha='right', va='bottom',
transform=fig.transFigure, fontsize=5)
return fig
| mit |
rwgdrummer/maskgen | hp_tool/hp/hp_data.py | 1 | 29663 | """
hp_data
Various helper functions that perform the backend processing for the HP Tool
"""
import shutil
import os
import datetime
import csv
import hashlib
import tkFileDialog
import tkMessageBox
import maskgen.tool_set
import pandas as pd
import numpy as np
import subprocess
import json
import data_files
from PIL import Image
from hp.GAN_tools import SeedProcessor
from zipfile import ZipFile
exts = {'IMAGE': [x[1][1:] for x in maskgen.tool_set.imagefiletypes],
'VIDEO': [x[1][1:] for x in maskgen.tool_set.videofiletypes] + [".zip"],
'AUDIO': [x[1][1:] for x in maskgen.tool_set.audiofiletypes],
'MODEL': ['.3d.zip'],
'nonstandard': ['.lfr']}
model_types = [x[1][1:] for x in maskgen.tool_set.modelfiletypes]
orgs = {'RIT': 'R', 'Drexel': 'D', 'U of M': 'M', 'PAR': 'P', 'CU Denver': 'C'}
RVERSION = '#@version=01.14'
thumbnail_conversion = {}
def copyrename(image, path, usrname, org, seq, other, containsmodels):
"""
Performs the copy/rename operation
:param image: original filename (full path)
:param path: destination path. This must have 3 subdirectories: images, video, and csv
:param usrname: username for new filename
:param org: organization code for new filename
:param seq: sequence # for new filename
:param other: other info for new filename
:return: full path of new file
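Example (illustrative only; the date prefix depends on the current day)::
    copyrename('/data/raw/IMG_0001.jpg', '/data/out', 'jdoe', 'R', '00042', 'test', False)
    # -> '/data/out/image/.hptemp/<YYMMDD>-Rjdoe-00042-test.jpg'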
"""
global exts
global thumbnails
newNameStr = datetime.datetime.now().strftime('%Y%m%d')[2:] + '-' + \
org + usrname + '-' + seq
if other:
newNameStr = newNameStr + '-' + other
currentExt = os.path.splitext(image)[1]
files_in_dir = os.listdir(os.path.dirname(image)) if containsmodels else []
if any(filename.lower().endswith('.3d.zip') for filename in files_in_dir):
sub = 'model'
elif any(os.path.splitext(filename)[1].lower() in exts["nonstandard"] for filename in files_in_dir):
sub = 'nonstandard'
elif currentExt.lower() in exts['VIDEO']:
sub = 'video'
elif currentExt.lower() in exts['AUDIO']:
sub = 'audio'
elif currentExt.lower() in exts['IMAGE']:
sub = 'image'
else:
return image
if sub not in ['model', 'nonstandard']:
if currentExt == ".zip":
full_ext = os.path.splitext(os.path.splitext(image)[0])[1] + ".zip"
newPathName = os.path.join(path, sub, '.hptemp', newNameStr + full_ext)
else:
newPathName = os.path.join(path, sub, '.hptemp', newNameStr + currentExt)
else:
sub = 'image' if sub == 'nonstandard' else 'model'
thumbnail_folder = os.path.join(path, sub, '.hptemp', newNameStr) if sub == 'model' else os.path.join(path,
'thumbnails',
'.hptemp')
if not os.path.isdir(thumbnail_folder):
os.mkdir(thumbnail_folder)
file_dir = os.path.normpath(os.path.dirname(image))
thumbnail_conversion[file_dir] = {}
thumbnail_counter = 0
for i in os.listdir(file_dir):
currentExt = os.path.splitext(i)[1].lower()
if i.lower().endswith(".3d.zip"):
newPathName = os.path.join(path, sub, '.hptemp', newNameStr, newNameStr + ".3d.zip")
elif currentExt in exts["nonstandard"]:
newPathName = os.path.join(path, sub, '.hptemp', newNameStr + currentExt)
elif currentExt in exts['IMAGE']:
newThumbnailName = "{0}_{1}{2}".format(newNameStr, str(thumbnail_counter), currentExt)
dest = os.path.join(thumbnail_folder, newThumbnailName)
with Image.open(os.path.join(file_dir, i)) as im:
if im.width > 264:
im.thumbnail((264, 192), Image.ANTIALIAS)
im.save(dest)
else:
shutil.copy2(os.path.join(file_dir, i), dest)
thumbnail_conversion[file_dir][i] = newThumbnailName
thumbnail_counter += 1
else:
tkMessageBox.showwarning("File Copy Error", i + " will not be copied to the output directory as it is"
" an unrecognized file format")
shutil.copy2(image, newPathName)
return newPathName
def check_settings(self):
"""
Check settings for new seq and additional filetypes
:param self: reference to HP GUI
"""
# reset sequence if date is new
if self.settings.get_key('date') != datetime.datetime.now().strftime('%Y%m%d')[2:]:
self.settings.save('seq', '00000')
else:
self.settings.save('date', datetime.datetime.now().strftime('%Y%m%d')[2:])
add_types(self.settings.get_key('imagetypes'), 'image')
add_types(self.settings.get_key('videotypes'), 'video')
add_types(self.settings.get_key('audiotypes'), 'audio')
def add_types(data, mformat):
global exts
mformat = mformat.upper()
# type == str when settings have been opened, None otherwise
if type(data) == str:
data = data.replace(',', ' ').split(' ')
for i in data:
if i not in exts[mformat] and len(i) > 0:
exts[mformat].append(i)
# def convert_GPS(coordinate):
# """
# Converts lat/long output from exiftool (DMS) to decimal degrees
# :param coordinate: string of coordinate in the form 'X degrees Y' Z' N/S/W/E'
# :return: (string) input coordinate in decimal degrees, rounded to 6 decimal places
# """
# if coordinate:
# coord = coordinate.split(' ')
# whole = float(coord[0])
# direction = coord[-1]
# min = float(coord[2][:-1])
# sec = float(coord[3][:-1])
# dec = min + (sec/60)
# coordinate = round(whole + dec/60, 6)
#
# if direction == 'S' or direction == 'W':
# coordinate *= -1
#
# return str(coordinate)
def pad_to_5_str(num):
"""
Converts an int to a string, and pads to 5 chars (1 -> '00001')
:param num: int to be padded
:return: padded string
"""
return '{:=05d}'.format(num)
def grab_dir(inpath, outdir=None, r=False):
"""
Grabs all image files in a directory
:param inpath: path to directory of desired files
:param outdir: path to output csv directory, to check for existing images
:param r: Recursively grab images from all subdirectories as well
:return: list of images in directory
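Example (illustrative only; the paths are hypothetical)::
    images = grab_dir('/data/raw', outdir='/data/out/csv', r=True)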
"""
imageList = []
names = os.listdir(inpath)
valid_exts = tuple(exts['IMAGE'] + exts['VIDEO'] + exts['AUDIO'] + ['.dng.zip'])
if r:
for dirname, dirnames, filenames in os.walk(inpath, topdown=True):
for filename in filenames:
if filename.lower().endswith(valid_exts) and not filename.startswith('.'):
imageList.append(os.path.join(dirname, filename))
else:
for f in names:
if f.lower().endswith(valid_exts) and not f.startswith('.'):
imageList.append(os.path.join(inpath, f))
elif os.path.isdir(os.path.join(inpath, f)):
for obj in os.listdir(os.path.join(inpath, f)):
if obj.lower().endswith('.3d.zip') or os.path.splitext(obj)[1].lower() in exts["nonstandard"]:
imageList.append(os.path.normpath(os.path.join(inpath, f, obj)))
imageList = sorted(imageList, key=str.lower)
if outdir:
repeated = []
ritCSV = None
if os.path.exists(outdir):
for f in os.listdir(outdir):
if f.endswith('.csv') and 'rit' in f:
ritCSV = os.path.join(outdir, f)
rit = pd.read_csv(ritCSV, dtype=str)
repeated = rit['OriginalImageName'].tolist()
break
removeList = []
for name in imageList:
for repeatedName in repeated:
if repeatedName:
if os.path.basename(repeatedName) == os.path.basename(name):
removeList.append(name)
break
for imageName in removeList:
imageList.remove(imageName)
return imageList
def find_rit_file(outdir):
"""
Find a file ending in a dir, ending with 'rit.csv'
:param outdir: string directory name
:return: string w/ rit filename, None if not found
"""
rit_file = None
if not os.path.exists(outdir):
return None
for f in os.listdir(outdir):
if f.endswith('rit.csv'):
rit_file = os.path.join(outdir, f)
return rit_file
def build_keyword_file(image, keywords, csvFile):
"""
Adds keywords to specified file
:param image: image filename that keywords apply to
:param keywords: list of keywords
:param csvFile: csv file to store information
:return: None
"""
with open(csvFile, 'a') as csv_keywords:
keywordWriter = csv.writer(csv_keywords, lineterminator='\n', quoting=csv.QUOTE_MINIMAL)
if keywords:
for word in keywords:
keywordWriter.writerow([image, word])
else:
keywordWriter.writerow([image])
def build_csv_file(self, oldNameList, newNameList, info, csvFile, type):
"""
Write out desired csv file, using headers from data/headers.json
:param self: reference to HP GUI
:param oldNameList: original image names
:param newNameList: new image names, in same order as imageList
:param info: ordered list of dictionaries of image info. should be in same order as image lists
:param csvFile: csv file name
:param type: name of list of headers to take, labeled in headers.json (rit, rankone, history)
:return:
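Example (illustrative only; assumes the name lists and parsed ``imageInfo`` from earlier processing steps)::
    build_csv_file(self, imageList, newNameList, imageInfo, os.path.join(outputdir, 'csv', '171101-Rjdoe-rit.csv'), 'rit')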
"""
newFile = not os.path.isfile(csvFile)
headers = load_json_dictionary(data_files._HEADERS)
with open(csvFile, 'a') as c:
wtr = csv.writer(c, lineterminator='\n', quoting=csv.QUOTE_ALL)
if newFile:
if type == 'rankone':
wtr.writerow([RVERSION])
wtr.writerow(headers[type])
for imNo in xrange(len(oldNameList)):
row = []
if type == 'keywords':
row.extend([os.path.basename(newNameList[imNo]), '', '', ''])
wtr.writerow(row)
continue
for h in headers[type]:
if h == 'MD5':
md5 = hashlib.md5(open(newNameList[imNo], 'rb').read()).hexdigest()
row.append(md5)
elif h == 'ImportDate':
row.append(datetime.datetime.today().strftime('%m/%d/%Y %I:%M:%S %p'))
elif h == 'DeviceSN':
row.append(info[imNo]['DeviceSerialNumber'])
elif h == 'OriginalImageName' or h == 'Original Name':
row.append(os.path.basename(oldNameList[imNo]))
elif h == 'ImageFilename' or h == 'New Name':
row.append(os.path.basename(newNameList[imNo]))
elif h == 'HP-HDLocation' and not info[imNo]['HP-HDLocation']:
row.append(os.path.dirname(oldNameList[imNo]))
else:
try:
row.append(info[imNo][h])
except KeyError:
print('Could not find column ' + h)
row.append('ERROR')
wtr.writerow(row)
def check_create_subdirectories(path):
"""
Check if temporary image, video, audio, and csv subdirectories exist in a path, and creates them if not.
:param path: directory path
:return: None
"""
subs = ['image', 'video', 'audio', 'model', 'thumbnails', 'csv']
for sub in subs:
if not os.path.exists(os.path.join(path, sub, '.hptemp')):
os.makedirs(os.path.join(path, sub, '.hptemp'))
for f in os.listdir(os.path.join(path, sub, '.hptemp')):
oldFile = os.path.join(path, sub, '.hptemp', f)
if os.path.isfile(oldFile):
os.remove(oldFile)
def remove_temp_subs(path):
"""
Move files out of temporary subdirectories and into output folder.
:param path: Path containing temp subdirectories
:return:
"""
subs = ['image', 'video', 'audio', 'model', 'thumbnails', 'csv']
for sub in subs:
for f in os.listdir(os.path.join(path, sub, '.hptemp')):
shutil.move(os.path.join(path, sub, '.hptemp', f), os.path.join(path, sub))
os.rmdir(os.path.join(path, sub, '.hptemp'))
if not os.listdir(os.path.join(path, sub)):
os.rmdir(os.path.join(path, sub))
def load_json_dictionary(path):
"""
Load a json file into a dictionary
:param path: path to json file
:return: Dictionary containing json-format data
"""
with open(path) as j:
data = json.load(j)
return data
def remove_dash(item):
"""
Remove the first character in a string.
:param item: String
:return: input string, with first character removed
"""
return item[1:]
def combine_exif(exif_data, lut, d):
"""
Add extracted exif information to master list of HP data
:param exif_data: dictionary of data extracted from an image
:param lut: LUT to translate exiftool output to fields
:param d: master dictionary of HP data
:return: d - a dictionary with updated exif information
"""
for k in lut:
if k in exif_data and exif_data[k] != '-':
d[lut[k]] = exif_data[k]
return d
def set_other_data(self, data, imfile, set_primary):
"""
Set implicit metadata to data.
:param data: Dictionary of field data from one image
:param imfile: name of corresponding image file
:return: data with more information completed
"""
def get_model_ext(model):
zf = ZipFile(model)
exts_in_zip = [os.path.splitext(x)[1] for x in zf.namelist()]
matching_types = [x for x in exts_in_zip if x in model_types]
if matching_types:
return matching_types[0]
return "3d.zip"
imext = os.path.splitext(imfile)[1]
if imext.lower() in exts['AUDIO']:
data['Type'] = 'audio'
elif imfile.lower().endswith('.3d.zip'):
data['Type'] = 'model'
elif imext.lower() in exts['VIDEO']:
data['Type'] = 'video'
else:
data['Type'] = 'image'
data['FileType'] = imext[1:] if imext[1:] != "zip" else os.path.splitext(os.path.splitext(imfile)[0])[1][1:] +\
imext if data['Type'] != "model" else get_model_ext(imfile)
# data['GPSLatitude'] = convert_GPS(data['GPSLatitude'])
# data['GPSLongitude'] = convert_GPS(data['GPSLongitude'])
data['HP-Username'] = self.settings.get_key('username')
try:
if int(data['ImageWidth']) < int(data['ImageHeight']):
data['HP-Orientation'] = 'portrait'
else:
data['HP-Orientation'] = 'landscape'
except ValueError:
# no/invalid image width or height in metadata
pass
if set_primary and set_primary != "model":
data['HP-PrimarySecondary'] = 'primary'
if 'back' in data['LensModel']:
data['HP-PrimarySecondary'] = 'primary'
elif 'front' in data['LensModel']:
data['HP-PrimarySecondary'] = 'secondary'
return data
def check_outdated(ritCSV, path):
"""
If an old CSV directory is loaded, check for any updates.
Future update functions should be called from here for backwards compatibility. (For new column headings, etc)
:param ritCSV: path to RIT csv
:param path: path to data
:return:
"""
current_headers = load_json_dictionary(data_files._FIELDNAMES)
rit_data = pd.read_csv(ritCSV, dtype=str)
rit_headers = list(rit_data)
diff = [x for x in current_headers.keys() if x not in rit_headers] # list all new items
for new_item in diff:
if new_item == 'HP-Collection':
rit_data.rename(columns={'HP-CollectionRequestID': 'HP-Collection'}, inplace=True)
print('Updating: Changed HP-CollectionRequestID to HP-Collection.')
elif new_item == 'CameraMake':
add_exif_column(rit_data, 'CameraMake', '-Make', path)
if diff:
rit_data.to_csv(ritCSV, index=False, quoting=csv.QUOTE_ALL)
def add_exif_column(df, title, exif_tag, path):
"""
Add a new column of exif data to a pandas dataframe containing image names
:param df: pandas dataframe, contains image data
:param title: string, column title
:param exif_tag: exif tag to run exiftool with (e.g. -Make)
:param path: path to root process directory
:return: None
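Example (mirrors the call made from `check_outdated`)::
    add_exif_column(rit_data, 'CameraMake', '-Make', path)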
"""
print('Updating: Adding new column: ' + title + '. This may take a moment for large sets of data... '),
exifDataResult = subprocess.Popen(['exiftool', '-f', '-j', '-r', exif_tag, path], stdout=subprocess.PIPE).communicate()[0]
exifDataResult = json.loads(exifDataResult)
exifDict = {}
for item in exifDataResult:
exifDict[os.path.normpath(item['SourceFile'])] = item
# create an empty column, and add the new exif data to it
a = np.empty(df.shape[0])
a[:] = np.NaN
new = pd.Series(a, index=df.index)
try:
for index, row in df.iterrows():
image = row['ImageFilename']
sub = row['Type']
key = os.path.join(path, sub, image)
val = exifDict[os.path.normpath(key)][exif_tag[1:]]
new[index] = val if val != '-' else ''
except KeyError:
print('Could not add column. You may encounter validation errors. It is recommended to re-process your data.')
return
df[title] = new
print('done')
def parse_image_info(self, imageList, cameraData, **kwargs):
"""
One of the critical backend functions for the HP tool. Parses out exif data for all of the images, and sorts it into a
dictionary
:param self: reference to HP GUI
:param imageList: list of image filepaths
:param kwargs: additional settings or metadata, including: rec (recursion T/F), path (input directory)
:return: data: dictionary containing image names and their gathered data
"""
fields = load_json_dictionary(data_files._FIELDNAMES)
master = {}
exiftoolargs = []
for fkey in fields:
master[fkey] = ''
if fields[fkey].startswith('-'):
exiftoolargs.append(fields[fkey])
for kkey in kwargs:
if kkey in fields:
master[kkey] = kwargs[kkey]
exiftoolparams = ['exiftool', '-f', '-j', '-r', '-software', '-make', '-model', '-serialnumber'] if kwargs['rec'] else ['exiftool', '-f', '-j', '-software', '-make', '-model', '-serialnumber']
exifDataResult = subprocess.Popen(exiftoolparams + exiftoolargs + [kwargs['path']], stdout=subprocess.PIPE).communicate()[0]
# exifDataResult is in the form of a String json ("[{SourceFile:im1.jpg, imageBitsPerSample:blah}, {SourceFile:im2.jpg,...}]")
try:
exifDataResult = json.loads(exifDataResult)
except:
print('Exiftool could not return data for all input.')
# further organize exif data into a dictionary based on source filename
exifDict = {}
for item in exifDataResult:
exifDict[os.path.normpath(item['SourceFile'])] = item
try:
set_primary = cameraData[cameraData.keys()[0]]["camera_type"] != "CellPhone"
except IndexError:
set_primary = "model"
data = {}
reverseLUT = dict((remove_dash(v), k) for k, v in fields.iteritems() if v)
for i in xrange(0, len(imageList)):
if not (imageList[i].lower().endswith('.3d.zip') or os.path.splitext(imageList[i])[1].lower() in exts["nonstandard"]):
try:
data[i] = combine_exif(exifDict[os.path.normpath(imageList[i])], reverseLUT, master.copy())
except KeyError:
data[i] = combine_exif({}, reverseLUT, master.copy())
else:
image_file_list = os.listdir(os.path.normpath(os.path.dirname(imageList[i])))
del image_file_list[image_file_list.index(os.path.basename(imageList[i]))]
data[i] = combine_exif({"Thumbnail": "; ".join(image_file_list)},
reverseLUT, master.copy())
data[i] = set_other_data(self, data[i], imageList[i], set_primary)
return data
def process_metadata(dir, metadata, recursive=False, quiet=False):
"""
Attempts to add tags containing metadata gathered from preferences to all output images. Some media files will not
be writable by exiftool
Adds the following:
copyright
artist
by-line
credit
usage terms
copyright notice
:param dir: string, root directory
:param metadata: dictionary, metadata tags to write
:param recursive: boolean, whether or not to recursively edit metadata of subdirectories too
:param quiet: boolean, set to True for no progress messages
:return:
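Example (illustrative only; the tag values are hypothetical)::
    process_metadata('/data/out/image/.hptemp', {'copyright': '(c) 2017', 'artist': 'jdoe'}, quiet=True)
    # builds roughly: exiftool -copyright=(c) 2017 -artist=jdoe -XMPToolkit= -overwrite_original -L -m -P /data/out/image/.hptemp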
"""
exifToolInput = ['exiftool', '-progress']
for key, value in metadata.iteritems():
exifToolInput.append('-' + key + '=' + value)
if recursive:
exifToolInput.extend(('-XMPToolkit=', '-overwrite_original', '-r', '-L', '-m', '-P'))
else:
exifToolInput.extend(('-XMPToolkit=', '-overwrite_original', '-L', '-m', '-P'))
exifToolInput.append(dir)
if quiet:
del exifToolInput[1]
# run exiftool
subprocess.call(exifToolInput)
def process(self, cameraData, imgdir='', outputdir='', recursive=False,
keywords='', additionalInfo='', **kwargs):
"""
The main process workflow for the hp tool.
:param self: reference to HP GUI
:param cameraData: dictionary of camera information, keyed by device local ID
:param imgdir: directory of raw images/video/audio files to be processed
:param outputdir: output directory (csv, image, video, and audio files will be made here)
:param recursive: boolean, whether or not to search subdirectories as well
:param keywords: keywords to be associated with the processed media
:param additionalInfo: additional bit to be added onto filenames
:param kwargs: hp data to be set
:return: list of paths of original images and new images
"""
check_settings(self)
print('Settings OK')
# collect image list
print('Collecting images...')
imageList = []
# set up the output subdirectories
check_create_subdirectories(outputdir)
imageList.extend(grab_dir(imgdir, os.path.join(outputdir, 'csv'), recursive))
if not imageList:
print('No new images found')
remove_temp_subs(outputdir)
if os.path.exists(os.path.join(outputdir, 'csv')):
check_outdated(find_rit_file(os.path.join(outputdir, 'csv')), outputdir)
else:
tkMessageBox.showerror("Directory Error", "There has been an error processing the input directory. Please verify there is media within the directory. If there is only 3D Models or Lytro images to be processed, verify that you are following the correct directory structure.")
return None, None
return imageList, []
# build information list. This is the bulk of the processing, and what runs exiftool
print('Building image info...')
imageInfo = parse_image_info(self, imageList, cameraData, path=imgdir, rec=recursive, **kwargs)
if imageInfo is None:
return None, None
print('...done')
# once we're sure we have info to work with, we can check for the image, video, and csv subdirectories
check_create_subdirectories(outputdir)
# prepare for the copy operation
try:
count = int(self.settings.get_key('seq'))
except TypeError:
count = 0
self.settings.save('seq', '00000')
# copy with renaming
print('Copying files...')
newNameList = []
searchmodels = not (recursive or cameraData) or (not recursive and "lytro" in cameraData[cameraData.keys()[0]]["hp_camera_model"].lower())
for image in imageList:
newName = copyrename(image, outputdir, self.settings.get_key('username', ''), self.settings.get_key('hp-organization', ''), pad_to_5_str(count), additionalInfo, searchmodels)
if os.path.split(newName)[1] == os.path.split(image)[1]:
name = os.path.split(image)[1]
if name.lower().endswith('.3d.zip'):
tkMessageBox.showerror("Improper 3D Model Processing", "In order to process 3D models, you must have "
"no device local ID and the 'Include "
"Subdirectories' box must NOT be checked")
else:
tkMessageBox.showerror("Unrecognized data type", "An unrecognized data type {0} was found in the input "
"directory. Please add this extension to the list of "
"addition extensions.".format(os.path.splitext(image)[1]))
return
# image_dir = os.path.dirname(image)
# newFolder = copyrename(image_dir, outputdir, self.settings.get('username'), self.settings.get('organization'), pad_to_5_str(count), additionalInfo, searchmodels)
# newImage = copyrename(image, outputdir, self.settings.get('username'), self.settings.get('organization'), pad_to_5_str(count), additionalInfo, searchmodels)
newNameList += [newName]
count += 1
# Updates HP-Thumbnails tab to show renamed image names rather than the original image names.
for model in xrange(0, len(imageInfo)):
thumbnails = imageInfo[model]['HP-Thumbnails'].split("; ")
try:
del thumbnails[thumbnails.index('')]
except ValueError:
pass
new_thumbnails = []
if thumbnails:
for thumbnail in thumbnails:
model_path = os.path.dirname(os.path.normpath(imageList[model]))
try:
new_thumbnails.append(thumbnail_conversion[model_path][thumbnail])
except KeyError:
pass
imageInfo[model]['HP-Thumbnails'] = "; ".join(new_thumbnails)
# parse seeds
def get_seed_file():
seed = None
while not seed:
tkMessageBox.showinfo("Select Seed File", "Select the GAN seed file (ex. log.txt for the"
" Progressive GAN).")
seed = tkFileDialog.askopenfilename()
print("Loading seeds... "),
seed_loader = SeedProcessor(self, seed)
print("done.")
seeds = seed_loader.get_seeds()
return seeds
try:
local_id = cameraData.keys()[0]
except IndexError: # 3D Models
local_id = ""
if local_id.lower().startswith("gan"):
if tkMessageBox.askyesno("Add Seed?", "Would you like to connect a seed file to these GAN images? "):
seed_list = get_seed_file()
while len(seed_list) != len(imageInfo):
if len(seed_list) > len(imageInfo):
diff_type = "There are more seeds found than GANs. If you continue with this seed file, the last" \
" {0} seeds will be unused.".format(len(seed_list) - len(imageInfo))
if len(seed_list) < len(imageInfo):
diff_type = "There are more GANs found than seeds provided. If you continue with this seed file," \
" the last {0} GANs will not have seeds.".format(len(imageInfo) - len(seed_list))
retry_seed = tkMessageBox.askyesno("Mismatched Seed File", diff_type + " Would you like to select a "
"different seed file?")
if retry_seed:
seed_list = get_seed_file()
for im in xrange(0, len(imageInfo)):
try:
imageInfo[im]['HP-seed'] = seed_list[im]
except IndexError:
break
print(' done')
self.settings.save('seq', pad_to_5_str(count))
self.settings.save('date', datetime.datetime.now().strftime('%Y%m%d')[2:])
print('Settings updated with new sequence number')
print('Updating metadata...')
metadata = {"usageterms": self.settings.get_key("usageterms"),
"copyrightnotice": self.settings.get_key("copyrightnotice"),
"credit": self.settings.get_key("credit"),
"artist": self.settings.get_key("artist"),
"copyright": self.settings.get_key("copyright"),
"by-line": self.settings.get_key("by-line")}
for folder in ['image', 'video', 'audio', 'model']:
process_metadata(os.path.join(outputdir, folder, '.hptemp'), metadata, quiet=True)
dt = datetime.datetime.now().strftime('%Y%m%d%H%M%S')[2:]
for csv_type in ['rit', 'rankone', 'keywords']:
print('Writing ' + csv_type + ' file')
csv_path = os.path.join(outputdir, 'csv', '-'.join(
(dt, self.settings.get_key('hp-organization') + self.settings.get_key('username'), csv_type + '.csv')))
build_csv_file(self, imageList, newNameList, imageInfo, csv_path, csv_type)
# move out of tempfolder
print('Cleaning up...')
remove_temp_subs(outputdir)
print('\nComplete!')
return imageList, newNameList
| bsd-3-clause |
poo12138/gem5-stable | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
def display(self, name, printmode = 'G'):
import info
if printmode == 'G':
valformat = '%g'
elif printmode != 'F' and value > 1e6:
valformat = '%0.5e'
else:
valformat = '%f'
for job in self.jobfile.jobs():
value = self.info.get(job, self.stat)
if value is None:
return
if not isinstance(value, list):
value = [ value ]
if self.invert:
for i,val in enumerate(value):
if val != 0.0:
value[i] = 1 / val
valstring = ', '.join([ valformat % val for val in value ])
print '%-50s %s' % (job.name + ':', valstring)
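    # Example (illustrative only; 'ipc' and the graph directory are hypothetical):
    #   output = StatOutput(jobfile, info, stat=some_stat)
    #   output.display('ipc', printmode='F')
    #   output.graph('ipc', '~/graphs')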
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
psname = '%s.ps' % re.sub(':', '-', basename)
epsname = '%s.eps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
icdishb/scikit-learn | sklearn/neighbors/graph.py | 19 | 6904 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(
n_neighbors, metric=metric, p=p, metric_params=metric_params
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(
radius=radius, metric=metric, p=p,
metric_params=metric_params
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
vistalab/elastic_basis_pursuit | ebp/elastic_basis_pursuit.py | 1 | 8059 | """
Elastic basis pursuit
"""
import numpy as np
import numpy.linalg as nla
import leastsqbound as lsq
import sklearn.linear_model as lm
import scipy.optimize as opt
def err_func(params, x, y, func):
"""
Error function for fitting a function
Parameters
----------
params : tuple
A tuple with the parameters of `func` according to their order of
input
x : float array
An independent variable.
y : float array
The dependent variable.
func : function
A function with inputs: `(x, *params)`
Returns
-------
The sum of squared marginals of the fit to x/y given the params
"""
# We ravel both, so that we can accomodate multi-d input without having
# to think about it:
return np.ravel(y) - np.ravel(func(x, params))
def gaussian_kernel(x, params):
"""
A multi-dimensional Gaussian kernel function
Useful for creating and testing EBP with simple Gaussian Mixture Models
Parameters
----------
x : ndarray
The independent variable over which the Gaussian is calculated
params : ndarray
If this is a 1D array, it could have one of few things:
[mu_1, mu_2, ... mu_n, sigma_1, sigma_2, ... sigma_n]
Or:
[mu_1, mu_2, ... mu_n, var_covar_matrix]
where:
var_covar_matrix needs to be reshaped into n-by-n
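Examples
--------
Illustrative only::
    x = np.mgrid[-3:3:0.1, -3:3:0.1]           # shape (2, 60, 60)
    g = gaussian_kernel(x, [0, 0, 1, 1])       # [mu_1, mu_2, sigma_1, sigma_2]
    g = gaussian_kernel(x, [0, 0, 1, 0, 0, 1]) # [mu_1, mu_2] + flattened 2x2 covariance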
"""
mu = np.asarray(params[:x.shape[0]])
if len(params) == x.shape[0] * 2:
sigma = np.diag(params[x.shape[0]:])
elif len(params) == x.shape[0] + x.shape[0] ** 2:
mu = params[:x.shape[0]]
sigma = np.reshape(params[x.shape[0]:], (x.shape[0], x.shape[0]))
else:
e_s = "Inputs to gaussian_kernel don't have the right dimensions"
raise ValueError(e_s)
dims = mu.shape[0]
while len(mu.shape) < len(x.shape):
mu = mu[..., None]
shape_tuple = x.shape[1:]
diff = (x - mu).reshape(x.shape[0], -1)
sigma_inv = nla.inv(sigma)
mult1 = np.dot(diff.T, sigma_inv)
mult2 = (np.diag(np.dot(mult1, diff))).reshape(shape_tuple)
norm_factor = 1/(np.sqrt((2*np.pi)**dims * nla.det(sigma)))
gauss = norm_factor * np.exp(-0.5 * mult2)
return gauss
def leastsq_oracle(x, y, kernel, initial=None, bounds=None):
"""
This is a generic oracle function that uses bounded least squares to find
the parameters in each iteration of EBP, and requires initial parameters.
Parameters
----------
x : ndarray
Input to the kernel function.
y : ndarray
Data to fit to.
kernel : callable
The kernel function to be specified by this oracle.
initial : list/array
initial setting for the parameters of the function. This has to be
something that kernel knows what to do with.
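Examples
--------
Illustrative only; ``x``/``y`` are data arrays and the initial guess and
bounds must match what the chosen kernel expects::
    theta = leastsq_oracle(x, y, gaussian_kernel, initial=[0, 0, 1, 1],
                           bounds=[(-3, 3), (-3, 3), (0, None), (0, None)])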
"""
return lsq.leastsqbound(err_func, initial, args=(x, y, kernel),
bounds=bounds)[0]
def mixture_of_kernels(x, betas, params, kernel):
"""
Generate the signal from a mixture of kernels
Parameters
----------
x : ndarray
betas : 1D array
Coefficients for the linear summation of the kernels
params : list
A set of parameters for each one of the kernels
kernel : callable
"""
betas = np.asarray(betas)
out = np.zeros(x.shape[1:])
for i in xrange(betas.shape[0]):
out += np.dot(betas[i], kernel(x, params[i]))
return out
def kernel_err(y, x, betas, params, kernel):
"""
An error function for a mixture of kernels, each one parameterized by its
own set of params, and weighted by a beta
Note
----
For a given set of betas, params, this can be used as a within set error
function, or to estimate the cross-validation error against another set of
y, x values, sub-sampled from the whole original set, or from a left-out
portion
"""
return y - mixture_of_kernels(x, betas, params, kernel)
def parameters_to_regressors(x, kernel, params):
"""
Maps from parameters to regressors through the kernel function
Parameters
----------
x : ndarray
Input
kernel : callable
The kernel function
params : list
The parameters for each one of the kernel functions
"""
# Ravel the secondary dimensions of this:
x = x.reshape(x.shape[0], -1)
regressors = np.zeros((len(params), x.shape[-1]))
for i, p in enumerate(params):
regressors[i] = kernel(x, p)
return regressors.T
def solve_nnls(x, y, kernel=None, params=None, design=None):
"""
Solve the mixture problem using NNLS
Parameters
----------
x : ndarray
y : ndarray
kernel : callable
params : list
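Examples
--------
Illustrative only; ``thetas`` is a list of kernel parameter sets found by
the oracle in earlier iterations::
    beta_hat, rnorm = solve_nnls(x, y, kernel=gaussian_kernel, params=thetas)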
"""
if design is None and (kernel is None or params is None):
e_s = "Need to provide either design matrix, or kernel and list of"
e_s += "params for generating the design matrix"
raise ValueError(e_s)
if design is None:
A = parameters_to_regressors(x, kernel, params)
else:
A = design
y = y.ravel()
beta_hat, rnorm = opt.nnls(A, y)
return beta_hat, rnorm
def elastic_basis_pursuit(x, y, oracle, kernel, initial_theta=None, bounds=None,
max_iter=1000, beta_tol=10e-6):
"""
Elastic basis pursuit
Fit a mixture model::
.. math::
y = \sum{w_i f_{\theta_i} (x_i)}
with y data, f a kernel function parameterized by $\theta_i$ and $w_i$ a
non-negative weight, and x inputs to the kernel function
Parameters
----------
x : 1D/2D array
The independent variable that produces the data
y : 1D/2D darray
The data to be fit.
oracle : callable
This is a function that takes data (`x`/`y`) and a kernel function
(`kernel`) and returns the params theta for the kernel given x and
y. The oracle can use any optimization routine, and any cost function
kernel : callable
A skeleton for the oracle function to optimize. Must take something
of the dimensions of x (together with params, and with args) and return
something of the dimensions of y.
initial_theta : list/array
The initial parameter guess
bounds : list of tuples, optional
Bounds on the kernel parameters, passed through to the oracle.
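Examples
--------
Illustrative sketch only, using the Gaussian kernel and least-squares
oracle defined above; returns the surviving kernel parameters, the
cross-validation error track, and the residuals::
    theta, err, r = elastic_basis_pursuit(x, y, leastsq_oracle, gaussian_kernel,
                                          initial_theta=[0, 0, 1, 1],
                                          bounds=[(-3, 3), (-3, 3), (0, None), (0, None)])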
"""
# Divide this up into a fit set and a validation set. We'll stop fitting
# when error on the validation set starts climbing:
fit_x = x[:, ::2]
validate_x = x[:, 1::2]
fit_y = y[::2]
validate_y = y[1::2]
# Initialize a bunch of empty lists to hold the state:
theta = []
est = []
design_list = []
r = []
err = [np.var(fit_y)] # Start with the variance of the data as the baseline error
err_norm = []
# Initialize the residuals with the fit_data:
r.append(fit_y)
# Limit this by number of iterations
for i in range(max_iter):
theta.append(oracle(fit_x, r[-1], kernel, initial_theta,
bounds=bounds))
design = parameters_to_regressors(fit_x, kernel, theta)
beta_hat, rnorm = solve_nnls(fit_x, fit_y, design=design)
# Here comes the "elastic" bit. We exclude kernels with insignificant
# contributions:
keep_idx = np.where(beta_hat > beta_tol)
# We want this to still be a list (so we can 'append'):
theta = list(np.array(theta)[keep_idx])
beta_hat = beta_hat[keep_idx]
design = design[:, keep_idx[0]]
# Move on with the shrunken basis set:
est.append(np.dot(design, beta_hat))
r.append(fit_y - est[-1])
# Cross-validation:
xval_design = parameters_to_regressors(validate_x, kernel, theta)
xval_est = np.dot(xval_design, beta_hat)
xval_r = validate_y - xval_est
err.append(np.dot(xval_r, xval_r))
# If error just grew, we bail:
if err[i+1] > err[i]:
break
return theta, err, r
| mit |
joergsimon/gesture-analysis | main_old.py | 1 | 9996 | import os
import os.path as path
import numpy as np
import pandas as pd
import sklearn
import sklearn.ensemble
import sklearn.linear_model
import sklearn.naive_bayes
import sklearn.neighbors.nearest_centroid
import sklearn.tree
from sklearn import preprocessing
from sklearn import svm
from sklearn.cross_validation import KFold
from sklearn.feature_selection import RFE
from sklearn.metrics import classification_report
import analysis.old.Window as wd
import dataingestion.DataReader as dr
from const.constants import Constants
def read_user(path, glove_data, label_data, overwrite_data):
user = dr.User(path, glove_data, label_data)
if user.has_intermediate_file() and not overwrite_data:
user.read_intermediate()
else:
user.read()
aggregate = wd.ArrgretageUser(user,200,30)
aggregate.make_rolling_dataset_2()
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print user.data.ix[0:2,:]
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print user.windowData.ix[0:2,:]
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
user.write_intermediate()
return user
def labelMatrixToArray(labelMatrix, threshold):
labels = []
exclude = []
for row in labelMatrix.index:
r = labelMatrix.loc[row,:]
lblInfo = r[r > threshold]
lbl = "0.0"
# TODO: for training, it would be better
# to remove the ones where 0 is more than 50 and label is less than 15
if lblInfo.size > 0:
lbl = lblInfo.index[0]
else:
exclude.append(row)
labels.append(lbl)
# now we need to balance the amount of the zero class to the other classes
# get all 0 indexes:
labelDF = pd.DataFrame(labels, index=labelMatrix.index)
return (labelDF, exclude)
def normalizeZeroClass(labels, data):
counts = labels.groupby(0).size()
    max_count = counts[1:].max()
    zeroIndex = labels[labels[0] == "0.0"].index
    selectedIndex = np.random.choice(zeroIndex, size=max_count, replace=False)
removalIndex = zeroIndex.drop(selectedIndex)
labelDF = labels.drop(removalIndex)
trainData = data.drop(removalIndex)
return (labelDF, trainData, removalIndex)
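# Illustration (added; names and numbers are hypothetical): if the window labels
# contain, say, 400 rows of the "0.0" (no-gesture) class while the largest
# gesture class has only 120 rows, normalizeZeroClass keeps a random sample of
# 120 of the "0.0" rows and drops the rest from both labels and data, e.g.:
#
#   balancedLabels, balancedData, dropped = normalizeZeroClass(labelDF, windowData)
#   balancedLabels.groupby(0).size()  # "0.0" count now matches the largest other class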
def main():
root = 'data/raw/'
windowData = None
windowLabelInfo = None
files = [f for f in os.listdir(root) if path.isfile(path.join(root, f))]
labels = [l for l in files if "label" in l]
labels = sorted(labels)
gl_data = [g for g in files if "glove" in g]
gl_data = sorted(gl_data)
for glove_data, label_data in zip(gl_data,labels):
user = read_user(root, glove_data, label_data, False)
if windowData is None:
windowData = user.windowData
windowLabelInfo = user.windowLabel
else:
windowData = pd.concat([windowData, user.windowData])
            windowLabelInfo = pd.concat([windowLabelInfo, user.windowLabel])
print "permutate data"
    # TODO: here compute the labels the way we want them for analysis!
    # first simple approach: just take the major label in each window:
windowLabelInfo = windowLabelInfo.drop('Unnamed: 0', 1)
windowData = windowData.drop(u'gesture', 1)
# permutate the data
indices = np.random.permutation(windowData.index)
windowData = windowData.reindex(indices)
windowLabelInfo = windowLabelInfo.reindex(indices)
# prepare data for feature selection:
selectLabelDF, exclude = labelMatrixToArray(windowLabelInfo, 150)
# now we need to balance the amount of the zero class to the other classes
# get all 0 indexes:
selectLabelDF = selectLabelDF.drop(exclude)
selectData = windowData.drop(exclude)
selectLabelDF, selectData, _ = normalizeZeroClass(selectLabelDF, selectData)
# feature selection using VarianceThreshold filter
# sel = VarianceThreshold(threshold=(.8 * (1 - .8)))
# fit = sel.fit(selectData.values)
# colIndex = fit.get_support(indices=True)
# windowData = windowData[windowData.columns[colIndex]]
    # the below is somewhat valid, however:
    # first I would need to transform the features so each X > 0
    # (i.e. for each column, add the column's maximum negative offset to each value)
    # but I am in doubt whether I should do that, as these are univariate
    # selections, and I am not sure if we are more in the multivariate
    # world here.
# - feature selection getting the X best features based on
# - statistical tests for the data. We have 65 sensors,
# - or about 12 different single movements in our case
    # - since in our gestures only complete finger flexion
    # - or relaxation is interesting, the minimum
# - number of features should be in the range of
# - 12-65. A good set might be the double amount of that
#fit = SelectKBest(chi2, k=65).fit(selectData.values, selectLabelDF.values)
#colIndex = fit.get_support(indices=True)
#windowData = windowData[windowData.columns[colIndex]]
    # important todo!
    # todo: I think also for feature selection we should take care that the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursive eleminate features: "
svc = sklearn.linear_model.Lasso(alpha = 0.1) #svm.SVR(kernel="linear")
print "test fit."
svc.fit(selectData.values, np.ravel(selectLabelDF.values))
print "run rfecv.."
rfecv = RFE(estimator=svc, step=0.1, verbose=2)
rfecv.fit(selectData.values, np.ravel(selectLabelDF.values))
print "get support..."
colIndex = rfecv.get_support(indices=True)
print "shrink data to selected features...."
windowData = windowData[windowData.columns[colIndex]]
print windowData.shape
print "selected headers: "
print windowData.columns
    # first we split training and test data already here. this
# is because of the different learning approach
#
# windowData['gesture'] = windowLabelInfo.idxmax(axis=1)
splitpoint = int(windowData.index.size * 0.7)
trainData = windowData[0:splitpoint]
testData = windowData[splitpoint + 1:]
trainLabels = windowLabelInfo[0:splitpoint]
testLabels = windowLabelInfo[splitpoint + 1:]
# a complete window has 201 frames. we count the label with
    # more than 150 frames, i.e. about 3/4 of the window, as the real label
labelDF, exclude = labelMatrixToArray(trainLabels, 150)
# now we need to balance the amount of the zero class to the other classes
# get all 0 indexes:
labelDF = labelDF.drop(exclude)
trainData = trainData.drop(exclude)
labelDF, trainData, _ = normalizeZeroClass(labelDF, trainData)
print("++++++++++++++++")
print(labelDF)
print("++++++++++++++++")
print("train data size:")
print(trainData.shape)
print("++++++++++++++++")
headers = Constants.headers
#d = trainData.loc[:, headers]
d = trainData.values #d.values
d = preprocessing.scale(d)
print(d)
clf = None
kf = KFold(len(labelDF.values), n_folds=5)
score = 0
for train_index, test_index in kf:
X_train = d[train_index, :]
X_ct = d[test_index, :]
y_train = labelDF.values[train_index]
y_ct = labelDF.values[test_index]
# lin_clf = sklearn.linear_model.LogisticRegression()
# lin_clf = sklearn.linear_model.LogisticRegression(class_weight='auto')
# lin_clf = svm.LinearSVC()
# lin_clf = svm.LinearSVC(class_weight='auto')
# lin_clf = svm.SVR()
# lin_clf = svm.SVC()
# lin_clf = svm.SVC(class_weight='auto')
lin_clf = svm.SVC(decision_function_shape='ovo')
# lin_clf = sklearn.neighbors.nearest_centroid.NearestCentroid()
# lin_clf = sklearn.linear_model.Lasso(alpha = 0.1)
# lin_clf = sklearn.linear_model.SGDClassifier(loss="hinge", penalty="l2")
# lin_clf = sklearn.linear_model.SGDClassifier(loss="hinge", penalty="l2", class_weight='auto')
# lin_clf = sklearn.naive_bayes.MultinomialNB()
# lin_clf = sklearn.tree.DecisionTreeClassifier()
# lin_clf = sklearn.tree.DecisionTreeClassifier(class_weight='auto')
# lin_clf = sklearn.ensemble.RandomForestClassifier(n_estimators=10)
# lin_clf = sklearn.ensemble.RandomForestClassifier(n_estimators=10, class_weight='auto')
# lin_clf = sklearn.ensemble.AdaBoostClassifier(n_estimators=100)
# lin_clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
lin_clf.fit(X_train, y_train)
s = lin_clf.score(X_ct, y_ct)
if s > score:
score = s
clf = lin_clf
#clf = svm.SVC(decision_function_shape='ovo')
#clf.fit(d, labelDF.values)
# TODO: test label approach:
# compute our binary matrix with labels per frame
# also compute our label vector as above
# then correct the label vector by looking
# at multilabel entries if they match with the prediction
# and set the label to that
testLabelDF, exclude = labelMatrixToArray(testLabels, 10)
# testLabelDF, testData, removalIndex = normalizeZeroClass(testLabelDF, testData)
# testLabels.drop(removalIndex)
testLabels = testLabels.fillna(0)
testLabels[testLabels > 0] = 1
#d = testData.loc[:, headers]
d = testData.values #d.values
d = preprocessing.scale(d)
prediction = clf.predict(d)
for row in range(prediction.size):
p = prediction[row]
val = testLabels.loc[testLabels.index[row]][p]
if val == 1.0:
testLabelDF.loc[testLabelDF.index[row]] = p
print("------------")
print(prediction)
print("------------")
print(testLabelDF)
print("------------")
print(classification_report(testLabelDF.values, prediction))
#windowData = glove_data.reset_index(drop=True)
#clf = Classification(windowData)
#clf.train()
#clf.report()
if __name__ == '__main__':
main()
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/datasets/base.py | 4 | 17316 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: Simplified BSD
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
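# Small illustration (added; not part of the original source): a Bunch is a
# plain dict whose keys are also readable as attributes.
#
#   >>> b = Bunch(a=1, b='two')
#   >>> b['a'], b.b
#   (1, 'two')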
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
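# Usage sketch (added for illustration; the paths shown are hypothetical):
#
#   >>> get_data_home()                       # doctest: +SKIP
#   '/home/user/scikit_learn_data'
#   >>> get_data_home('~/my_sklearn_cache')   # doctest: +SKIP
#   '/home/user/my_sklearn_cache'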
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, charset=None,
               charset_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
    The folder names are used as supervised signal label names. The individual
file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use utf-8 text files in a scikit-learn classification or clustering
algorithm you will first need to use the `sklearn.features.text`
module to build a feature extraction transformer that suits your
problem.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
charset : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, charset to use to decode text files if load_content is
True.
charset_error: {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `charset`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename).read() for filename in filenames]
if charset is not None:
            data = [d.decode(charset, charset_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
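# Usage sketch (added for illustration; the container path and category names
# are hypothetical):
#
#   >>> bunch = load_files('/data/reviews', categories=['pos', 'neg'],
#   ...                    charset='utf-8')            # doctest: +SKIP
#   >>> len(bunch.data) == len(bunch.target)           # doctest: +SKIP
#   True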
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data', 'iris.csv')))
fdescr = open(join(module_path, 'descr', 'iris.rst'))
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr.read(),
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print digits.data.shape
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
descr = open(join(module_path, 'descr', 'digits.rst')).read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
'target_names', the meaning of the labels, and 'DESCR', the
full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print boston.data.shape
(506, 13)
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data',
'boston_house_prices.csv')))
fdescr = open(join(module_path, 'descr', 'boston_house_prices.rst'))
    temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
    temp = next(data_file)  # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=fdescr.read())
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| agpl-3.0 |
Vimos/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
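# Note added for illustration (not part of the original example): when the data
# truly does not fit in memory, the same model can instead be fed chunk by
# chunk via partial_fit, e.g.:
#
#   ipca = IncrementalPCA(n_components=n_components)
#   for chunk in np.array_split(X, 10):
#       ipca.partial_fit(chunk)
#   X_ipca = ipca.transform(X)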
| bsd-3-clause |
shl198/Pipeline | Modules/Venn.py | 2 | 5403 | from matplotlib import pyplot as plt
plt.switch_backend("Qt4Agg")
import numpy as np
from matplotlib_venn import venn2,venn2_circles
from matplotlib_venn import venn3, venn3_circles
figure,axes=plt.subplots(3,2)
#========================= draw GS 3C9, 4B7
v1 = venn2(subsets={'01':220,'10':1268,'11':214},set_labels=('GS_VS_3C9','GS_VS_4B7'),ax=axes[0][0])
v1.get_patch_by_id('01').set_alpha(0.49)
v1.get_patch_by_id('10').set_alpha(0.49)
v1.get_patch_by_id('11').set_alpha(0.49)
c1 = venn2_circles(subsets={'01':220,'10':1268,'11':214},
linestyle='dashed',ax=axes[0][0])
first = c1[0]
first.set_edgecolor('red')
second = c1[1]
second.set_edgecolor('green')
#========================== draw GS 91-1C8,91-2A6
v1 = venn2(subsets={'01':1105,'10':519,'11':1646},set_labels=('GS_VS_1C8','GS_VS_2A6'),ax=axes[0][1])
v1.get_patch_by_id('01').set_alpha(0.49)
v1.get_patch_by_id('10').set_alpha(0.49)
v1.get_patch_by_id('11').set_alpha(0.49)
c1 = venn2_circles(subsets={'01':1105,'10':519,'11':1646},
linestyle='dashed',ax=axes[0][1])
first = c1[0]
first.set_edgecolor('red')
second = c1[1]
second.set_edgecolor('green')
#=========================== draw GS 3C9-7,8,9
v = venn3(subsets={'001':93,'010':256,'011':93,'100':75,'101':89,'110':72,'111':754},
set_labels = ('GS_VS_3C9-7','GS_VS_3C9-8','GS_VS_3C9-9'),ax=axes[1][0])
v.get_patch_by_id('001').set_alpha(0.49)
v.get_patch_by_id('010').set_alpha(0.49)
v.get_patch_by_id('011').set_alpha(0.49)
v.get_patch_by_id('100').set_alpha(0.49)
v.get_patch_by_id('101').set_alpha(0.49)
v.get_patch_by_id('110').set_alpha(0.49)
v.get_patch_by_id('111').set_alpha(0.49)
label = v.get_label_by_id('011')
label.set_x(label.get_position()[0] + 0.05)
label = v.get_label_by_id('101')
label.set_x(label.get_position()[0] - 0.03)
label = v.get_label_by_id('110')
label.set_x(label.get_position()[0] - 0.1)
label.set_y(label.get_position()[1] + 0.05)
c = venn3_circles(subsets={'001':93,'010':256,'011':93,'100':75,'101':89,'110':72,'111':754},
linestyle='dashed',ax=axes[1][0])
first = c[0]
first.set_edgecolor('red')
second = c[1]
second.set_edgecolor('green')
third = c[2]
third.set_edgecolor('blue')
#============================ draw GS 4B7-10,11,12
v = venn3(subsets={'001':86,'010':66,'011':29,'100':46,'101':26,'110':24,'111':199},
set_labels = ('GS_VS_4B7-10','GS_VS_4B7-11','GS_VS_4B7-12'),ax=axes[1][1])
v.get_patch_by_id('001').set_alpha(0.49)
v.get_patch_by_id('010').set_alpha(0.49)
v.get_patch_by_id('011').set_alpha(0.49)
v.get_patch_by_id('100').set_alpha(0.49)
v.get_patch_by_id('101').set_alpha(0.49)
v.get_patch_by_id('110').set_alpha(0.49)
v.get_patch_by_id('111').set_alpha(0.49)
label = v.get_label_by_id('011')
label.set_x(label.get_position()[0] + 0.05)
label = v.get_label_by_id('101')
label.set_x(label.get_position()[0] - 0.03)
label = v.get_label_by_id('110')
label.set_x(label.get_position()[0])
label.set_y(label.get_position()[1])
c = venn3_circles(subsets={'001':86,'010':66,'011':29,'100':46,'101':26,'110':24,'111':199},
linestyle='dashed',ax=axes[1][1])
first = c[0]
first.set_edgecolor('red')
second = c[1]
second.set_edgecolor('green')
third = c[2]
third.set_edgecolor('blue')
#=========================== draw GS 91-1C8 13,14,15
v = venn3(subsets={'001':599,'010':213,'011':73,'100':164,'101':112,'110':201,'111':1116},
set_labels = ('GS_VS_1C8-13','GS_VS_1C8-14','GS_VS_1C8-15'),ax=axes[2][0])
v.get_patch_by_id('001').set_alpha(0.49)
v.get_patch_by_id('010').set_alpha(0.49)
v.get_patch_by_id('011').set_alpha(0.49)
v.get_patch_by_id('100').set_alpha(0.49)
v.get_patch_by_id('101').set_alpha(0.49)
v.get_patch_by_id('110').set_alpha(0.49)
v.get_patch_by_id('111').set_alpha(0.49)
label = v.get_label_by_id('011')
label.set_x(label.get_position()[0] + 0.05)
label = v.get_label_by_id('101')
label.set_x(label.get_position()[0] - 0.03)
label = v.get_label_by_id('110')
label.set_x(label.get_position()[0])
label.set_y(label.get_position()[1])
c = venn3_circles(subsets={'001':599,'010':213,'011':73,'100':164,'101':112,'110':201,'111':1116},
linestyle='dashed',ax=axes[2][0])
first = c[0]
first.set_edgecolor('red')
second = c[1]
second.set_edgecolor('green')
third = c[2]
third.set_edgecolor('blue')
#=========================== draw GS 91-2A6 16,17,18
v = venn3(subsets={'001':261,'010':119,'011':147,'100':257,'101':88,'110':227,'111':1480},
set_labels = ('GS_VS_2A6-16','GS_VS_2A6-17','GS_VS_2A6-18'),ax=axes[2][1])
v.get_patch_by_id('001').set_alpha(0.49)
v.get_patch_by_id('010').set_alpha(0.49)
v.get_patch_by_id('011').set_alpha(0.49)
v.get_patch_by_id('100').set_alpha(0.49)
v.get_patch_by_id('101').set_alpha(0.49)
v.get_patch_by_id('110').set_alpha(0.49)
v.get_patch_by_id('111').set_alpha(0.49)
label = v.get_label_by_id('011')
label.set_x(label.get_position()[0] + 0.05)
label = v.get_label_by_id('101')
label.set_x(label.get_position()[0] - 0.03)
label = v.get_label_by_id('110')
label.set_x(label.get_position()[0])
label.set_y(label.get_position()[1])
c = venn3_circles(subsets={'001':261,'010':119,'011':147,'100':257,'101':88,'110':227,'111':1480},
linestyle='dashed',ax=axes[2][1])
first = c[0]
first.set_edgecolor('red')
second = c[1]
second.set_edgecolor('green')
third = c[2]
third.set_edgecolor('blue')
plt.show()
| mit |
Geosyntec/wqio | wqio/tests/helpers.py | 2 | 7031 | import distutils
import sys
import subprocess
import re
import os
import difflib
from functools import wraps
from pkg_resources import resource_filename
from io import StringIO
from collections import namedtuple
from contextlib import contextmanager
import numpy
import pandas
import pytest
def get_img_tolerance():
return int(os.environ.get("MPL_IMGCOMP_TOLERANCE", 15))
def seed(func):
""" Decorator to seed the RNG before any function. """
@wraps(func)
def wrapper(*args, **kwargs):
numpy.random.seed(0)
return func(*args, **kwargs)
return wrapper
def raises(error):
"""Wrapper around pytest.raises to support None."""
if error:
return pytest.raises(error)
else:
@contextmanager
def not_raises():
try:
yield
except Exception as e:
raise e
return not_raises()
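# Usage sketch (added for illustration, assuming `import math`): passing None
# lets a parametrized test expect "no exception" without special-casing.
#
#   @pytest.mark.parametrize('x, error', [(4, None), (-4, ValueError)])
#   def test_sqrt(x, error):
#       with raises(error):
#           math.sqrt(x)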
def requires(module, modulename):
def outer_wrapper(function):
@wraps(function)
def inner_wrapper(*args, **kwargs):
if module is None:
raise RuntimeError(
"{} required for `{}`".format(modulename, function.__name__)
)
else:
return function(*args, **kwargs)
return inner_wrapper
return outer_wrapper
@seed
def make_dc_data(ndval="ND", rescol="res", qualcol="qual"):
dl_map = {
"A": 0.1,
"B": 0.2,
"C": 0.3,
"D": 0.4,
"E": 0.1,
"F": 0.2,
"G": 0.3,
"H": 0.4,
}
index = pandas.MultiIndex.from_product(
[
list("ABCDEFGH"),
list("1234567"),
["GA", "AL", "OR", "CA"],
["Inflow", "Outflow", "Reference"],
],
names=["param", "bmp", "state", "loc"],
)
array = numpy.random.lognormal(mean=0.75, sigma=1.25, size=len(index))
data = pandas.DataFrame(data=array, index=index, columns=[rescol])
data["DL"] = data.apply(lambda r: dl_map.get(r.name[0]), axis=1)
data[rescol] = data.apply(
lambda r: dl_map.get(r.name[0]) if r[rescol] < r["DL"] else r[rescol], axis=1
)
data[qualcol] = data.apply(lambda r: ndval if r[rescol] <= r["DL"] else "=", axis=1)
return data
@seed
def make_dc_data_complex(dropsome=True):
dl_map = {
"A": 0.25,
"B": 0.50,
"C": 0.10,
"D": 1.00,
"E": 0.25,
"F": 0.50,
"G": 0.10,
"H": 1.00,
}
index = pandas.MultiIndex.from_product(
[
list("ABCDEFGH"),
list("1234567"),
["GA", "AL", "OR", "CA"],
["Inflow", "Outflow", "Reference"],
],
names=["param", "bmp", "state", "loc"],
)
xtab = (
pandas.DataFrame(index=index, columns=["res"])
.unstack(level="param")
.unstack(level="state")
)
xtab_rows = xtab.shape[0]
for c in xtab.columns:
mu = numpy.random.uniform(low=-1.7, high=2)
sigma = numpy.random.uniform(low=0.1, high=2)
xtab[c] = numpy.random.lognormal(mean=mu, sigma=sigma, size=xtab_rows)
data = xtab.stack(level="state").stack(level="param")
data["DL"] = data.apply(lambda r: dl_map.get(r.name[-1]), axis=1)
data["res"] = data.apply(
lambda r: dl_map.get(r.name[-1]) if r["res"] < r["DL"] else r["res"], axis=1
)
data["qual"] = data.apply(lambda r: "<" if r["res"] <= r["DL"] else "=", axis=1)
if dropsome:
if int(dropsome) == 1:
dropsome = 0.25
index = numpy.random.uniform(size=data.shape[0]) >= dropsome
data = data.loc[index]
return data
def comp_statfxn(x, y):
stat = namedtuple("teststat", ("statistic", "pvalue"))
result = x.max() - y.min()
return stat(result, result * 0.25)
def test_data_path(filename):
path = resource_filename("wqio.tests._data", filename)
return path
def getTestROSData():
"""
Generates test data for an ROS estimate.
Input:
None
Output:
        pandas DataFrame with the values (results or DLs) and qualifiers
(blank or "ND" for non-detects)
"""
raw_csv = StringIO(
"res,qual\n2.00,=\n4.20,=\n4.62,=\n5.00,ND\n5.00,ND\n5.50,ND\n"
"5.57,=\n5.66,=\n5.75,ND\n5.86,=\n6.65,=\n6.78,=\n6.79,=\n7.50,=\n"
"7.50,=\n7.50,=\n8.63,=\n8.71,=\n8.99,=\n9.50,ND\n9.50,ND\n9.85,=\n"
"10.82,=\n11.00,ND\n11.25,=\n11.25,=\n12.20,=\n14.92,=\n16.77,=\n"
"17.81,=\n19.16,=\n19.19,=\n19.64,=\n20.18,=\n22.97,=\n"
)
return pandas.read_csv(raw_csv)
def compare_versions(utility="latex"): # pragma: no cover
"return True if a is greater than or equal to b"
requirements = {"latex": "3.1415"}
available = {"latex": checkdep_tex()}
required = requirements[utility]
present = available[utility]
if present:
present = distutils.version.LooseVersion(present)
required = distutils.version.LooseVersion(required)
if present >= required:
return True
else:
return False
else:
return False
def _show_package_info(package, name): # pragma: no cover
packagedir = os.path.dirname(package.__file__)
print("%s version %s is installed in %s" % (name, package.__version__, packagedir))
def _show_system_info(): # pragma: no cover
import pytest
pyversion = sys.version.replace("\n", "")
print("Python version %s" % pyversion)
print("pytest version %d.%d.%d" % pytest.__versioninfo__)
import numpy
_show_package_info(numpy, "numpy")
import scipy
_show_package_info(scipy, "scipy")
import matplotlib
_show_package_info(matplotlib, "matplotlib")
import statsmodels
_show_package_info(statsmodels, "statsmodels")
import pandas
_show_package_info(pandas, "pandas")
def checkdep_tex(): # pragma: no cover
if sys.version_info[0] >= 3:
def byte2str(b):
return b.decode("ascii")
else: # pragma: no cover
def byte2str(b):
return b
try:
s = subprocess.Popen(
["tex", "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
line = byte2str(s.stdout.readlines()[0])
pattern = "3\.1\d+"
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def assert_bigstring_equal(
input_string, known_string, input_out=None, known_out=None
): # pragma: no cover
if input_string != known_string:
if input_out and known_out:
with open(input_out, "w") as fi:
fi.write(input_string)
with open(known_out, "w") as fo:
fo.write(known_string)
message = "".join(
difflib.ndiff(input_string.splitlines(True), known_string.splitlines(True))
)
raise AssertionError("Multi-line strings are unequal:\n" + message)
| bsd-3-clause |