repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
Titan-C/scikit-learn | sklearn/datasets/tests/test_kddcup99.py | 42 | 1278 | """Test kddcup99 loader. Only 'percent10' mode is tested, as the full data
is too big to use in unit-testing.
The test is skipped if the data wasn't previously fetched and saved to the
scikit-learn data folder.
"""
from sklearn.datasets import fetch_kddcup99
from sklearn.utils.testing import assert_equal, SkipTest
def test_percent10():
try:
data = fetch_kddcup99(download_if_missing=False)
except IOError:
raise SkipTest("kddcup99 dataset can not be loaded.")
assert_equal(data.data.shape, (494021, 41))
assert_equal(data.target.shape, (494021,))
data_shuffled = fetch_kddcup99(shuffle=True, random_state=0)
assert_equal(data.data.shape, data_shuffled.data.shape)
assert_equal(data.target.shape, data_shuffled.target.shape)
data = fetch_kddcup99('SA')
assert_equal(data.data.shape, (100655, 41))
assert_equal(data.target.shape, (100655,))
data = fetch_kddcup99('SF')
assert_equal(data.data.shape, (73237, 4))
assert_equal(data.target.shape, (73237,))
data = fetch_kddcup99('http')
assert_equal(data.data.shape, (58725, 3))
assert_equal(data.target.shape, (58725,))
data = fetch_kddcup99('smtp')
assert_equal(data.data.shape, (9571, 3))
assert_equal(data.target.shape, (9571,))
| bsd-3-clause |
chengchingwen/moth_prediction | get_ft.py | 1 | 1143 | import sqlite3 as sql
import pandas as pd
import datetime as d
import make_db as m
date = "%d-%d-%d"
datef = "%Y-%m-%d"
year_end = "%Y-12-31"
year_start = "%Y-01-01"
query = "select * from `%d` where Time between '%s' and '%s'"
def nd(s):
return d.datetime.strptime(s, "%Y-%m-%d").month
def get_ft_t(year, month, day, place ,delta=10 ):
today = d.datetime(year, month ,day)-d.timedelta(1)
start_date = today - d.timedelta(delta-1)
db = sql.connect(place)
if today.year == start_date.year:
table = pd.read_sql(query % (year, start_date.strftime(datef),today.strftime(datef)),db)
else:
table1 = pd.read_sql(query % (start_date.year, start_date.strftime(datef), start_date.strftime(year_end)), db)
table2 = pd.read_sql(query % (today.year, today.strftime(year_start),today.strftime(datef)),db)
table = pd.concat([table1, table2])
if len(table):
table.index = range(delta)
return table
def get_ft(row):
time = d.datetime.strptime(row["date"],"%Y-%m-%d")
return get_ft_t(time.year,time.month,time.day,m.db_path % m.place[row["ID"]],delta=20)
| artistic-2.0 |
yunque/sms-tools | lectures/08-Sound-transformations/plots-code/stftFiltering-orchestra.py | 18 | 1677 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band-pass filter using a hanning window (bins outside the band are attenuated by 60 dB)
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
filt = np.zeros(N/2+1)-60
filt[startBin:startBin+nBins] = bandpass
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, fs, w, N, H)
mY,pY = STFT.stftAnal(y, fs, w, N, H)
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
plt.plot(fs*np.arange(mX[0,:].size)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mY[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| agpl-3.0 |
vigilv/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
spacether/pycalculix | pycalculix/partmodule.py | 1 | 40690 | """This module stores the Part class. It is used to make 2D parts.
"""
import numpy as np # needed for linspace on hole creation
from . import base_classes
from . import geometry #point, line, area
class Part(base_classes.Idobj):
"""This makes a part.
Args:
parent: parent FeaModel
Attributes:
__fea (FeaModel): parent FeaModel
points (list): list of part points, excludes arc centers
allpoints (list): list of part points, includes arc centers
lines (list): list of all Line and Arc that make the part
signlines (list): list of all SignLine and SignArc that make the part
__cursor (Point): location of a cursor drawing the part
__holemode (bool): if True, lines will be added to holes, otherwise,
they'll be added to areas
areas (list): list of Area that make up the part
left (list): list of the part's leftmost lines, they must be vertical
right (list): list of the part's rightmost lines, they must be vertical
top (list): list of the part's top lines, they must be horizontal
bottom (list): list of the part's bottom lines, they must be horizontal
center (Point): the area centroid of the part
nodes (list): list of part's nodes
elements (list): list of part's elements
"""
def __init__(self, feamodel):
self.fea = feamodel
self.__cursor = geometry.Point(0, 0)
self.areas = [] # top area is the buffer
# make the buffer
area = self.fea.areas.append(geometry.Area(self, []))
self.areas.append(area)
base_classes.Idobj.__init__(self)
self.left = None
self.right = None
self.top = None
self.bottom = None
self.center = None
self.nodes = []
self.elements = []
self.__holemode = False
self.fea.parts.append(self)
def __hash__(self):
"""Returns the item's id as its hash."""
return self.id
@property
def lines(self):
"""Returns list of part lines."""
lines = set()
for area in self.areas:
lines.update(area.lines)
return list(lines)
@property
def signlines(self):
"""Returns list of part signline and signarc."""
lines = set()
for area in self.areas:
lines.update(area.signlines)
return list(lines)
@property
def points(self):
"""Returns list of part points, excludes arc centers."""
points = set()
lines = self.lines
for line in lines:
points.update(line.points)
return list(points)
@property
def allpoints(self):
"""Returns list of part points, includes arc centers."""
points = set()
lines = self.lines
for line in lines:
points.update(line.allpoints)
return list(points)
def get_item(self, item):
""""Returns the part's item(s) requested by the passed string.
Args:
item (str): string requesting item(s)
* Valid examples: 'P0', 'L0', 'left', 'A0'
Returns:
item(s) or None: If items are found they are returned
* If there is only one item it is returned
* If there are multiple items, they are returned as a list
* If no items are found None is returned
"""
if item in ['left', 'right', 'top', 'bottom']:
items = getattr(self, item)
return items
elif item[0] == 'P':
# get point
items = self.points
num = int(item[1:])
res = [a for a in items if a.id == num]
return res[0]
elif item[0] == 'L':
# get line
items = self.signlines
num = int(item[1:])
items = [a for a in items if a.id == num]
return items[0]
elif item[0] == 'A':
# get area
items = self.areas
num = int(item[1:])
items = [a for a in items if a.id == num]
return items[0]
else:
print('Unknown item! Please pass the name of a point, line or area!')
return None
def get_name(self):
"""Returns the part name based on id number."""
return 'PART'+str(self.id)
def __set_side(self, side):
"""Sets the part.side to a list of lines on that side of the part.
Used to set the part.left, part.right, part.top, part.bottom sides.
Args:
side (string): 'left', 'right', 'top','bottom'
"""
# set index and axis, ind=0 is low side, ind=-1 is high side
inds = {'left':0, 'right':-1, 'top':-1, 'bottom':0}
axes = {'left':'y', 'right':'y', 'top':'x', 'bottom':'x'}
ind = inds[side]
axis = axes[side]
# loc = 'left', ind = 0, axis = 'y'
points = self.points
# sort the points low to high
points = sorted(points, key=lambda pt: getattr(pt, axis))
# store the target value
target_value = getattr(points[ind], axis)
res = []
lines = self.signlines
for sline in lines:
if isinstance(sline, geometry.SignLine):
pt_axis_vals = [getattr(pt, axis) for pt in sline.points]
pt_dist_vals = [abs(target_value - pt_axis_val) for pt_axis_val in pt_axis_vals]
if all([pt_dist_val < geometry.ACC for pt_dist_val in pt_dist_vals]):
# line is on the requested side
res.append(sline)
setattr(self, side, res)
def goto(self, x, y, holemode=False):
"""Moves the part cursor to a location.
If that location has a point at it, use it.
If not, make a new point at that location.
Args:
x (float): x-coordinate of the point to go to
y (float): y-coordinate of the point to go to
holemode (bool): if True, we start drawing a hole here, otherwise
we start drawing an area
Returns:
self.__cursor (Point): returns the updated cursor point
"""
[pnew, already_exists] = self.__make_get_pt(x, y)
if already_exists:
if self.areas[-1].closed == True:
# make a new area if the old area is already closed and we're
# going to an existing point
area = self.fea.areas.append(geometry.Area(self, []))
self.areas.append(area)
# return cursor
self.__cursor = pnew
self.__holemode = holemode
return self.__cursor
def __get_point(self, point):
"""Returns point if found, None otherwise."""
points = self.allpoints
found_point = None
for apoint in points:
dist = point - apoint
dist = dist.length()
if dist < geometry.ACC:
# point already exists in part, use it
found_point = apoint
break
return found_point
def __make_get_pt(self, x, y):
"""Gets a point if it exists, makes it if it doesn't. Returns the point.
Use this when you need a point made in the part, and you want to
use an extant point if one is available.
Args:
x (float): point x-coordinate
y (float): point y-coordinate
Returns:
list:
list[0]: Point
list[1]: boolean, True = the point already existed
"""
thept = geometry.Point(x, y)
pfound = self.__get_point(thept)
pexists = True
if pfound == None:
pfound = thept
self.fea.register(pfound)
pexists = False
return [pfound, pexists]
def __calc_area_center(self):
"""Calculates and returns the part and centroid Point.
Returns:
list: [area, Point]
"""
val_list = []
for area in self.areas:
if area.closed == True:
aval, cval = area.area, area.center
val_list.append([aval, cval])
a_sum = sum([aval[0] for aval in val_list])
cxa_sum = sum([center.x*aval for [aval, center] in val_list])
cya_sum = sum([center.y*aval for [aval, center] in val_list])
cx_val = cxa_sum/a_sum
cy_val = cya_sum/a_sum
center = geometry.Point(cx_val, cy_val)
return [a_sum, center]
def __make_get_sline(self, lnew):
"""Returns a signed line or arc, makes it if it needs to.
Args:
lnew (Line or Arc or SignLine or SignArc): Line or Arc to make
Returns:
list:
list[0]: SignLine or SignArc
list[1]: boolean, True = the line already existed
"""
lpos = lnew.signed_copy(1)
lneg = lnew.signed_copy(-1)
# get part's signed lines
slines = self.signlines
lexists = False
for sline in slines:
if lpos == sline:
lexists = True
signline_new = sline
break
elif lneg == sline:
lexists = True
signline_new = sline.signed_copy(-1)
signline_new.edge = False
self.fea.register(signline_new)
break
else:
# fired when we haven't broken out of the loop, the line is new
if isinstance(lnew, geometry.SignLine):
lnew = geometry.Line(lnew.pt(0), lnew.pt(1))
self.fea.register(lnew)
lnew.save_to_points()
signline_new = lnew.signed_copy(1)
self.fea.register(signline_new)
signline_new.line.add_signline(signline_new)
return [signline_new, lexists]
def draw_circle(self, center_x, center_y, radius, num_arcs=4):
"""Draws a circle area and adds it to the part.
Args:
center_x (float): x-axis hole center
center_y (float): y-axis hole center
radius (float): hole radius
num_arcs (int): number of arcs to use, must be >= 3
Returns:
loop (geometry.LineLoop): a LineLoop list of SignArc
"""
center = geometry.Point(center_x, center_y)
rvect = geometry.Point(0, radius)
start = center + rvect
self.goto(start.x, start.y)
angles = np.linspace(360/num_arcs,360,num_arcs, endpoint=True)
for ang in angles:
point = geometry.Point(0, radius).rot_ccw_deg(ang)
point = point + center
self.draw_arc(point.x, point.y, center.x, center.y)
loop = self.areas[-1].exlines
self.__update()
return loop
def draw_hole(self, center_x, center_y, radius, num_arcs=4, filled=False):
"""Makes a hole in the part.
Args:
center_x (float): x-axis hole center
center_y (float): y-axis hole center
radius (float): hole radius
num_arcs (int): number of arcs to use, must be >= 3
filled (bool): whether to fill the hole
* True: makes a new area in the part
Returns:
hole_lines (list or None): list of hole SignLine or SignArc
* Returns None if hole was not made.
"""
center = geometry.Point(center_x, center_y)
area = self.__area_from_pt(center)
if area == None:
print("You can't make a hole here until there's an area here!")
return None
else:
# make points
rvect = geometry.Point(0, radius)
start = center + rvect
self.goto(start.x, start.y, holemode=True)
angles = np.linspace(360/num_arcs,360,num_arcs, endpoint=True)
for ang in angles:
point = geometry.Point(0, radius).rot_ccw_deg(ang)
point = point + center
self.draw_arc(point.x, point.y, center.x, center.y)
# make new area
if filled:
# reverse order, reverse sline directions, store in feamodel
slines = list(area.holes[-1])
slines.reverse()
slines = [sline.signed_copy(-1) for sline in slines]
slines = [self.__make_get_sline(sline)[0] for sline in slines]
anew = self.fea.areas.append(geometry.Area(self, slines))
self.areas.append(anew)
self.__update()
return area.holes[-1]
def draw_arc_angle(self, degrees_ccw, center_x, center_y):
"""Makes an arc and adds it to the part.
| Current point is the first arc point.
| degrees_ccw is the swept angle in degrees, counterclockwise
| (center_x, center_y) is the arc center
| Degrees: Traversed angle of arc must be < 180 degrees
Args:
degrees_ccw (float): arc swept angle in degrees, counterclockwise
center_x (float): arc center point x-coordinate
center_y (float): arc center point y-coordinate
Returns:
list: [arc, arc_start_point, arc_end_point]
"""
center = geometry.Point(center_x, center_y)
radius_vector = self.__cursor - center
radius_vector.rot_ccw_deg(degrees_ccw)
end = center + radius_vector
return self.draw_arc(end.x, end.y, center_x, center_y)
def draw_arc(self, end_x, end_y, center_x, center_y):
"""Makes an arc and adds it to the part.
| Current point is the first arc point.
| (end_x, end_y) is the end point
| (center_x, center_y) is the arc center
| Degrees: Traversed angle of arc must be < 180 degrees
| Radians: Traversed angle of arc must be < Pi
Args:
end_x (float): arc end point x-coordinate
end_y (float): arc end point y-coordinate
center_x (float): arc center point x-coordinate
center_y (float): arc center point y-coordinate
Returns:
list: [arc, arc_start_point, arc_end_point]
"""
pold = self.__cursor
# make arc center point
ctr = self.__make_get_pt(center_x, center_y)[0]
# make arc end point
self.__cursor = self.__make_get_pt(end_x, end_y)[0]
# make arc
arc = self.__make_get_sline(geometry.Arc(pold, self.__cursor, ctr))[0]
if self.__holemode:
area = self.__area_from_pt(self.__cursor)
if area != None:
closed = area.add_hole_sline(arc)
if closed:
self.__holemode = False
else:
print('You must have a closed area here before making a hole!')
else:
self.areas[-1].add_sline(arc)
return [arc, pold, self.__cursor]
def draw_line_delta(self, delta_x, delta_y):
"""Draws a line a relative distance, and adds it to the part.
Args:
delta_x (float): x-axis delta distance to draw the line
delta_y (float): y-axis delta distance to draw the line
Returns:
list: [line, point_start, point_end]
"""
x = self.__cursor.x + delta_x
y = self.__cursor.y + delta_y
return self.draw_line_to(x, y)
def draw_line_rad(self, dx_rad):
"""Draws a line a relative radial distance, and adds it to the part.
Args:
dx_rad (float): x-axis delta distance to draw the line
Returns:
list: [line, point_start, point_end]
"""
return self.draw_line_delta(dx_rad, 0.0)
def draw_line_ax(self, dy_ax):
"""Draws a line a relative axial distance, and adds it to the part.
Args:
dy_ax (float): y-axis delta distance to draw the line
Returns:
list: [line, point_start, point_end]
"""
return self.draw_line_delta(0.0, dy_ax)
def draw_line_to(self, x, y):
"""Draws a line to the given location, and adds it to the part.
Args:
x (float): x-axis coordinate of the end point
y (float): y-axis coordinate of the end point
Returns:
list: [SignLine, point_start, point_end]
"""
pold = self.__cursor
self.__cursor = self.__make_get_pt(x, y)[0]
sline = self.__make_get_sline(geometry.Line(pold, self.__cursor))[0]
if self.__holemode:
area = self.__area_from_pt(self.__cursor)
if area != None:
closed = area.add_hole_sline(sline)
if closed:
self.__holemode = False
self.__update()
else:
print('You must have a closed area here before making a hole!')
else:
# drawing in last area
self.areas[-1].add_sline(sline)
# check for closure of the area
if self.areas[-1].closed:
self.__update()
return [sline, pold, self.__cursor]
def __get_maxlength(self):
"""Returns the max distance between points in the part."""
points = self.points
maxlen = 0.0
# loop through points checking dist to next point
for ind, point_1 in enumerate(points[:-1]):
for point_2 in points[ind:]:
vect = point_1 - point_2
dist = vect.length()
if dist > maxlen:
maxlen = dist
return maxlen
def __area_from_pt(self, point):
"""Returns the area that the point is inside.
Args:
point (Point): the point we are asking about
Returns:
Area or None:
Area is the found area
None is returned if the point is not in one of this part's areas
"""
for area in self.areas:
if area.contains_point(point):
return area
return None
def fillet_lines(self, line1, line2, radius):
"""Fillets the given lines in the part.
This inserts an arc in the part tangent to the two given lines.
Args:
line1 (SignLine): line that the arc starts on, arc is tangent
line2 (SignLine): line that the arc ends on, arc is tangent
radius (float): arc radius size
Returns:
list: [arc, start_point, end_point]
"""
# check if the lines are touching
if not line1.line.touches(line2.line):
print('ERROR: Cannot fillet! Lines must touch!')
return
if line1.line.pt(1) == line2.line.pt(0):
first_line = line1
second_line = line2
elif line2.line.pt(1) == line1.line.pt(0):
first_line = line2
second_line = line1
else:
print('ERROR: Sign lines must both be going in CW or CCW '
'direction. The two passed lines are going in '
'different directions. Unable to fillet them.')
return
tmp = self.__cursor
# offset the lines, assuming area is being traced clockwise
# get the intersection point
magnitude = radius
l1_off = first_line.offset(magnitude)
l2_off = second_line.offset(magnitude)
ctrpt = l1_off.intersects(l2_off)
if ctrpt == None:
# flip the offset direction if lines don't intersect
magnitude = -radius
l1_off = first_line.offset(magnitude)
l2_off = second_line.offset(magnitude)
ctrpt = l1_off.intersects(l2_off)
# now we have an intersecting point
p1_new = first_line.arc_tang_intersection(ctrpt, magnitude)
p2_new = second_line.arc_tang_intersection(ctrpt, magnitude)
rempt = first_line.pt(1)
p1_new = self.__make_get_pt(p1_new.x, p1_new.y)[0]
ctrpt = self.__make_get_pt(ctrpt.x, ctrpt.y)[0]
p2_new = self.__make_get_pt(p2_new.x, p2_new.y)[0]
# make the new arc
arc = self.__make_get_sline(geometry.Arc(p1_new, p2_new, ctrpt))[0]
# put the arc in the right location in the area
area = first_line.lineloop.parent
area.line_insert(first_line, arc)
print('Arc inserted into area %i' % (area.id))
# edit the adjacent lines to replace the removed pt
first_line.set_pt(1, arc.pt(0))
second_line.set_pt(0, arc.pt(1))
# del old pt, store new points for the arc
self.fea.points.remove(rempt)
# reset the cursor to where it should be
self.__cursor = tmp
return [arc, arc.pt(0), arc.pt(1)]
def fillet_all(self, radius):
"""Fillets all external lines not within 10 degrees of tangency
Args:
radius (float): the fillet radius to use
Returns:
arcs (list): list of SignArc
"""
pairs = []
for area in self.areas:
for ind, sline in enumerate(area.exlines):
prev_sline = area.exlines[ind-1]
this_point = sline.pt(0)
if len(this_point.lines) == 2:
# only fillet lines that are not shared by other areas
if (isinstance(sline, geometry.SignLine)
and isinstance(prev_sline, geometry.SignLine)):
# only fillet lines
perp1 = prev_sline.get_perp_vec(this_point)
perp2 = sline.get_perp_vec(this_point)
ang = perp1.ang_bet_deg(perp2)
is_tangent = (-10 <= ang <= 10)
if is_tangent == False:
pairs.append([prev_sline, sline])
arcs = []
for pair in pairs:
arc = self.fillet_lines(pair[0], pair[1], radius)[0]
arcs.append(arc)
return arcs
def label(self, axis):
"""Labels the part on a Matplotlib axis
Args:
axis (Matplotlib Axis): Matplotlib Axis
"""
axis.text(self.center.y, self.center.x, self.get_name(),
ha='center', va='center')
def plot(self, axis, label=True, color='yellow'):
"""Plots the part on the passed Matplotlib axis.
Args:
axis (Matplotlib axis): the axis we will plot on
label (bool): True displays the part label
color (tuple): r,g,b,a matplotlib color tuple
"""
patches = []
for area in self.areas:
if area.closed:
patches.append(area.get_patch())
for patch in patches:
patch.set_color(color)
axis.add_patch(patch)
# apply the label
if label:
self.label(axis)
def __cut_line(self, point, line):
"""Cuts the passed line at the passed point.
The passed line is cut into two lines. All areas that included the
original line are updated.
Args:
line (Line or Arc): the line to cut, must be Line or Arc
point (Point): the location on the line that we will cut it
Returns:
list: [pnew, lnew]
pnew: the new point we created to cut the original line
lnew: the new line we created, the end half of the original line
"""
pnew = self.__make_get_pt(point.x, point.y)[0]
if point.id != -1:
# if passed point already exists, use it
pnew = point
pend = line.pt(1)
line.set_pt(1, pnew) # shortens the line
new_prim = geometry.Line(pnew, pend)
if isinstance(line, geometry.Arc):
new_prim = geometry.Arc(pnew, pend, line.actr)
new_sline = self.__make_get_sline(new_prim)[0]
# insert the new line into existing areas
is_line = isinstance(line, geometry.Line) or isinstance(line, geometry.Arc)
is_sline = isinstance(line, geometry.SignLine) or isinstance(line, geometry.SignArc)
print('Cutting line (is_line, is_sline, signlines) (%s, %s, %i)' % (is_line, is_sline, len(line.signlines)))
slines = line.signlines
for sline in slines:
area = sline.lineloop.parent
if sline.sign == 1:
# cutting line in clockwise area, where line is pos
area.line_insert(sline, new_sline)
elif sline.sign == -1:
# cutting line in clockwise area, where line is neg
rev_sline = self.__make_get_sline(new_sline.signed_copy(-1))[0]
area.line_insert(sline, rev_sline, after=False)
return [pnew, new_sline]
def __cut_area(self, area, start_pt, end_pt):
"""Cuts the part area from start_pt to end_pt."""
# select the line portions that define the areas
# list[:low] excludes low index
# list [high:] includes high index
# we want the line which start with the point
lpre_start = area.line_from_startpt(start_pt)
lpre_end = area.line_from_startpt(end_pt)
if lpre_start == None or lpre_end == None:
self.fea.plot_geometry()
print(area.exlines)
istart = area.exlines.index(lpre_start)
iend = area.exlines.index(lpre_end)
low = min(istart, iend)
high = max(istart, iend)
# lists of lines for areas
beg = area.exlines[:low]
mid = area.exlines[low:high]
end = area.exlines[high:]
# make cut line for [beg + cut + end] area
start_pt = mid[0].pt(0)
end_pt = mid[-1].pt(1)
fwd = geometry.Line(start_pt, end_pt)
rev = geometry.Line(end_pt, start_pt)
# update existing area
cline = self.__make_get_sline(fwd)[0]
alist_curr = beg + [cline] + end
area.update(alist_curr)
# make new area
cline_rev = self.__make_get_sline(rev)[0]
alist_other = mid + [cline_rev]
anew = geometry.Area(self, alist_other)
self.fea.register(anew)
self.areas.append(anew)
# fix holes
self.__store_holes()
def __merge_hole(self, area, start_pt, end_pt):
"""Merges the hole into its area with a line between passed points."""
# line will be drawn from start point on exlines to end point on hole
hole_points = area.holepoints
if start_pt in hole_points:
tmp = start_pt
start_pt = end_pt
end_pt = tmp
lpre_start = area.line_from_startpt(start_pt)
hole_line = area.line_from_startpt(end_pt)
if lpre_start == None or hole_line == None:
self.fea.plot_geometry()
ind = area.exlines.index(lpre_start)
# store sections of the area
beg = area.exlines[:ind]
end = area.exlines[ind:]
thehole = None
mid = []
for hole in area.holes:
for sline in hole:
if sline == hole_line:
ind = hole.index(sline)
mid = hole[ind:] + hole[:ind]
thehole = hole
break
if mid != []:
break
fwd = geometry.Line(start_pt, end_pt)
fwd_sline = self.__make_get_sline(fwd)[0]
rev_sline = fwd_sline.signed_copy(-1)
self.fea.register(rev_sline)
rev_sline.line.add_signline(rev_sline)
alist_curr = beg + [fwd_sline] + mid + [rev_sline] + end
area.holes.remove(thehole)
area.update(alist_curr)
def __get_cut_line(self, cutline):
"""Returns a cut line beginning and ending on the part."""
# find all intersections
lines = self.lines
points = set()
# add line intersections
for line in lines:
newpt = line.intersects(cutline)
if newpt != None:
points.add(newpt)
# loop through intersection points, storing distance
points = list(points)
for (ind, point) in enumerate(points):
dist = point - cutline.pt(0)
dist = dist.length()
pdict = {'dist': dist, 'point': point}
points[ind] = pdict
# sort the points by dist, lowest to highest, return first cut
points = sorted(points, key=lambda k: k['dist'])
start = points[0]['point']
end = points[1]['point']
new_cut = geometry.Line(start, end)
return new_cut
def __cut_with_line(self, cutline, debug):
"""Cuts the part using the passed line.
Args:
cutline (Line): line to cut the area with
debug (list): bool for printing, bool for plotting after every cut
"""
# find all intersections
lines = self.lines
points = set()
# add line intersections
for line in lines:
if debug[0]:
print('Checking X between %s and cutline' % line.get_name())
newpt = line.intersects(cutline)
if debug[0]:
print(' Intersection: %s' % newpt)
if newpt != None:
points.add(newpt)
# loop through intersection points, storing distance and lines to cut
points = list(points)
for (ind, point) in enumerate(points):
dist = point - cutline.pt(0)
dist = dist.length()
pdict = {'dist': dist}
realpt = self.__get_point(point)
# we only want to store lines to cut here
if realpt == None or realpt.arc_center == True:
# we could have an existing arc center on a line that needs to
# be cut
if realpt == None:
realpt = point
for line in lines:
point_on_line = line.coincident(realpt)
if point_on_line and point not in line.points:
pdict['line'] = line
break
pdict['point'] = realpt
points[ind] = pdict
# sort the points by dist, lowest to highest
points = sorted(points, key=lambda k: k['dist'])
if debug[0]:
print('==================================')
print('Points on the cutline!------------')
for pdict in points:
print(pdict['point'])
print(' dist %.3f' % pdict['dist'])
if 'line' in pdict:
print(' X cut line: %s' % pdict['line'])
print('==================================')
# loop through the points cutting areas
for ind in range(len(points)):
pdict = points[ind]
start_pt = pdict['point']
if 'line' in pdict:
# cut the line and point to the real new point
print('Cut through line %s' % pdict['line'].get_name())
pnew = self.__cut_line(start_pt, pdict['line'])[0]
points[ind]['point'] = pnew
start_pt = pnew
end_pt = None
pavg = None
area = None
if ind > 0:
# find the area we're working on
end_pt = points[ind-1]['point']
pavg = start_pt + end_pt
pavg = pavg*0.5
area = self.__area_from_pt(pavg)
if area == None:
# stop cutting if we are trying to cut through a hole
print('No area found at point avg, no cut made')
break
start_hole = start_pt in area.holepoints
end_hole = end_pt in area.holepoints
if start_hole and end_hole and area != None:
print('Trying to join holes, no cut made')
break
# stop cutting if we are trying to join holes
if end_hole == True or start_hole == True:
print('Merging hole in %s' % area.get_name())
self.__merge_hole(area, start_pt, end_pt)
else:
print('Cutting %s' % area.get_name())
self.__cut_area(area, start_pt, end_pt)
if debug[1]:
self.fea.plot_geometry()
def __store_holes(self):
"""Puts all holes in their correct areas"""
holes = []
for area in self.areas:
holes += area.holes
for hole in holes:
hole_area = hole.parent
for area in self.areas:
is_inside = hole.inside(area.exlines)
if is_inside == True:
if area != hole_area:
# delete the hole from the old area, move it to the new
hole.set_parent(area)
hole_area.holes.remove(hole)
hole_area.close()
area.holes.append(hole)
area.close()
afrom, ato = hole_area.get_name(), area.get_name()
print('Hole moved from %s to %s' % (afrom, ato))
def __vect_to_line(self, point, cvect):
"""Returns a cutting line at a given point and cutting vector.
Args:
point (Point): the location we are cutting from
cvect (Point): the vector direction of the cut from pt
Returns:
cutline (Line): cut line
"""
cvect.make_unit()
vsize = self.__get_maxlength()
endpt = point + cvect*vsize
cutline = geometry.Line(point, endpt)
cutline = self.__get_cut_line(cutline)
return cutline
def __chunk_area(self, area, mode, exclude_convex, debug):
"""Cuts the passed area into regular smaller areas.
The cgx mesher only accepts areas which have 3-5 sides
so one may need to call this before using that mesher.
Cuts are made perpendicular to tangent points or at
internal corners.
At internal corners two perpendicular cuts are made.
Args:
area (Area): the area to cut into smaller areas
mode (str): 'both', 'holes' or 'ext' chunks the area using the
points from this set. See part.chunk
exclude_convex (bool): If true exclude cutting convex tangent points
debug (list): bool for printing, bool for plotting after every cut
"""
# store the cuts first, then cut after
cuts = [] # each item is a dict with a pt and vect in it
loops = []
cut_point_sets = []
if mode == 'holes':
loops = area.holes
elif mode == 'ext':
loops = [area.exlines]
elif mode == 'both':
loops = area.holes + [area.exlines]
for loop in loops:
for ind, line in enumerate(loop):
line_pre = loop[ind-1]
line_post = line
point = line_pre.pt(1)
perp1 = line_pre.get_perp_vec(point)
perp2 = line_post.get_perp_vec(point)
#tan1 = line_pre.get_tan_vec(point)
#tan2 = line_post.get_tan_vec(point)
# flip these vectors later to make them cut the area(s)
ang = perp1.ang_bet_deg(perp2)
cut = {}
make_cut = True
pre_arc = isinstance(line_pre, geometry.SignArc)
post_arc = isinstance(line_post, geometry.SignArc)
if pre_arc or post_arc:
if pre_arc and post_arc:
if (line_pre.concavity == 'convex'
and line_post.concavity == 'convex'
and exclude_convex == True):
make_cut = False
else:
# only one is an arc
arc = line_pre
if post_arc:
arc = line_post
if arc.concavity == 'convex' and exclude_convex == True:
make_cut = False
is_tangent = (-10 <= ang <= 10)
is_int_corner = (45 <= ang <= 135)
"""
print('-------------------')
print('%s' % point)
print('Angle is %.3f' % ang)
print('Make cut %s' % make_cut)
print('is_tangent %s' % is_tangent)
print('is_int_corner %s' % is_int_corner)
"""
if is_tangent:
if make_cut == True:
# tangent
cut = {'pt':point, 'vect':perp1*-1}
cut_line = self.__vect_to_line(cut['pt'], cut['vect'])
pset = set(cut_line.points)
if pset not in cut_point_sets:
cut_point_sets.append(pset)
cut['line'] = cut_line
cuts.append(cut)
elif is_int_corner:
# internal corner
cut = {'pt':point, 'vect':perp1*-1}
cut_line = self.__vect_to_line(cut['pt'], cut['vect'])
pset = set(cut_line.points)
if pset not in cut_point_sets:
cut_point_sets.append(pset)
cut['line'] = cut_line
cuts.append(cut)
cut = {'pt':point, 'vect':perp2*-1}
cut_line = self.__vect_to_line(cut['pt'], cut['vect'])
pset = set(cut_line.points)
if pset not in cut_point_sets:
cut_point_sets.append(pset)
cut['line'] = cut_line
cuts.append(cut)
elif ang < 0:
# external corner
# do not split these
pass
# do the cuts
for cut in cuts:
print('--------------------')
print('Cut point:', cut['pt'].get_name())
print('Cut line:', cut['line'])
self.__cut_with_line(cut['line'], debug)
def chunk(self, mode='both', exclude_convex = True, debug=[0, 0]):
"""Chunks all areas in the part.
Args:
mode (str): area chunking mode
- 'both': cuts areas using holes and exterior points
- 'holes': cut areas using holes points only
- 'ext': cut areas using exterior points only
exclude_convex (bool): If true exclude cutting convex tangent points
"""
for area in self.areas:
if area.closed:
min_sides = 5
has_holes = len(area.holes) > 0
ext_gr = len(area.exlines) > min_sides
both_false = (has_holes == False and ext_gr == False)
if mode == 'holes' and has_holes:
self.__chunk_area(area, mode, exclude_convex, debug)
elif (mode == 'both'
and (has_holes or ext_gr or not exclude_convex)):
self.__chunk_area(area, mode, exclude_convex, debug)
elif mode == 'ext' and (ext_gr or not exclude_convex):
self.__chunk_area(area, mode, exclude_convex, debug)
else:
aname = area.get_name()
val = 'Area %s was not chunked because it had' % aname
adder = ''
if mode == 'both' and both_false:
adder = '<= %i lines and no holes.' % min_sides
elif has_holes == False and (mode in ['both', 'holes']):
adder = 'no holes.'
elif ext_gr == False and (mode in ['both', 'ext']):
adder = '<= %i lines.' % min_sides
print('%s %s' % (val, adder))
# store the left, right, top, and bottom lines
self.__update()
def __update(self):
"""Updates the left, right, top, bottom sides and area and center"""
self.__set_side('left')
self.__set_side('right')
self.__set_side('top')
self.__set_side('bottom')
self.area, self.center = self.__calc_area_center()
def __str__(self):
"""Returns string listing object type, id number and name."""
val = 'Part, id=%i name=%s' % (self.id, self.get_name())
return val
| apache-2.0 |
OspreyX/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def recounstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
i = np.nonzero(endDates>=date)[0][0] # find first not expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
##-------------------Main script---------------------------
if __name__=="__main__":
Y = recounstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
daniorerio/trackpy | benchmarks/suite.py | 3 | 2664 | import getpass
import sys
import os
from vbench.api import Benchmark, BenchmarkRunner
from datetime import datetime
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
try:
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.expanduser('~/.vbenchcfg')))
REPO_PATH = config.get('setup', 'repo_path')
REPO_URL = config.get('setup', 'repo_url')
DB_PATH = config.get('setup', 'db_path')
TMP_DIR = config.get('setup', 'tmp_dir')
except:
REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
REPO_URL = '[email protected]:danielballan/mr.git'
DB_PATH = os.path.join(REPO_PATH, 'vb_suite/benchmarks.db')
TMP_DIR = os.path.join(HOME, 'tmp/vb_mr')
PREPARE = """
python setup.py clean
"""
BUILD = """
python setup.py build_ext --inplace
"""
dependencies = []
START_DATE = datetime(2012, 9, 19) # first full day when setup.py existed
# repo = GitRepo(REPO_PATH)
RST_BASE = 'source'
def generate_rst_files(benchmarks):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
vb_path = os.path.join(RST_BASE, 'vbench')
fig_base_path = os.path.join(vb_path, 'figures')
if not os.path.exists(vb_path):
print 'creating %s' % vb_path
os.makedirs(vb_path)
if not os.path.exists(fig_base_path):
print 'creating %s' % fig_base_path
os.makedirs(fig_base_path)
for bmk in benchmarks:
print 'Generating rst file for %s' % bmk.name
rst_path = os.path.join(RST_BASE, 'vbench/%s.txt' % bmk.name)
fig_full_path = os.path.join(fig_base_path, '%s.png' % bmk.name)
# make the figure
plt.figure(figsize=(10, 6))
ax = plt.gca()
bmk.plot(DB_PATH, ax=ax)
start, end = ax.get_xlim()
plt.xlim([start - 30, end + 30])
plt.savefig(fig_full_path, bbox_inches='tight')
plt.close('all')
fig_rel_path = 'vbench/figures/%s.png' % bmk.name
rst_text = bmk.to_rst(image_path=fig_rel_path)
with open(rst_path, 'w') as f:
f.write(rst_text)
ref = __import__('benchmarks')
benchmarks = [v for v in ref.__dict__.values() if isinstance(v, Benchmark)]
runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL,
BUILD, DB_PATH, TMP_DIR, PREPARE,
always_clean=True,
run_option='eod', start_date=START_DATE,
module_dependencies=dependencies)
if __name__ == '__main__':
runner.run()
generate_rst_files(benchmarks)
| bsd-3-clause |
KennyCandy/HAR | _module123/C_64_32.py | 1 | 17396 | # Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG to know that whether this is traning process or not.
FLAG = 'train'
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 to each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # 128 time_steps per series
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 1000
# LSTM structure
self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
# returns a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_varibale(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, which is a fully connected layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 64])
b_conv1 = bias_varibale([64])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 64, 1])
b_conv2 = weight_variable([1])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = h_conv2
h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
feature_mat = h_pool2
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs; the states are internal to the LSTM cells, they are not our concern here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("training iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}".format(loss_out))
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}".format(loss_out))
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close() | mit |
jaidevd/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
mohseniaref/PySAR-1 | pysar/correlation_with_dem.py | 1 | 2229 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
import sys
import os
import getopt
import h5py
import numpy as np
import matplotlib.pyplot as plt
import _readfile as readfile
def Usage():
print '''
************************************************************************
************************************************************************
Calculates the correlation of the dem with the InSAR velocity field.
Usage:
correlation_with_dem.py dem velocity
Example:
correlation_with_dem.py radar_8rlks.hgt velocity.h5
***********************************************************************
***********************************************************************
'''
try:
demFile=sys.argv[1]
File=sys.argv[2]
except:
Usage()
sys.exit(1)
if os.path.basename(demFile).split('.')[1]=='hgt':
amp,dem,demRsc = readfile.read_float32(demFile)
elif os.path.basename(demFile).split('.')[1]=='dem':
dem,demRsc = readfile.read_dem(demFile)
#amp,dem,demRsc = readfile.read_float32(demFile)
h5data = h5py.File(File)
dset = h5data['velocity'].get('velocity')
data = dset[0:dset.shape[0],0:dset.shape[1]]
try:
suby=sys.argv[3].split(':')
subx=sys.argv[4].split(':')
data = data[int(suby[0]):int(suby[1]),int(subx[0]):int(subx[1])]
dem = dem[int(suby[0]):int(suby[1]),int(subx[0]):int(subx[1])]
except:
print 'no subset'
dem=dem.flatten(1)
data=data.flatten(1)
ndx = ~np.isnan(data)
C1=np.zeros([2,len(dem[ndx])])
C1[0][:]=dem[ndx]
C1[1][:]=data[ndx]
print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print ''
print 'Correlation of the velocity with the DEM: '+ str(np.corrcoef(C1)[0][1])
print ''
print'+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print 'DEM info:'
print ''
print 'Maximum height difference (m) : ' + str(np.max(dem[ndx])-np.min(dem[ndx]))
print 'Average height (m) :'+str(np.mean(dem[ndx]))
print 'Height Std: '+str(np.std(dem[ndx]))
| mit |
saiwing-yeung/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/scipy/signal/wavelets.py | 67 | 10523 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2*p coefficients of the FIR low-pass filter.
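    Examples
    --------
    A minimal, illustrative sketch (assumes ``import numpy as np``):
    >>> d2 = daub(2)
    >>> len(d2)
    4
    >>> np.allclose(np.sum(d2), np.sqrt(2))
    True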
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
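    Returns
    -------
    asgn : ndarray
        High-pass filter coefficients, i.e. ``hk[::-1]`` with alternating signs.
    Examples
    --------
    A minimal, illustrative sketch (assumes ``import numpy as np``):
    >>> qmf(np.array([1, 2, 3, 4]))
    array([ 4, -3,  2, -1])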
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray
        The wavelet function ``psi(x)`` at `x`:
        ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N and
        ``gk = qmf(hk)`` is the corresponding high-pass filter.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
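    Examples
    --------
    A short, illustrative sketch building the D4 scaling function (the
    plotting lines are skipped because they need Matplotlib):
    >>> x, phi, psi = cascade(daub(2), J=5)
    >>> x.shape
    (96,)
    >>> import matplotlib.pyplot as plt  # doctest: +SKIP
    >>> plt.plot(x, phi)  # doctest: +SKIP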
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
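    Examples
    --------
    A minimal, illustrative sketch:
    >>> wav = morlet(200, w=5.0, s=1.0)
    >>> wav.shape
    (200,)
    >>> wav.dtype
    dtype('complex128')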
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| mit |
menpo/menpo | menpo/visualize/base.py | 2 | 57597 | try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
from menpo.base import MenpoMissingDependencyError
class Menpo3dMissingError(MenpoMissingDependencyError):
r"""
Exception that is thrown when an attempt is made to import a 3D
visualisation method, but 'menpo3d' is not installed.
"""
def __init__(self, actual_missing_import_name):
super(Menpo3dMissingError, self).__init__(actual_missing_import_name)
self.message += (
"\nThis import is required in order to use the " "'menpo3d' package"
)
class Renderer(object):
r"""
Abstract class for rendering visualizations. Framework specific
implementations of these classes are made in order to separate
implementation cleanly from the rest of the code.
It is assumed that the renderers follow some form of stateful pattern for
rendering to Figures. Therefore, the major interface for rendering involves
providing a `figure_id` or a `bool` about whether a new figure should be
used. If neither are provided then the default state of the rendering engine
is assumed to be maintained.
Providing both a ``figure_id`` and ``new_figure == True`` is not a valid
state.
Parameters
----------
figure_id : `object`
A figure id. Could be any valid object that identifies a figure in a
given framework (`str`, `int`, `float`, etc.).
new_figure : `bool`
Whether the rendering engine should create a new figure.
Raises
------
ValueError
It is not valid to provide a figure id AND request a new figure to
be rendered on.
"""
def __init__(self, figure_id, new_figure):
if figure_id is not None and new_figure:
raise ValueError(
"Conflicting arguments. figure_id cannot be "
"specified if the new_figure flag is True"
)
self.figure_id = figure_id
self.new_figure = new_figure
self.figure = self.get_figure()
def render(self, **kwargs):
r"""
Abstract method to be overridden by the renderer. This will implement
the actual rendering code for a given object class.
Parameters
----------
kwargs : `dict`
Passed through to specific rendering engine.
Returns
-------
viewer : :map:`Renderer`
Pointer to `self`.
"""
pass
def get_figure(self):
r"""
Abstract method for getting the correct figure to render on. Should
also set the correct `figure_id` for the figure.
Returns
-------
figure : `object`
The figure object that the renderer will render on.
"""
pass
def save_figure(self, **kwargs):
r"""
Abstract method for saving the figure of the current `figure_id` to
file. It will implement the actual saving code for a given object class.
Parameters
----------
kwargs : `dict`
Options to be set when saving the figure to file.
"""
pass
def clear_figure(self):
r"""
Abstract method for clearing the current figure.
"""
pass
def force_draw(self):
r"""
Abstract method for forcing the current figure to render.
"""
pass
class viewwrapper(object):
r"""
This class abuses the Python descriptor protocol in order to dynamically
change the view method at runtime. Although this is more obviously achieved
through inheritance, the view methods practically amount to syntactic sugar
and so we want to maintain a single view method per class. We do not want
to add the mental overhead of implementing different 2D and 3D PointCloud
classes for example, since, outside of viewing, their implementations would
be identical.
Also note that we could have separated out viewing entirely and made the
check there, but the view method is an important paradigm in menpo that
we want to maintain.
Therefore, this function cleverly (and obscurely) returns the correct
view method for the dimensionality of the given object.
"""
def __init__(self, wrapped_func):
fname = wrapped_func.__name__
self._2d_fname = "_{}_2d".format(fname)
self._3d_fname = "_{}_3d".format(fname)
def __get__(self, instance, instancetype):
if instance.n_dims == 2:
return getattr(instance, self._2d_fname)
elif instance.n_dims == 3:
return getattr(instance, self._3d_fname)
else:
def raise_not_supported(*args, **kwargs):
r"""
Viewing of objects with greater than 3 dimensions is not
currently possible.
"""
raise ValueError(
"Viewing of objects with greater than 3 "
"dimensions is not currently possible."
)
return raise_not_supported
class Viewable(object):
r"""
Abstract interface for objects that can visualize themselves. This assumes
that the class has dimensionality as the view method checks the ``n_dims``
property to wire up the correct view method.
"""
@viewwrapper
def view(self):
r"""
Abstract method for viewing. See the :map:`viewwrapper` documentation
for an explanation of how the `view` method works.
"""
pass
def _view_2d(self, **kwargs):
raise NotImplementedError("2D Viewing is not supported.")
def _view_3d(self, **kwargs):
raise NotImplementedError("3D Viewing is not supported.")
class LandmarkableViewable(object):
r"""
Mixin for :map:`Landmarkable` and :map:`Viewable` objects. Provides a
single helper method for viewing Landmarks and `self` on the same figure.
"""
@viewwrapper
def view_landmarks(self, **kwargs):
pass
def _view_landmarks_2d(self, **kwargs):
raise NotImplementedError("2D Landmark Viewing is not supported.")
def _view_landmarks_3d(self, **kwargs):
raise NotImplementedError("3D Landmark Viewing is not supported.")
from menpo.visualize.viewmatplotlib import (
MatplotlibImageViewer2d,
MatplotlibImageSubplotsViewer2d,
MatplotlibLandmarkViewer2d,
MatplotlibAlignmentViewer2d,
MatplotlibGraphPlotter,
MatplotlibMultiImageViewer2d,
MatplotlibMultiImageSubplotsViewer2d,
MatplotlibPointGraphViewer2d,
)
# Default importer types
PointGraphViewer2d = MatplotlibPointGraphViewer2d
LandmarkViewer2d = MatplotlibLandmarkViewer2d
ImageViewer2d = MatplotlibImageViewer2d
ImageSubplotsViewer2d = MatplotlibImageSubplotsViewer2d
AlignmentViewer2d = MatplotlibAlignmentViewer2d
GraphPlotter = MatplotlibGraphPlotter
MultiImageViewer2d = MatplotlibMultiImageViewer2d
MultiImageSubplotsViewer2d = MatplotlibMultiImageSubplotsViewer2d
class ImageViewer(object):
r"""
Base :map:`Image` viewer that abstracts away dimensionality. It can
visualize multiple channels of an image in subplots.
Parameters
----------
figure_id : `object`
A figure id. Could be any valid object that identifies a figure in a
given framework (`str`, `int`, `float`, etc.).
new_figure : `bool`
Whether the rendering engine should create a new figure.
dimensions : {``2``, ``3``} `int`
The number of dimensions in the image.
pixels : ``(N, D)`` `ndarray`
The pixels to render.
channels: `int` or `list` or ``'all'`` or `None`
A specific selection of channels to render. The user can choose either
a single or multiple channels. If ``'all'``, render all channels in
subplot mode. If `None` and image is not greyscale or RGB, render all
channels in subplots. If `None` and image is greyscale or RGB, then do
not plot channels in different subplots.
mask: ``(N, D)`` `ndarray`
        A `bool` mask to be applied to the image. All points outside the
        mask are set to the image maximum (i.e. rendered as white).
"""
def __init__(
self, figure_id, new_figure, dimensions, pixels, channels=None, mask=None
):
if len(pixels.shape) == 3 and pixels.shape[0] == 3:
# then probably an RGB image, so ensure the clipped pixels.
from menpo.image import Image
image = Image(pixels, copy=False)
image_clipped = image.clip_pixels()
pixels = image_clipped.pixels
else:
pixels = pixels.copy()
self.figure_id = figure_id
self.new_figure = new_figure
self.dimensions = dimensions
pixels, self.use_subplots = self._parse_channels(channels, pixels)
self.pixels = self._masked_pixels(pixels, mask)
self._flip_image_channels()
def _flip_image_channels(self):
if self.pixels.ndim == 3:
from menpo.image.base import channels_to_back
self.pixels = channels_to_back(self.pixels)
def _parse_channels(self, channels, pixels):
r"""
Parse `channels` parameter. If `channels` is `int` or `list`, keep it as
is. If `channels` is ``'all'``, return a `list` of all the image's
channels. If `channels` is `None`, return the minimum between an
`upper_limit` and the image's number of channels. If image is greyscale
or RGB and `channels` is `None`, then do not plot channels in different
subplots.
Parameters
----------
channels : `int` or `list` or ``'all'`` or `None`
A specific selection of channels to render.
pixels : ``(N, D)`` `ndarray`
The image's pixels to render.
Returns
-------
pixels : ``(N, D)`` `ndarray`
The pixels to be visualized.
use_subplots : `bool`
Whether to visualize using subplots.
"""
# Flag to trigger ImageSubplotsViewer2d or ImageViewer2d
use_subplots = True
n_channels = pixels.shape[0]
if channels is None:
if n_channels == 1:
pixels = pixels[0, ...]
use_subplots = False
elif n_channels == 3:
use_subplots = False
elif channels != "all":
if isinstance(channels, Iterable):
if len(channels) == 1:
pixels = pixels[channels[0], ...]
use_subplots = False
else:
pixels = pixels[channels, ...]
else:
pixels = pixels[channels, ...]
use_subplots = False
return pixels, use_subplots
def _masked_pixels(self, pixels, mask):
r"""
Return the masked pixels using a given `bool` mask. In order to make
sure that the non-masked pixels are visualized in white, their value
is set to the maximum of pixels.
Parameters
----------
pixels : ``(N, D)`` `ndarray`
The image's pixels to render.
mask: ``(N, D)`` `ndarray`
A `bool` mask to be applied to the image. All points outside the
mask are set to the image max. If mask is `None`, then the initial
pixels are returned.
Returns
-------
masked_pixels : ``(N, D)`` `ndarray`
The masked pixels.
"""
if mask is not None:
nanmax = np.nanmax(pixels)
pixels[..., ~mask] = nanmax + (0.01 * nanmax)
return pixels
def render(self, **kwargs):
r"""
Select the correct type of image viewer for the given image
dimensionality.
Parameters
----------
kwargs : `dict`
Passed through to image viewer.
Returns
-------
viewer : :map:`Renderer`
The rendering object.
Raises
------
ValueError
Only 2D images are supported.
"""
if self.dimensions == 2:
if self.use_subplots:
return ImageSubplotsViewer2d(
self.figure_id, self.new_figure, self.pixels
).render(**kwargs)
else:
return ImageViewer2d(
self.figure_id, self.new_figure, self.pixels
).render(**kwargs)
else:
raise ValueError("Only 2D images are currently supported")
def view_image_landmarks(
image,
channels,
masked,
group,
with_labels,
without_labels,
figure_id,
new_figure,
interpolation,
cmap_name,
alpha,
render_lines,
line_colour,
line_style,
line_width,
render_markers,
marker_style,
marker_size,
marker_face_colour,
marker_edge_colour,
marker_edge_width,
render_numbering,
numbers_horizontal_align,
numbers_vertical_align,
numbers_font_name,
numbers_font_size,
numbers_font_style,
numbers_font_weight,
numbers_font_colour,
render_legend,
legend_title,
legend_font_name,
legend_font_style,
legend_font_size,
legend_font_weight,
legend_marker_scale,
legend_location,
legend_bbox_to_anchor,
legend_border_axes_pad,
legend_n_columns,
legend_horizontal_spacing,
legend_vertical_spacing,
legend_border,
legend_border_padding,
legend_shadow,
legend_rounded_corners,
render_axes,
axes_font_name,
axes_font_size,
axes_font_style,
axes_font_weight,
axes_x_limits,
axes_y_limits,
axes_x_ticks,
axes_y_ticks,
figure_size,
):
r"""
This is a helper method that abstracts away the fact that viewing
images and masked images is identical apart from the mask. Therefore,
we do the class check in this method and then proceed identically whether
the image is masked or not.
See the documentation for _view_2d on Image or _view_2d on MaskedImage
for information about the parameters.
"""
import matplotlib.pyplot as plt
if not image.has_landmarks:
raise ValueError(
"Image does not have landmarks attached, unable " "to view landmarks."
)
# Parse axes limits
image_axes_x_limits = None
landmarks_axes_x_limits = axes_x_limits
if axes_x_limits is None:
image_axes_x_limits = landmarks_axes_x_limits = [0, image.width - 1]
image_axes_y_limits = None
landmarks_axes_y_limits = axes_y_limits
if axes_y_limits is None:
image_axes_y_limits = landmarks_axes_y_limits = [0, image.height - 1]
# Render image
from menpo.image import MaskedImage
if isinstance(image, MaskedImage):
self_view = image.view(
figure_id=figure_id,
new_figure=new_figure,
channels=channels,
masked=masked,
interpolation=interpolation,
cmap_name=cmap_name,
alpha=alpha,
render_axes=render_axes,
axes_x_limits=image_axes_x_limits,
axes_y_limits=image_axes_y_limits,
)
else:
self_view = image.view(
figure_id=figure_id,
new_figure=new_figure,
channels=channels,
interpolation=interpolation,
cmap_name=cmap_name,
alpha=alpha,
render_axes=render_axes,
axes_x_limits=image_axes_x_limits,
axes_y_limits=image_axes_y_limits,
)
# Render landmarks
# correct group label in legend
if group is None and image.landmarks.n_groups == 1:
group = image.landmarks.group_labels[0]
landmark_view = None # initialize viewer object
# useful in order to visualize the legend only for the last axis object
render_legend_tmp = False
for i, ax in enumerate(self_view.axes_list):
# set current axis
plt.sca(ax)
# show legend only for the last axis object
if i == len(self_view.axes_list) - 1:
render_legend_tmp = render_legend
# viewer
landmark_view = image.landmarks[group].view(
with_labels=with_labels,
without_labels=without_labels,
group=group,
figure_id=self_view.figure_id,
new_figure=False,
image_view=True,
render_lines=render_lines,
line_colour=line_colour,
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend_tmp,
legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=landmarks_axes_x_limits,
axes_y_limits=landmarks_axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
)
return landmark_view
class MultipleImageViewer(ImageViewer):
def __init__(
self, figure_id, new_figure, dimensions, pixels_list, channels=None, mask=None
):
super(MultipleImageViewer, self).__init__(
figure_id,
new_figure,
dimensions,
pixels_list[0],
channels=channels,
mask=mask,
)
pixels_list = [self._parse_channels(channels, p)[0] for p in pixels_list]
self.pixels_list = [self._masked_pixels(p, mask) for p in pixels_list]
def render(self, **kwargs):
if self.dimensions == 2:
if self.use_subplots:
MultiImageSubplotsViewer2d(
self.figure_id, self.new_figure, self.pixels_list
).render(**kwargs)
else:
return MultiImageViewer2d(
self.figure_id, self.new_figure, self.pixels_list
).render(**kwargs)
else:
raise ValueError("Only 2D images are currently supported")
def plot_curve(
x_axis,
y_axis,
figure_id=None,
new_figure=True,
legend_entries=None,
title="",
x_label="",
y_label="",
axes_x_limits=0.0,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
render_lines=True,
line_colour=None,
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour=None,
marker_edge_colour="k",
marker_edge_width=1.0,
render_legend=True,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
figure_size=(7, 7),
render_grid=True,
grid_line_style="--",
grid_line_width=1,
):
r"""
Plot a single or multiple curves on the same figure.
Parameters
----------
x_axis : `list` or `array`
The values of the horizontal axis. They are common for all curves.
y_axis : `list` of `lists` or `arrays`
A `list` with `lists` or `arrays` with the values of the vertical axis
for each curve.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
legend_entries : `list of `str` or ``None``, optional
If `list` of `str`, it must have the same length as `errors` `list` and
each `str` will be used to name each curve. If ``None``, the CED curves
will be named as `'Curve %d'`.
title : `str`, optional
The figure's title.
x_label : `str`, optional
The label of the horizontal axis.
y_label : `str`, optional
The label of the vertical axis.
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the graph as a percentage of the curves' width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then the
limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the graph as a percentage of the curves' height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then the
limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
render_lines : `bool` or `list` of `bool`, optional
If ``True``, the line will be rendered. If `bool`, this value will be
used for all curves. If `list`, a value must be specified for each
curve, thus it must have the same length as `y_axis`.
line_colour : `colour` or `list` of `colour` or ``None``, optional
The colour of the lines. If not a `list`, this value will be
used for all curves. If `list`, a value must be specified for each
curve, thus it must have the same length as `y_axis`. If ``None``, the
colours will be linearly sampled from jet colormap.
Example `colour` options are ::
{'r', 'g', 'b', 'c', 'm', 'k', 'w'}
or
(3, ) ndarray
line_style : ``{'-', '--', '-.', ':'}`` or `list` of those, optional
The style of the lines. If not a `list`, this value will be used for all
curves. If `list`, a value must be specified for each curve, thus it must
have the same length as `y_axis`.
line_width : `float` or `list` of `float`, optional
The width of the lines. If `float`, this value will be used for all
curves. If `list`, a value must be specified for each curve, thus it must
have the same length as `y_axis`.
render_markers : `bool` or `list` of `bool`, optional
If ``True``, the markers will be rendered. If `bool`, this value will be
used for all curves. If `list`, a value must be specified for each
curve, thus it must have the same length as `y_axis`.
marker_style : `marker` or `list` of `markers`, optional
The style of the markers. If not a `list`, this value will be used for
all curves. If `list`, a value must be specified for each curve, thus it
must have the same length as `y_axis`.
Example `marker` options ::
{'.', ',', 'o', 'v', '^', '<', '>', '+', 'x', 'D', 'd', 's',
'p', '*', 'h', 'H', '1', '2', '3', '4', '8'}
marker_size : `int` or `list` of `int`, optional
The size of the markers in points. If `int`, this value will be used
for all curves. If `list`, a value must be specified for each curve, thus
it must have the same length as `y_axis`.
marker_face_colour : `colour` or `list` of `colour` or ``None``, optional
The face (filling) colour of the markers. If not a `list`, this value
will be used for all curves. If `list`, a value must be specified for
each curve, thus it must have the same length as `y_axis`. If ``None``,
the colours will be linearly sampled from jet colormap.
Example `colour` options are ::
{'r', 'g', 'b', 'c', 'm', 'k', 'w'}
or
(3, ) ndarray
marker_edge_colour : `colour` or `list` of `colour` or ``None``, optional
The edge colour of the markers. If not a `list`, this value will be used
for all curves. If `list`, a value must be specified for each curve, thus
it must have the same length as `y_axis`. If ``None``, the colours will
be linearly sampled from jet colormap.
Example `colour` options are ::
{'r', 'g', 'b', 'c', 'm', 'k', 'w'}
or
(3, ) ndarray
marker_edge_width : `float` or `list` of `float`, optional
The width of the markers' edge. If `float`, this value will be used for
all curves. If `list`, a value must be specified for each curve, thus it
must have the same length as `y_axis`.
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend.
Example options ::
{'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'}
legend_font_style : ``{'normal', 'italic', 'oblique'}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See below, optional
The font weight of the legend.
Example options ::
{'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'}
legend_marker_scale : `float`, optional
        The relative size of the legend markers with respect to the original ones.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ===
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ===
legend_bbox_to_anchor : (`float`, `float`), optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See below, optional
The font of the axes.
Example options ::
{'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{'normal', 'italic', 'oblique'}``, optional
The font style of the axes.
axes_font_weight : See below, optional
The font weight of the axes.
Example options ::
{'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'}
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : ``{'-', '--', '-.', ':'}``, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Raises
------
ValueError
legend_entries list has different length than y_axis list
Returns
-------
viewer : :map:`GraphPlotter`
The viewer object.
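    Examples
    --------
    A minimal sketch plotting two curves over a shared horizontal axis
    (skipped in doctests because it opens a figure):
    >>> import numpy as np
    >>> x = np.linspace(0, 1, 10)
    >>> plot_curve(x, [x ** 2, x ** 3],
    ...            legend_entries=['quadratic', 'cubic'])  # doctest: +SKIP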
"""
from menpo.visualize import GraphPlotter
# check y_axis
if not isinstance(y_axis, list):
y_axis = [y_axis]
# check legend_entries
if legend_entries is not None and len(legend_entries) != len(y_axis):
raise ValueError("legend_entries list has different length than y_axis " "list")
# render
return GraphPlotter(
figure_id=figure_id,
new_figure=new_figure,
x_axis=x_axis,
y_axis=y_axis,
title=title,
legend_entries=legend_entries,
x_label=x_label,
y_label=y_label,
x_axis_limits=axes_x_limits,
y_axis_limits=axes_y_limits,
x_axis_ticks=axes_x_ticks,
y_axis_ticks=axes_y_ticks,
).render(
render_lines=render_lines,
line_colour=line_colour,
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_legend=render_legend,
legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
figure_size=figure_size,
render_grid=render_grid,
grid_line_style=grid_line_style,
grid_line_width=grid_line_width,
)
def render_rectangles_around_patches(
centers,
patch_shape,
axes=None,
image_view=True,
line_colour="r",
line_style="-",
line_width=1,
interpolation="none",
):
r"""
Method that renders rectangles of the specified `patch_shape` centered
around all the points of the provided `centers`.
Parameters
----------
    centers : ``(n_points, 2)`` `ndarray`
        The centers around which to draw the rectangles.
patch_shape : `tuple` or `ndarray`, optional
The size of the rectangle to render.
axes : `matplotlib.pyplot.axes` object or ``None``, optional
The axes object on which to render.
image_view : `bool`, optional
If ``True`` the rectangles will be viewed as if they are in the image
coordinate system.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
interpolation : See Below, optional
In case a patch-based image is already rendered on the specified axes,
this argument controls how tight the rectangles would be to the patches.
It needs to have the same value as the one used when rendering the
patches image, otherwise there is the danger that the rectangles won't
be exactly on the border of the patches. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
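    Examples
    --------
    A minimal sketch drawing 16 x 16 boxes around three centers on the
    current axes (illustrative values):
    >>> import numpy as np
    >>> centers = np.array([[20.0, 20.0], [40.0, 40.0], [60.0, 60.0]])
    >>> render_rectangles_around_patches(centers, (16, 16))  # doctest: +SKIP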
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# Dictionary with the line styles
line_style_dict = {"-": "solid", "--": "dashed", "-.": "dashdot", ":": "dotted"}
# Get axes object
if axes is None:
axes = plt.gca()
# Need those in order to compute the lower left corner of the rectangle
half_patch_shape = [patch_shape[0] / 2, patch_shape[1] / 2]
# Set the view mode
if image_view:
xi = 1
yi = 0
else:
xi = 0
yi = 1
# Set correct offsets so that the rectangle is tight to the patch
if interpolation == "none":
off_start = 0.5
off_end = 0.0
else:
off_start = 1.0
off_end = 0.5
# Render rectangles
for p in range(centers.shape[0]):
xc = np.intp(centers[p, xi] - half_patch_shape[xi]) - off_start
yc = np.intp(centers[p, yi] - half_patch_shape[yi]) - off_start
axes.add_patch(
Rectangle(
(xc, yc),
patch_shape[xi] + off_end,
patch_shape[yi] + off_end,
fill=False,
edgecolor=line_colour,
linewidth=line_width,
linestyle=line_style_dict[line_style],
)
)
def view_patches(
patches,
patch_centers,
patches_indices=None,
offset_index=None,
figure_id=None,
new_figure=False,
background="white",
render_patches=True,
channels=None,
interpolation="none",
cmap_name=None,
alpha=1.0,
render_patches_bboxes=True,
bboxes_line_colour="r",
bboxes_line_style="-",
bboxes_line_width=1,
render_centers=True,
render_lines=True,
line_colour=None,
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour=None,
marker_edge_colour=None,
marker_edge_width=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
r"""
Method that renders the provided `patches` on a canvas. The user can
choose whether to render the patch centers (`render_centers`) as well as
rectangle boundaries around the patches (`render_patches_bboxes`).
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods of the :map:`Image` class. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that are
returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
patch_centers : :map:`PointCloud`
The centers around which to visualize the patches.
patches_indices : `int` or `list` of `int` or ``None``, optional
Defines the patches that will be visualized. If ``None``, then all the
patches are selected.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the index
of the second dimension from which to sample. If ``None``, then ``0`` is
used.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
background : ``{'black', 'white'}``, optional
If ``'black'``, then the background is set equal to the minimum value
of `patches`. If ``'white'``, then the background is set equal to the
maximum value of `patches`.
render_patches : `bool`, optional
Flag that determines whether to render the patch values.
channels : `int` or `list` of `int` or ``all`` or ``None``, optional
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_patches_bboxes : `bool`, optional
Flag that determines whether to render the bounding box lines around the
patches.
bboxes_line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
bboxes_line_style : ``{-, --, -., :}``, optional
The style of the lines.
bboxes_line_width : `float`, optional
The width of the lines.
render_centers : `bool`, optional
Flag that determines whether to render the patch centers.
render_lines : `bool`, optional
If ``True``, the edges will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the shape as a percentage of the shape's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then the
limits are set automatically.
axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the shape as a percentage of the shape's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then the
limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None`` optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
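    Examples
    --------
    A short sketch, assuming ``image`` is a 2D :map:`Image` with a landmark
    group named ``'PTS'``:
    >>> patches = image.extract_patches_around_landmarks(group='PTS')  # doctest: +SKIP
    >>> view_patches(patches, image.landmarks['PTS'])  # doctest: +SKIP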
"""
from menpo.image.base import (
_convert_patches_list_to_single_array,
_create_patches_image,
)
# If patches is a list, convert it to an array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points)
# Create patches image
if render_patches:
patches_image = _create_patches_image(
patches,
patch_centers,
patches_indices=patches_indices,
offset_index=offset_index,
background=background,
)
else:
if background == "black":
tmp_patches = np.zeros(
(
patches.shape[0],
patches.shape[1],
3,
patches.shape[3],
patches.shape[4],
)
)
elif background == "white":
tmp_patches = np.ones(
(
patches.shape[0],
patches.shape[1],
3,
patches.shape[3],
patches.shape[4],
)
)
patches_image = _create_patches_image(
tmp_patches,
patch_centers,
patches_indices=patches_indices,
offset_index=offset_index,
background=background,
)
channels = None
# Render patches image
if render_centers:
patch_view = patches_image.view_landmarks(
channels=channels,
group="patch_centers",
figure_id=figure_id,
new_figure=new_figure,
interpolation=interpolation,
cmap_name=cmap_name,
alpha=alpha,
render_lines=render_lines,
line_colour=line_colour,
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=False,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
)
else:
patch_view = patches_image.view(
figure_id=figure_id,
new_figure=new_figure,
channels=channels,
interpolation=interpolation,
cmap_name=cmap_name,
alpha=alpha,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
)
# Render rectangles around patches
if render_patches_bboxes:
patch_shape = [patches.shape[3], patches.shape[4]]
render_rectangles_around_patches(
patches_image.landmarks["patch_centers"].points,
patch_shape,
image_view=True,
line_colour=bboxes_line_colour,
line_style=bboxes_line_style,
line_width=bboxes_line_width,
interpolation=interpolation,
)
return patch_view
def plot_gaussian_ellipses(
covariances,
means,
n_std=2,
render_colour_bar=True,
colour_bar_label="Normalized Standard Deviation",
colour_map="jet",
figure_id=None,
new_figure=False,
image_view=True,
line_colour="r",
line_style="-",
line_width=1.0,
render_markers=True,
marker_edge_colour="k",
marker_face_colour="k",
marker_edge_width=1.0,
marker_size=5,
marker_style="o",
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
crop_proportion=0.1,
figure_size=(7, 7),
):
r"""
Method that renders the Gaussian ellipses that correspond to a set of
covariance matrices and mean vectors. Naturally, this only works for
2-dimensional random variables.
Parameters
----------
covariances : `list` of ``(2, 2)`` `ndarray`
The covariance matrices that correspond to each ellipse.
means : `list` of ``(2, )`` `ndarray`
The mean vectors that correspond to each ellipse.
n_std : `float`, optional
This defines the size of the ellipses in terms of number of standard
deviations.
render_colour_bar : `bool`, optional
If ``True``, then the ellipses will be coloured based on their
normalized standard deviations and a colour bar will also appear on
the side. If ``False``, then all the ellipses will have the same colour.
colour_bar_label : `str`, optional
The title of the colour bar. It only applies if `render_colour_bar`
is ``True``.
colour_map : `str`, optional
A valid Matplotlib colour map. For more info, please refer to
`matplotlib.cm`.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
image_view : `bool`, optional
If ``True`` the ellipses will be rendered in the image coordinates
system.
line_colour : See Below, optional
The colour of the lines of the ellipses.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines of the ellipses.
line_width : `float`, optional
The width of the lines of the ellipses.
render_markers : `bool`, optional
If ``True``, the centers of the ellipses will be rendered.
marker_style : See Below, optional
The style of the centers of the ellipses. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the centers of the ellipses in points.
marker_face_colour : See Below, optional
The face (filling) colour of the centers of the ellipses.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the centers of the ellipses.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The edge width of the centers of the ellipses.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
crop_proportion : `float`, optional
The proportion to be left around the centers' pointcloud.
figure_size : (`float`, `float`) `tuple` or ``None`` optional
The size of the figure in inches.
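    Examples
    --------
    A minimal sketch with two illustrative 2D Gaussians (skipped in doctests
    because it opens a figure):
    >>> import numpy as np
    >>> covariances = [np.eye(2), np.array([[4.0, 0.0], [0.0, 1.0]])]
    >>> means = [np.zeros(2), np.array([10.0, 10.0])]
    >>> plot_gaussian_ellipses(covariances, means, n_std=2)  # doctest: +SKIP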
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.font_manager import FontProperties
from menpo.shape import PointCloud
def eigh_sorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
# get correct line style
if line_style == "-":
line_style = "solid"
elif line_style == "--":
line_style = "dashed"
elif line_style == "-.":
line_style = "dashdot"
elif line_style == ":":
line_style = "dotted"
else:
raise ValueError("line_style must be selected from " "['-', '--', '-.', ':'].")
# create pointcloud
pc = PointCloud(np.array(means))
# compute axes limits
bounds = pc.bounds()
r = pc.range()
x_rr = r[0] * crop_proportion
y_rr = r[1] * crop_proportion
axes_x_limits = [bounds[0][1] - x_rr, bounds[1][1] + x_rr]
axes_y_limits = [bounds[0][0] - y_rr, bounds[1][0] + y_rr]
normalizer = np.sum(r) / 2.0
# compute height, width, theta and std
stds = []
heights = []
widths = []
thetas = []
for cov in covariances:
vals, vecs = eigh_sorted(cov)
width, height = np.sqrt(vals)
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
stds.append(np.mean([height, width]) / normalizer)
heights.append(height)
widths.append(width)
thetas.append(theta)
if render_colour_bar:
# set colormap values
cmap = plt.get_cmap(colour_map)
cNorm = colors.Normalize(vmin=np.min(stds), vmax=np.max(stds))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
# visualize pointcloud
if render_colour_bar:
renderer = pc.view(
figure_id=figure_id,
new_figure=new_figure,
image_view=image_view,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
figure_size=figure_size,
render_markers=False,
)
else:
renderer = pc.view(
figure_id=figure_id,
new_figure=new_figure,
image_view=image_view,
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
marker_edge_width=marker_edge_width,
marker_size=marker_size,
marker_style=marker_style,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
figure_size=figure_size,
render_markers=render_markers,
)
# plot ellipses
ax = plt.gca()
for i in range(len(covariances)):
# Width and height are "full" widths, not radius
width = 2 * n_std * widths[i]
height = 2 * n_std * heights[i]
if image_view:
colour = line_colour
if render_colour_bar:
colour = scalarMap.to_rgba(stds[i])
if render_markers:
                # Line2D does not accept facecolor/edgecolor, so draw the
                # centre marker with scatter instead
                plt.scatter(
                    means[i][1],
                    means[i][0],
                    facecolor=colour,
                    edgecolor=colour,
                    linewidth=0,
                )
ellip = Ellipse(
xy=means[i][-1::-1],
width=height,
height=width,
angle=thetas[i],
linestyle=line_style,
linewidth=line_width,
edgecolor=colour,
facecolor="none",
)
else:
colour = line_colour
if render_colour_bar:
colour = scalarMap.to_rgba(stds[i])
if render_markers:
                # as above, use scatter so facecolor/edgecolor apply to the
                # centre marker
                plt.scatter(
                    means[i][0],
                    means[i][1],
                    facecolor=colour,
                    edgecolor=colour,
                    linewidth=0,
                )
ellip = Ellipse(
xy=means[i],
width=width,
height=height,
angle=thetas[i],
linestyle=line_style,
linewidth=line_width,
edgecolor=colour,
facecolor="none",
)
ax.add_artist(ellip)
# show colour bar
if render_colour_bar:
scalarMap.set_array(stds)
cb = plt.colorbar(scalarMap, label=colour_bar_label)
# change colour bar's font properties
ax = cb.ax
text = ax.yaxis.label
font = FontProperties(
size=axes_font_size,
weight=axes_font_weight,
style=axes_font_style,
family=axes_font_name,
)
text.set_font_properties(font)
return renderer
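# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original source). The ellipse
# renderer above is referred to here as `plot_gaussian_ellipses`; that name is
# an assumption -- substitute whatever name the function/method is actually
# defined under in this module.
#
#   import numpy as np
#   covariances = [np.array([[2.0, 0.5], [0.5, 1.0]]),
#                  np.array([[1.0, -0.3], [-0.3, 0.5]])]
#   means = [np.array([0.0, 0.0]), np.array([3.0, 1.0])]
#   renderer = plot_gaussian_ellipses(covariances, means, n_std=2.0,
#                                     render_colour_bar=True)
# ----------------------------------------------------------------------------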
| bsd-3-clause |
mosra/m.css | documentation/test_python/test_page.py | 1 | 4453 | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import matplotlib
import os
import re
import subprocess
from distutils.version import LooseVersion
from . import BaseTestCase
def dot_version():
return re.match(".*version (?P<version>\d+\.\d+\.\d+).*", subprocess.check_output(['dot', '-V'], stderr=subprocess.STDOUT).decode('utf-8').strip()).group('version')
class Page(BaseTestCase):
def test(self):
self.run_python({
'INPUT_PAGES': ['index.rst', 'another.rst', 'error.rst']
})
self.assertEqual(*self.actual_expected_contents('index.html'))
self.assertEqual(*self.actual_expected_contents('another.html'))
self.assertEqual(*self.actual_expected_contents('error.html'))
self.assertEqual(*self.actual_expected_contents('pages.html'))
class InputSubdir(BaseTestCase):
def test(self):
self.run_python({
'INPUT': 'sub',
'INPUT_PAGES': ['index.rst']
})
# The same output as Page, just the file is taken from elsewhere
self.assertEqual(*self.actual_expected_contents('index.html', '../page/index.html'))
class Plugins(BaseTestCase):
def test(self):
self.run_python({
# Test all of them to check the registration works well
'PLUGINS': [
'm.abbr',
'm.code',
'm.components',
'm.dot',
'm.dox',
'm.gh',
'm.gl',
'm.images',
'm.link',
'm.plots',
'm.vk',
'fancyline'
],
'PLUGIN_PATHS': ['plugins'],
'INPUT_PAGES': ['index.rst', 'dot.rst', 'plots.rst'],
'M_HTMLSANITY_SMART_QUOTES': True,
'M_DOT_FONT': 'DejaVu Sans',
'M_PLOTS_FONT': 'DejaVu Sans',
'M_DOX_TAGFILES': [
(os.path.join(self.path, '../../../doc/documentation/corrade.tag'), 'https://doc.magnum.graphics/corrade/')
]
})
self.assertEqual(*self.actual_expected_contents('index.html'))
# The output is different for every other Graphviz
if LooseVersion(dot_version()) >= LooseVersion("2.44.0"):
file = 'dot.html'
elif LooseVersion(dot_version()) > LooseVersion("2.40.0"):
file = 'dot-240.html'
elif LooseVersion(dot_version()) >= LooseVersion("2.38.0"):
file = 'dot-238.html'
self.assertEqual(*self.actual_expected_contents('dot.html', file))
# I assume this will be a MASSIVE ANNOYANCE at some point as well so
# keeping it separate
self.assertEqual(*self.actual_expected_contents('plots.html'))
self.assertTrue(os.path.exists(os.path.join(self.path, 'output/tiny.png')))
import fancyline
self.assertEqual(fancyline.post_crawl_call_count, 1)
self.assertEqual(fancyline.scope_stack, [])
# No code, thus no docstrings processed
self.assertEqual(fancyline.docstring_call_count, 0)
        # Once for each page, but none for render_docs() as that shouldn't
# generate any output anyway
self.assertEqual(fancyline.pre_page_call_count, 3)
self.assertEqual(fancyline.post_run_call_count, 1)
| mit |
jdwittenauer/ionyx | ionyx/experiment.py | 1 | 19762 | import pickle
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import get_scorer
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_validate, learning_curve, train_test_split
from .print_message import PrintMessageMixin
class Experiment(PrintMessageMixin):
"""
Provides functionality to create and run machine learning experiments. Designed to
serve as a "wrapper" for running an experiment. This class provides methods for
training, cross-validation, parameter tuning etc. The main value proposition is
in providing a simplified API for common tasks, layering useful reporting and logging
on top, and reconciling capabilities between several popular libraries.
Parameters
----------
package : {'sklearn', 'xgboost', 'keras', 'prophet'}
The source package of the model used in the experiment. Some capabilities are
available only using certain packages.
model : object
An instantiated supervised learning model. Must be API compatible with
scikit-learn estimators. Pipelines are also supported.
scoring_metric : string
Name of the metric to use to score models. Text must match a valid scikit-learn
metric.
eval_metric : string, optional, default None
        Separate metric used specifically for evaluation on hold-out sets during
        training. Text must match an evaluation metric supported by the package the
model originates from.
n_jobs : int, optional, default 1
Number of parallel processes to use (where functionality is available).
verbose : boolean, optional, default True
If true, messages will be written to the console.
logger : object, optional, default None
An instantiated log writer with an open file handle. If provided, messages
will be written to the log file.
data : DataFrame, optional, default None
The data set to be used for the experiment. Provides the option to specify the
data at initialization vs. passing in training data and labels with each
function call. If "data" is specified then "X_columns" and "y_column" must also
be specified.
X_columns : list, optional, default None
List of columns in "data" to use for the training set.
y_column : string, optional, default None
Name of the column in "data" to use as a label for supervised learning.
cv : object, optional, default None
A cross-validation strategy. Accepts all options considered valid by
scikit-learn.
Attributes
----------
scorer_ : object
Scikit-learn scoring function for the provided scoring metric.
best_model_ : object
The best model found during a parameter search.
"""
def __init__(self, package, model, scoring_metric, eval_metric=None, n_jobs=1, verbose=True,
logger=None, data=None, X_columns=None, y_column=None, cv=None):
PrintMessageMixin.__init__(self, verbose, logger)
self.package = package
self.model = model
self.scoring_metric = scoring_metric
self.eval_metric = eval_metric
self.n_jobs = n_jobs
self.scorer_ = get_scorer(self.scoring_metric)
self.best_model_ = None
self.data = data
if self.data is not None:
if X_columns and y_column:
self.X = data[X_columns].values
self.y = data[y_column].values
else:
raise Exception('X and y columns must be specified if data set is provided.')
self.cv = cv
self.print_message('Beginning experiment...')
self.print_message('Package = {0}'.format(package))
self.print_message('Scoring Metric = {0}'.format(scoring_metric))
self.print_message('Evaluation Metric = {0}'.format(eval_metric))
self.print_message('Parallel Jobs = {0}'.format(n_jobs))
self.print_message('Model:')
self.print_message(model, pprint=True)
self.print_message('Parameters:')
self.print_message(model.get_params(), pprint=True)
def train_model(self, X=None, y=None, validate=False, early_stopping=False,
early_stopping_rounds=None, plot_eval_history=False, fig_size=16):
"""
Trains a new model using the provided training data.
Parameters
----------
X : array-like, optional, default None
Training input samples. Must be specified if no data was provided during
initialization.
y : array-like, optional, default None
Target values. Must be specified if no data was provided during
initialization.
validate : boolean, optional, default False
Evaluate model on a hold-out set during training.
early_stopping : boolean, optional, default False
Stop training the model when performance on a validation set begins to drop.
Eval must be enabled.
early_stopping_rounds : int, optional, default None
            Number of training iterations without improvement on the validation set
            to allow before stopping training. Eval and early_stopping must be enabled.
plot_eval_history : boolean, optional, default False
Plot model performance as a function of training time. Eval must be enabled.
fig_size : int, optional, default 16
Size of the evaluation history plot.
"""
if X is not None:
self.X = X
if y is not None:
self.y = y
self.print_message('Beginning model training...')
self.print_message('X dimensions = {0}'.format(self.X.shape))
self.print_message('y dimensions = {0}'.format(self.y.shape))
v = 1 if self.verbose else 0
t0 = time.time()
if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
raise Exception('Package not supported.')
if validate and self.package in ['xgboost', 'keras']:
X_train, X_eval, y_train, y_eval = train_test_split(self.X, self.y, test_size=0.1)
training_history = None
min_eval_loss = None
min_eval_epoch = None
if early_stopping:
if self.package == 'xgboost':
self.model.fit(X_train, y_train, eval_set=[(X_eval, y_eval)],
eval_metric=self.eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=self.verbose)
elif self.package == 'keras':
from keras.callbacks import EarlyStopping
callbacks = [
EarlyStopping(monitor='val_loss', patience=early_stopping_rounds)
]
training_history = self.model.fit(X_train, y_train, verbose=v,
validation_data=(X_eval, y_eval),
callbacks=callbacks)
else:
if self.package == 'xgboost':
self.model.fit(X_train, y_train, eval_set=[(X_eval, y_eval)],
eval_metric=self.eval_metric, verbose=self.verbose)
elif self.package == 'keras':
training_history = self.model.fit(X_train, y_train, verbose=v,
validation_data=(X_eval, y_eval))
if self.package == 'xgboost':
training_history = self.model.evals_result()['validation_0'][self.eval_metric]
min_eval_loss = min(training_history)
min_eval_epoch = training_history.index(min(training_history)) + 1
elif self.package == 'keras':
training_history = training_history.history['val_loss']
min_eval_loss = min(training_history)
min_eval_epoch = training_history.index(min(training_history)) + 1
if plot_eval_history:
df = pd.DataFrame(training_history, columns=['Eval Loss'])
df.plot(figsize=(fig_size, fig_size * 3 / 4))
t1 = time.time()
self.print_message('Model training complete in {0:3f} s.'.format(t1 - t0))
self.print_message('Training score = {0}'
.format(self.scorer_(self.model, X_train, y_train)))
self.print_message('Min. evaluation score = {0}'.format(min_eval_loss))
self.print_message('Min. evaluation epoch = {0}'.format(min_eval_epoch))
elif validate:
raise Exception('Package does not support evaluation during training.')
else:
if self.package == 'keras':
self.model.set_params(verbose=v)
self.model.fit(self.X, self.y)
t1 = time.time()
self.print_message('Model training complete in {0:3f} s.'.format(t1 - t0))
self.print_message('Training score = {0}'
.format(self.scorer_(self.model, self.X, self.y)))
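    # Illustrative call (not part of the original source): with an Experiment
    # instance (here called exp) built around package='xgboost' and an
    # eval_metric such as 'rmse', hold-out evaluation with early stopping
    # could be requested as
    #   exp.train_model(X, y, validate=True, early_stopping=True,
    #                   early_stopping_rounds=10, plot_eval_history=True)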
def cross_validate(self, X=None, y=None, cv=None):
"""
Performs cross-validation to estimate the true performance of the model.
Parameters
----------
X : array-like, optional, default None
Training input samples. Must be specified if no data was provided during
initialization.
y : array-like, optional, default None
Target values. Must be specified if no data was provided during
initialization.
cv : object, optional, default None
A cross-validation strategy. Accepts all options considered valid by
scikit-learn. Must be specified if no cv was passed in during
initialization.
"""
if X is not None:
self.X = X
if y is not None:
self.y = y
if cv is not None:
self.cv = cv
self.print_message('Beginning cross-validation...')
self.print_message('X dimensions = {0}'.format(self.X.shape))
self.print_message('y dimensions = {0}'.format(self.y.shape))
self.print_message('Cross-validation strategy = {0}'.format(self.cv))
t0 = time.time()
if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
raise Exception('Package not supported.')
if self.package == 'keras':
self.model.set_params(verbose=0)
results = cross_validate(self.model, self.X, self.y, scoring=self.scoring_metric,
cv=self.cv, n_jobs=self.n_jobs, verbose=0,
return_train_score=True)
t1 = time.time()
self.print_message('Cross-validation complete in {0:3f} s.'.format(t1 - t0))
train_score = np.mean(results['train_score'])
test_score = np.mean(results['test_score'])
self.print_message('Training score = {0}'.format(train_score))
self.print_message('Cross-validation score = {0}'.format(test_score))
def learning_curve(self, X=None, y=None, cv=None, fig_size=16):
"""
Plots a learning curve showing model performance against both training and
validation data sets as a function of the number of training samples.
Parameters
----------
X : array-like, optional, default None
Training input samples. Must be specified if no data was provided during
initialization.
y : array-like, optional, default None
Target values. Must be specified if no data was provided during
initialization.
cv : object, optional, default None
A cross-validation strategy. Accepts all options considered valid by
scikit-learn. Must be specified if no cv was passed in during
initialization.
fig_size : int, optional, default 16
Size of the plot.
"""
if X is not None:
self.X = X
if y is not None:
self.y = y
if cv is not None:
self.cv = cv
self.print_message('Plotting learning curve...')
self.print_message('X dimensions = {0}'.format(self.X.shape))
self.print_message('y dimensions = {0}'.format(self.y.shape))
self.print_message('Cross-validation strategy = {0}'.format(self.cv))
t0 = time.time()
if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
raise Exception('Package not supported.')
if self.package == 'keras':
self.model.set_params(verbose=0)
values = learning_curve(self.model, self.X, self.y, cv=self.cv,
scoring=self.scoring_metric, n_jobs=self.n_jobs, verbose=0)
train_sizes, train_scores, test_scores = values
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fig, ax = plt.subplots(figsize=(fig_size, fig_size * 3 / 4))
ax.set_title('Learning Curve')
ax.set_xlabel('Training Examples')
ax.set_ylabel('Score')
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='b')
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color='r')
ax.plot(train_sizes, train_scores_mean, 'o-', color='b', label='Training score')
ax.plot(train_sizes, test_scores_mean, 'o-', color='r', label='Cross-validation score')
ax.legend(loc='best')
fig.tight_layout()
t1 = time.time()
self.print_message('Plot generation complete in {0:3f} s.'.format(t1 - t0))
def param_search(self, param_grid, X=None, y=None, cv=None, search_type='grid',
n_iter=100, save_results_path=None):
"""
Conduct a search over some pre-defined set of hyper-parameter configurations
        to find the best-performing set of parameters.
Parameters
----------
param_grid : list, dict
Parameter search space. See scikit-learn documentation for GridSearchCV and
RandomSearchCV for acceptable formatting.
X : array-like, optional, default None
Training input samples. Must be specified if no data was provided during
initialization.
y : array-like, optional, default None
Target values. Must be specified if no data was provided during
initialization.
cv : object, optional, default None
A cross-validation strategy. Accepts all options considered valid by
scikit-learn. Must be specified if no cv was passed in during
initialization.
search_type : {'grid', 'random'}, optional, default 'grid'
Specifies use of grid search or random search. Requirements for param_grid
are different depending on which method is used. See scikit-learn
documentation for GridSearchCV and RandomSearchCV for details.
n_iter : int, optional, default 100
Number of search iterations to run. Only applies to random search.
save_results_path : string, optional, default None
            Specifies a location to save the full results of the search in CSV format.
File name should end in .csv.
"""
if X is not None:
self.X = X
if y is not None:
self.y = y
if cv is not None:
self.cv = cv
self.print_message('Beginning hyper-parameter search...')
self.print_message('X dimensions = {0}'.format(self.X.shape))
self.print_message('y dimensions = {0}'.format(self.y.shape))
self.print_message('Cross-validation strategy = {0}'.format(self.cv))
t0 = time.time()
if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
raise Exception('Package not supported.')
if self.package == 'keras':
self.model.set_params(verbose=0)
if search_type == 'grid':
search = GridSearchCV(self.model, param_grid=param_grid, scoring=self.scoring_metric,
n_jobs=self.n_jobs, cv=self.cv, refit=self.scoring_metric,
verbose=0, return_train_score=True)
elif search_type == 'random':
search = RandomizedSearchCV(self.model, param_grid, n_iter=n_iter,
scoring=self.scoring_metric, n_jobs=self.n_jobs,
cv=self.cv, refit=self.scoring_metric, verbose=0,
return_train_score=True)
else:
raise Exception('Search type not supported.')
search.fit(self.X, self.y)
t1 = time.time()
self.print_message('Hyper-parameter search complete in {0:3f} s.'.format(t1 - t0))
self.print_message('Best score found = {0}'.format(search.best_score_))
self.print_message('Best parameters found:')
self.print_message(search.best_params_, pprint=True)
self.best_model_ = search.best_estimator_
if save_results_path:
results = pd.DataFrame(search.cv_results_)
results = results.sort_values(by='mean_test_score', ascending=False)
results.to_csv(save_results_path, index=False)
def load_model(self, filename):
"""
Load a previously trained model from disk.
Parameters
----------
filename : string
Location of the file to read.
"""
self.print_message('Loading model...')
t0 = time.time()
if self.package in ['sklearn', 'xgboost', 'prophet']:
model_file = open(filename, 'rb')
self.model = pickle.load(model_file)
model_file.close()
elif self.package == 'keras':
from keras.models import load_model
self.model.model = load_model(filename)
else:
raise Exception('Package not supported.')
t1 = time.time()
self.print_message('Model loaded in {0:3f} s.'.format(t1 - t0))
def save_model(self, filename):
"""
Persist a trained model to disk.
Parameters
----------
filename : string
Location of the file to write. Scikit-learn, XGBoost, and Prophet use
pickle to write to disk (use .pkl extension for clarity) while Keras has a
built-in save function that uses the HDF5 file format, so Keras models must
have a .h5 extension.
"""
self.print_message('Saving model...')
t0 = time.time()
if self.package in ['sklearn', 'xgboost', 'prophet']:
model_file = open(filename, 'wb')
pickle.dump(self.model, model_file)
model_file.close()
elif self.package == 'keras':
if hasattr(self.model, 'model'):
self.model.model.save(filename)
else:
raise Exception('Keras model must be fit before saving.')
else:
raise Exception('Package not supported.')
t1 = time.time()
self.print_message('Model saved in {0:3f} s.'.format(t1 - t0))
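# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The iris data
# and RandomForestClassifier below are stand-ins chosen for the example; any
# scikit-learn compatible estimator and data set would do.
def _example_usage():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X, y = load_iris(return_X_y=True)
    exp = Experiment(package='sklearn', model=RandomForestClassifier(),
                     scoring_metric='accuracy', cv=5)
    exp.train_model(X, y)
    exp.cross_validate(X, y)
    exp.param_search({'n_estimators': [100, 200], 'max_depth': [3, None]},
                     X, y, search_type='grid')
    return exp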
| apache-2.0 |
niltonlk/nest-simulator | doc/userdoc/guides/spatial/user_manual_scripts/layers.py | 17 | 11076 | # -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python3 layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(1234567)
def beautify_layer(layer, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
ctr = layer.spatial['center']
ext = layer.spatial['extent']
if xticks is None:
if 'shape' in layer.spatial:
dx = float(ext[0]) / layer.spatial['shape'][0]
dy = float(ext[1]) / layer.spatial['shape'][1]
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
layer.spatial['shape'][0])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
layer.spatial['shape'][1])
if xlim is None:
        # extra space so extent is visible
        xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[0] / 2. + dx / 2.]
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_axisbelow(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
# --------------------------------------------------
nest.ResetKernel()
#{ layer1 #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[5, 5]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(ax.text(0.65, 0.4 - r * 0.2, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(ax.text(-0.4 + r * 0.2, 0.65, str(r),
horizontalalignment='center',
verticalalignment='center'))
# For bbox_extra_artists, see
# https://github.com/matplotlib/matplotlib/issues/351
# plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight',
# bbox_extra_artists=tx)
print("#{ layer1s.log #}")
#{ layer1s #}
print(layer.spatial)
#{ end #}
print("#{ end.log #}")
print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNodes()
#{ end #}
print("#{ end.log #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer2 #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 5],
extent=[2.0, 0.5]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(fig.gca().text(1.25, 0.2 - r * 0.1, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(fig.gca().text(-0.8 + r * 0.4, 0.35, str(r),
horizontalalignment='center',
verticalalignment='center'))
# See https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight',
bbox_extra_artists=tx)
# --------------------------------------------------
nest.ResetKernel()
#{ layer3 #}
layer1 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[5, 5]))
layer2 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 5],
center=[-1., 1.]))
layer3 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 5],
center=[1.5, 0.5]))
#{ end #}
fig = nest.PlotLayer(layer1, nodesize=50)
nest.PlotLayer(layer2, nodesize=50, nodecolor='g', fig=fig)
nest.PlotLayer(layer3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(layer1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-1.6, 2.1], ylim=[-0.6, 1.6],
xticks=np.arange(-1.4, 2.05, 0.2),
yticks=np.arange(-0.4, 1.45, 0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer3a #}
nx, ny = 5, 3
d = 0.1
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[nx, ny],
extent=[nx * d, ny * d],
center=[nx * d / 2., 0.]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=100)
plt.plot(0, 0, 'x', markersize=20, c='k', mew=3)
plt.plot(nx * d / 2, 0, 'o', markersize=20, c='k', mew=3, mfc='none',
zorder=100)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xticks=np.arange(0., 0.501, 0.05),
yticks=np.arange(-0.15, 0.151, 0.05),
xlim=[-0.05, 0.55], ylim=[-0.2, 0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4 #}
pos = nest.spatial.free(pos=nest.random.uniform(min=-0.5, max=0.5),
num_dimensions=2)
layer = nest.Create('iaf_psc_alpha', 50,
positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55, 0.55], ylim=[-0.55, 0.55],
xticks=[-0.5, 0., 0.5], yticks=[-0.5, 0., 0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4b #}
pos = nest.spatial.free(pos=[[-0.5, -0.5], [-0.25, -0.25], [0.75, 0.75]])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55, 0.80], ylim=[-0.55, 0.80],
xticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.],
yticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.])
plt.savefig('../user_manual_figures/layer4b.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d #}
pos = nest.spatial.free(nest.random.uniform(min=-0.5, max=0.5),
num_dimensions=3)
layer = nest.Create('iaf_psc_alpha', 200, positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d_b #}
pos = nest.spatial.grid(shape=[4, 5, 6])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d_b.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ player #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[5, 1],
extent=[5., 1.],
edge_wrap=True))
#{ end #}
# fake plot with layer on line and circle
clist = [(0, 0, 1), (0.35, 0, 1), (0.6, 0, 1), (0.8, 0, 1), (1.0, 0, 1)]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1.scatter(range(1, 6), [0] * 5, s=200, c=clist)
ax1.set_xlim([0, 6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1, 6):
ax1.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1a.scatter(range(1, 6), [0] * 5, s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax1a.set_xlim([0, 6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1, 6):
ax1a.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2 * np.pi + 0.5, 0.1)
r = 5. / (2 * np.pi)
ax2.plot(r * np.cos(phic), r * np.sin(phic), 'k-', lw=2)
phin = np.arange(0., 4.1, 1.) * 2 * np.pi / 5
ax2.scatter(r * np.sin(phin), r * np.cos(phin), s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax2.set_xlim([-1.3, 1.3])
ax2.set_ylim([-1.2, 1.2])
ax2.set_aspect('equal', 'box')
ax2.set_xticks([])
ax2.set_yticks([])
for j in range(5):
ax2.text(1.4 * r * np.sin(phin[j]), 1.4 * r * np.cos(phin[j]),
str('(%d,0)' % (j + 1 - 3)),
horizontalalignment='center', verticalalignment='center')
plt.savefig('../user_manual_figures/player.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer6 #}
layer1 = nest.Create('iaf_cond_alpha',
positions=nest.spatial.grid(shape=[2, 1]))
layer2 = nest.Create('poisson_generator',
positions=nest.spatial.grid(shape=[2, 1]))
#{ end #}
print("#{ layer6 #}")
nest.PrintNodes()
print("#{ end #}")
# --------------------------------------------------
nest.ResetKernel()
#{ vislayer #}
layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[21, 21]))
probability_param = nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.15)
conndict = {'rule': 'pairwise_bernoulli',
'p': probability_param,
'mask': {'circular': {'radius': 0.4}}}
nest.Connect(layer, layer, conndict)
fig = nest.PlotLayer(layer, nodesize=80)
ctr = nest.FindCenterElement(layer)
nest.PlotTargets(ctr, layer, fig=fig,
mask=conndict['mask'], probability_parameter=probability_param,
src_size=250, tgt_color='red', tgt_size=20, mask_color='red',
probability_cmap='Greens')
#{ end #}
plt.savefig('../user_manual_figures/vislayer.png', bbox_inches='tight')
| gpl-2.0 |
dotpmrcunha/gnuradio | gr-filter/examples/interpolate.py | 58 | 8816 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
self.signal = blocks.add_cc()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = blocks.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = blocks.vector_sink_c()
self.snk2 = blocks.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
akrherz/iem | htdocs/plotting/auto/scripts/p8.py | 1 | 3318 | """ Monthly precip reliability"""
import calendar
import datetime
import psycopg2.extras
import numpy as np
import pandas as pd
from pyiem import network
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
y2 = datetime.date.today().year
y1 = y2 - 20
desc["arguments"] = [
dict(
type="station",
name="station",
default="IA0200",
label="Select Station:",
network="IACLIMATE",
),
dict(type="year", name="syear", default=y1, label="Enter Start Year:"),
dict(
type="year",
name="eyear",
default=y2,
label="Enter End Year (inclusive):",
),
dict(
type="int",
name="threshold",
default="80",
label="Threshold Percentage [%]:",
),
]
desc["data"] = True
desc[
"description"
] = """This plot presents the frequency of having
    a month's precipitation at or above some threshold. This threshold
is compared against the long term climatology for the site and month. This
plot is designed to answer the question about reliability of monthly
precipitation for a period of your choice. """
return desc
def plotter(fdict):
""" Go """
coop = get_dbconn("coop")
cursor = coop.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
syear = ctx["syear"]
eyear = ctx["eyear"]
threshold = ctx["threshold"]
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
cursor.execute(
f"""
with months as (
select year, month, p, avg(p) OVER (PARTITION by month) from (
select year, month, sum(precip) as p from {table}
where station = %s and year < extract(year from now())
GROUP by year, month) as foo)
SELECT month, sum(case when p > (avg * %s / 100.0) then 1 else 0 end)
from months WHERE year >= %s and year < %s
GROUP by month ORDER by month ASC
""",
(station, threshold, syear, eyear),
)
vals = []
years = float(1 + eyear - syear)
for row in cursor:
vals.append(row[1] / years * 100.0)
if not vals:
raise NoDataFound("No Data Found!")
df = pd.DataFrame(
dict(freq=pd.Series(vals, index=range(1, 13))),
index=pd.Series(range(1, 13), name="month"),
)
(fig, ax) = plt.subplots(1, 1)
ax.bar(np.arange(1, 13), vals, align="center")
ax.set_xticks(np.arange(1, 13))
ax.set_ylim(0, 100)
ax.set_yticks(np.arange(0, 101, 10))
ax.set_xticklabels(calendar.month_abbr[1:])
ax.grid(True)
ax.set_xlim(0.5, 12.5)
ax.set_ylabel("Percentage of Months, n=%.0f years" % (years,))
ax.set_title(
(
"%s [%s] Monthly Precipitation Reliability\n"
"Period: %s-%s, %% of Months above %s%% of Long Term Avg"
)
% (nt.sts[station]["name"], station, syear, eyear, threshold)
)
return fig, df
if __name__ == "__main__":
plotter(dict())
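    # Illustrative explicit call (values are placeholders; keys mirror the
    # options declared in get_description above):
    #   plotter(dict(station='IA0200', syear=1998, eyear=2018, threshold=80))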
| mit |
janhahne/nest-simulator | pynest/examples/spatial/grid_iaf.py | 20 | 1437 | # -*- coding: utf-8 -*-
#
# grid_iaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create a population of iaf_psc_alpha neurons on a 4x3 grid
-----------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
l1 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[4, 3], extent=[2., 1.5]))
nest.PrintNodes()
nest.PlotLayer(l1, nodesize=50)
# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
plt.axes().set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('4 Columns, Extent: 1.5')
plt.ylabel('2 Rows, Extent: 1.0')
plt.show()
# plt.savefig('grid_iaf.png')
| gpl-2.0 |
mdjurfeldt/nest-simulator | examples/neuronview/neuronview.py | 13 | 10676 | # -*- coding: utf-8 -*-
#
# neuronview.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk # noqa
import pango # noqa
import gobject # noqa
from matplotlib.figure import Figure # noqa
from matplotlib.backends.backend_gtkagg import \
FigureCanvasGTKAgg as FigureCanvas # noqa
import matplotlib.gridspec as gridspec # noqa
import os # noqa
import nest # noqa
default_neuron = "iaf_psc_alpha"
default_stimulator = "dc_generator"
class Main():
def __init__(self):
self._gladefile = "neuronview.glade"
self._builder = gtk.Builder()
self._builder.add_from_file(self._gladefile)
self._builder.connect_signals(self)
self._win = self._builder.get_object("mainwindow")
self._win.resize(900, 700)
box = self._builder.get_object("box5")
self._stimulatordictview = DictView()
self._builder.get_object("scrolledwindow2").add(
self._stimulatordictview)
box = self._builder.get_object("box4")
self._neurondictview = DictView()
self._builder.get_object("scrolledwindow3").add(self._neurondictview)
self.populate_comboboxes()
self._figure = Figure(figsize=(5, 4), dpi=100)
canvas = FigureCanvas(self._figure)
canvas.set_size_request(200, 250)
canvas.show()
box = self._builder.get_object("box3")
bg_style = box.get_style().bg[gtk.STATE_NORMAL]
gtk_color = (bg_style.red_float, bg_style.green_float,
bg_style.blue_float)
self._figure.set_facecolor(gtk_color)
box.pack_start(canvas)
self._win.show()
gtk.main()
def update_figure(self, spikes, potentials):
if nest.GetKernelStatus("time") != 0.0:
self._figure.clear()
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
ax0 = self._figure.add_subplot(gs[0])
ax0.plot(spikes[0]["times"], [1] * len(spikes[0]["times"]), ".")
ax0.set_yticks([])
ax0.set_xticks([])
ax1 = self._figure.add_subplot(gs[1])
ax1.plot(potentials[0]["times"], potentials[0]["V_m"], "r-")
ax1.set_ylabel("$V_m$ (mV)")
ax1.set_xlabel("time (s)")
# plt.tight_layout()
self._figure.canvas.draw()
def filter_statusdict(self, params):
for key in ["archiver_length", "available", "capacity",
"elementsize", "frozen", "global_id",
"instantiations", "is_refractory", "local",
"model", "element_type", "offset", "origin",
"receptor_types", "recordables",
"refractory_input", "rmax", "state", "t_spike",
"thread", "tlast", "tspike", "type_id", "vp",
"ymod"]:
if key in params.keys():
params.pop(key)
def populate_comboboxes(self):
neuronmodels = self._builder.get_object("neuronmodels")
neuronmodelsliststore = neuronmodels.get_model()
stimulatormodels = self._builder.get_object("stimulatormodels")
stimulatormodelsliststore = stimulatormodels.get_model()
neuron_it = None
stimulator_it = None
models = nest.Models("nodes")
models = [x for x in models if
x not in ["correlation_detector", "sli_neuron",
"iaf_psc_alpha_norec", "parrot_neuron",
"parrot_neuron_ps"]]
for entry in models:
try:
entrytype = nest.GetDefaults(entry)["element_type"]
            except Exception:
entrytype = "unknown"
if entrytype == "neuron":
it = neuronmodelsliststore.append([entry])
if entry == default_neuron:
neuron_it = it
elif entrytype == "stimulator":
it = stimulatormodelsliststore.append([entry])
if entry == default_stimulator:
stimulator_it = it
cell = gtk.CellRendererText()
neuronmodels.pack_start(cell, True)
neuronmodels.add_attribute(cell, 'text', 0)
neuronmodels.set_active_iter(neuron_it)
stimulatormodels.pack_start(cell, True)
stimulatormodels.add_attribute(cell, 'text', 0)
stimulatormodels.set_active_iter(stimulator_it)
docviewcombo = self._builder.get_object("docviewcombo")
docviewcomboliststore = docviewcombo.get_model()
docviewcomboliststore.append(["Stimulating device"])
it = docviewcomboliststore.append(["Neuron"])
docviewcombo.pack_start(cell, True)
docviewcombo.add_attribute(cell, 'text', 0)
docviewcombo.set_active_iter(it)
def get_help_text(self, name):
nest.sli_run("statusdict /prgdocdir get")
docdir = nest.sli_pop()
helptext = "No documentation available"
for subdir in ["cc", "sli"]:
filename = os.path.join(docdir, "help", subdir, name + ".hlp")
if os.path.isfile(filename):
helptext = open(filename, 'r').read()
return helptext
def on_model_selected(self, widget):
liststore = widget.get_model()
model = liststore.get_value(widget.get_active_iter(), 0)
statusdict = nest.GetDefaults(model)
self.filter_statusdict(statusdict)
if widget == self._builder.get_object("neuronmodels"):
self._neurondictview.set_params(statusdict)
if widget == self._builder.get_object("stimulatormodels"):
self._stimulatordictview.set_params(statusdict)
self.on_doc_selected(self._builder.get_object("docviewcombo"))
def on_doc_selected(self, widget):
liststore = widget.get_model()
doc = liststore.get_value(widget.get_active_iter(), 0)
docview = self._builder.get_object("docview")
docbuffer = gtk.TextBuffer()
if doc == "Neuron":
combobox = self._builder.get_object("neuronmodels")
if doc == "Stimulating device":
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
model = liststore.get_value(combobox.get_active_iter(), 0)
docbuffer.set_text(self.get_help_text(model))
docview.set_buffer(docbuffer)
docview.modify_font(pango.FontDescription("monospace 10"))
def on_simulate_clicked(self, widget):
nest.ResetKernel()
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
stimulatormodel = liststore.get_value(combobox.get_active_iter(), 0)
params = self._stimulatordictview.get_params()
stimulator = nest.Create(stimulatormodel, params=params)
combobox = self._builder.get_object("neuronmodels")
liststore = combobox.get_model()
neuronmodel = liststore.get_value(combobox.get_active_iter(), 0)
neuron = nest.Create(neuronmodel,
params=self._neurondictview.get_params())
weight = self._builder.get_object("weight").get_value()
delay = self._builder.get_object("delay").get_value()
nest.Connect(stimulator, neuron, weight, delay)
sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
nest.Connect(neuron, sd)
vm = nest.Create("voltmeter", params={"record_to": ["memory"],
"interval": 0.1})
nest.Connect(vm, neuron)
simtime = self._builder.get_object("simtime").get_value()
nest.Simulate(simtime)
self.update_figure(nest.GetStatus(sd, "events"),
nest.GetStatus(vm, "events"))
def on_delete_event(self, widget, event):
self.on_quit(widget)
return True
def on_quit(self, project):
self._builder.get_object("mainwindow").hide()
gtk.main_quit()
class DictView(gtk.TreeView):
def __init__(self, params=None):
gtk.TreeView.__init__(self)
if params:
self.params = params
self.repopulate()
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name", renderer, text=1)
self.append_column(column)
renderer = gtk.CellRendererText()
renderer.set_property("mode", gtk.CELL_RENDERER_MODE_EDITABLE)
renderer.set_property("editable", True)
column = gtk.TreeViewColumn("Value", renderer, text=2)
self.append_column(column)
self.set_size_request(200, 150)
renderer.connect("edited", self.check_value)
self.show()
def repopulate(self):
model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING,
gobject.TYPE_STRING)
for key in sorted(self.params.keys()):
pos = model.insert_after(None, None)
data = {"key": key, "element_type": type(self.params[key])}
model.set_value(pos, 0, data)
model.set_value(pos, 1, str(key))
model.set_value(pos, 2, str(self.params[key]))
self.set_model(model)
def check_value(self, widget, path, new_text):
model = self.get_model()
data = model[path][0]
try:
typename = data["element_type"].__name__
new_value = eval("%s('%s')" % (typename, new_text))
if typename == "bool" and new_text.lower() in ["false", "0"]:
new_value = False
self.params[data["key"]] = new_value
model[path][2] = str(new_value)
except ValueError:
old_value = self.params[data["key"]]
model[path][2] = str(old_value)
def get_params(self):
return self.params
def set_params(self, params):
self.params = params
self.repopulate()
if __name__ == "__main__":
Main()
| gpl-2.0 |
dmnfarrell/epitopepredict | epitopepredict/utilities.py | 2 | 6016 | #!/usr/bin/env python
"""
Utilities for epitopepredict
Created March 2013
Copyright (C) Damien Farrell
"""
from __future__ import absolute_import, print_function
import os, math, csv, string
import shutil
import numpy as np
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio import PDB
home = os.path.expanduser("~")
def venndiagram(names, labels, ax=None, colors=('r','g','b'), **kwargs):
"""Plot a venn diagram"""
from matplotlib_venn import venn2,venn3
import pylab as plt
f=None
if ax==None:
f=plt.figure(figsize=(4,4))
ax=f.add_subplot(111)
if len(names)==2:
n1,n2=names
v = venn2([set(n1), set(n2)], set_labels=labels, set_colors=colors, **kwargs)
elif len(names)==3:
n1,n2,n3=names
v = venn3([set(n1), set(n2), set(n3)], set_labels=labels, set_colors=colors, **kwargs)
ax.axis('off')
#f.patch.set_visible(False)
ax.set_axis_off()
return f
def compress(filename, remove=False):
"""Compress a file with gzip"""
import gzip
fin = open(filename, 'rb')
fout = gzip.open(filename+'.gz', 'wb')
fout.writelines(fin)
fout.close()
fin.close()
if remove == True:
os.remove(filename)
return
def rmse(ar1, ar2):
"""Mean squared error"""
ar1 = np.asarray(ar1)
ar2 = np.asarray(ar2)
dif = ar1 - ar2
dif *= dif
return np.sqrt(dif.sum()/len(ar1))
def add_dicts(a, b):
return dict((n, a.get(n, 0)+b.get(n, 0)) for n in set(a)|set(b))
def copyfile(source, dest, newname=None):
"""Helper method to copy files"""
if not os.path.exists(source):
#print 'no such file %s' %source
return False
shutil.copy(source, newname)
dest = os.path.join(dest, newname)
if os.path.exists(dest):
os.remove(dest)
shutil.move(newname, dest)
return True
def copyfiles(path, files):
for f in files:
src = os.path.join(path, f)
print (src)
if not os.path.exists(src):
return False
shutil.copy(src, f)
return True
def symmetrize(m, lower=True):
"""Return symmetric array"""
m=m.fillna(0)
if lower==True:
return np.tril(m) + np.triu(m.T) - np.diag(np.diag(m))
else:
return np.triu(m) + np.tril(m.T) - np.diag(np.diag(m))
def get_symmetric_data_frame(m):
x = symmetrize(m)
return pd.DataFrame(x, columns=m.columns,index=m.index)
def find_filefrom_string(files, string):
for f in files:
if string in os.path.splitext(f)[0]:
return f
return ''
def find_files(path, ext='txt'):
"""List files in a dir of a specific type"""
if not os.path.exists(path):
print ('no such directory: %s' %path)
return []
files=[]
for dirname, dirnames, filenames in os.walk(path):
for f in filenames:
name = os.path.join(dirname, f)
if f.endswith(ext):
files.append(name)
return files
def find_folders(path):
if not os.path.exists(path):
print ('no such directory: %s' %path)
return []
dirs = []
for dirname, dirnames, filenames in os.walk(path):
dirs.append(dirname)
return dirs
def reorder_filenames(files, order):
"""reorder filenames by another list order(seqs)"""
new = []
for i in order:
found=False
for f in files:
if i in f:
new.append(f)
found=True
if found==False:
new.append('')
return new
def read_iedb(filename, key='Epitope ID'):
"""Load iedb peptidic csv file and return dataframe"""
#cr = csv.reader(open(filename,'r'))
cr = csv.DictReader(open(filename,'r'),quotechar='"')
cr.fieldnames = [field.strip() for field in cr.fieldnames]
D={}
for r in cr:
k = r[key]
D[k] = r
return D
def get_sequencefrom_pdb(pdbfile, chain='C', index=0):
"""Get AA sequence from PDB"""
parser = PDB.PDBParser(QUIET=True)
struct = parser.get_structure(pdbfile,pdbfile)
ppb = PDB.PPBuilder()
model = struct[0]
peptides = ppb.build_peptides(model[chain])
seq=''
for i,pep in enumerate(peptides):
seq+=str(pep.get_sequence())
return seq
def filter_iedb_file(filename, field, search):
"""Return filtered iedb data"""
X = pd.read_csv(filename)
cols = ['PubMed ID','Author','Journal','Year','T Cell ID','MHC Allele Name',
'Epitope Linear Sequence','Epitope Source Organism Name']
y = X[X[field].str.contains(search)]
print (y[cols])
y.to_csv('filtered.csv',cols=cols)
return y
def search_pubmed(term, max_count=100):
from Bio import Entrez
from Bio import Medline
def fetch_details(id_list):
ids = ','.join(id_list)
Entrez.email = '[email protected]'
handle = Entrez.efetch(db='pubmed',
retmode='xml',
id=ids)
results = Entrez.read(handle)
return results
def search(query):
Entrez.email = '[email protected]'
handle = Entrez.esearch(db='pubmed',
sort='relevance',
retmax=max_count,
retmode='xml',
term=query)
results = Entrez.read(handle)
return results
results = search(term)
id_list = results['IdList']
papers = fetch_details(id_list)
for i, paper in enumerate(papers):
print("%d) %s" % (i+1, paper['MedlineCitation']['Article']['ArticleTitle']))
# Pretty print the first paper in full to observe its structure
#import json
#print(json.dumps(papers[0], indent=2, separators=(',', ':')))
def test():
sourcefasta = os.path.join(home,'dockingdata/fastafiles/1KLU.fasta')
findClosestStructures(sourcefasta)
#fetchPDBList('MHCII_homologs.csv')
if __name__ == '__main__':
test()
| apache-2.0 |
fbagirov/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
hvanwyk/drifter | src/grid/mesh.py | 1 | 15658 | from grid.cell import Cell
from grid.vertex import Vertex
from grid.triangle import Triangle
import numpy
import matplotlib.pyplot as plt
class Mesh(object):
'''
Description: (Quad) Mesh object
Attributes:
bounding_box: [xmin, xmax, ymin, ymax]
children: Cell, list of cells contained in mesh
vertex_list: Vertex, list of vertices (run number_vertices)
connectivity: int, numpy array - element connectivity matrix (run build_connectivity)
max_depth: int, maximum number of times each of the mesh's cell can be refined
balanced: bool, true if mesh is balanced.
Methods:
'''
def __init__(self, box=[0.,1.,0.,1.], nx=2, ny=2):
'''
Description: Constructor, initialize rectangular grid
Inputs:
box: double, boundary vertices of rectangular grid, box = [x_min, x_max, y_min, y_max]
nx: int, number of cells in x-direction
ny: int, number of cells in y-direction
type: 'MESH'
'''
self.bounding_box = box
self.type = 'MESH'
self.children_array_size = (nx,ny)
#
# Define cells in mesh
#
xmin, xmax, ymin, ymax = box
x = numpy.linspace(xmin, xmax, nx+1)
y = numpy.linspace(ymin, ymax, ny+1)
mesh_cells = {}
for i in range(nx):
for j in range(ny):
if i == 0 and j == 0:
v_sw = Vertex((x[i] ,y[j] ))
v_se = Vertex((x[i+1],y[j] ))
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = Vertex((x[i] ,y[j+1]))
elif i > 0 and j == 0:
v_se = Vertex((x[i+1],y[j] ))
v_ne = Vertex((x[i+1],y[j+1]))
v_sw = mesh_cells[i-1,j].vertices['SE']
v_nw = mesh_cells[i-1,j].vertices['NE']
elif i == 0 and j > 0:
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = Vertex((x[i] ,y[j+1]))
v_sw = mesh_cells[i,j-1].vertices['NW']
v_se = mesh_cells[i,j-1].vertices['NE']
elif i > 0 and j > 0:
v_ne = Vertex((x[i+1],y[j+1]))
v_nw = mesh_cells[i-1,j].vertices['NE']
v_sw = mesh_cells[i,j-1].vertices['NW']
v_se = mesh_cells[i,j-1].vertices['NE']
cell_vertices = {'SW': v_sw, 'SE': v_se, 'NE': v_ne, 'NW': v_nw}
cell_address = [i,j]
mesh_cells[i,j] = Cell(cell_vertices, self, cell_address)
self.children = mesh_cells
self.vertex_list = []
self.connectivity = None
self.max_depth = 0
self.__num_vertices = 0
self.__num_cells = 0
self.__balanced = False
self.__triangles = []
def leaves(self):
"""
Description: Returns a list of all leaf sub-cells of the mesh
Input:
group: string, optional sorting criterium (None, or 'depth')
Output:
leaves: list of LEAF cells
"""
#
# All leaves go in a long list
#
leaves = []
for child in self.children.itervalues():
leaves.extend(child.find_leaves())
self.__num_cells = len(leaves)
return leaves
def triangles(self):
"""
Returns a list of triangles
"""
if len(self.__triangles) == 0:
#
# Mesh has not been triangulated yet
#
self.triangulate()
return self.__triangles
else:
#
# Mesh triangulated
#
return self.__triangles
def vertices(self):
"""
Returns a list of vertices.
POSSIBLE BUG: if vertex has been marked outside of
this function, it will not show up in the list.
"""
n_vertices = -1
vertices = []
for leaf in self.leaves():
for v in leaf.vertices.itervalues():
if not v.is_marked():
n_vertices += 1
vertices.append(v)
v.set_node_number(n_vertices)
#
# Mark vertices in the list
#
v.mark()
self.__num_vertices = n_vertices
#
# Unmark all vertices again
#
        for v in vertices:
            v.unmark()
        return vertices
def cells_at_depth(self, depth):
"""
Return all cells at a given depth > 0
"""
cells = []
for child in self.children.itervalues():
cells.extend(child.cells_at_depth(depth))
return cells
def has_children(self):
"""
Determine whether the mesh has children
"""
return any(child != None for child in self.children.itervalues())
    def get_max_depth(self):
        """
        Determine the maximum depth of the mesh
        """
        # Minimal implementation sketch: assumes each leaf Cell exposes a
        # `depth` attribute, as it is used in balance_tree below.
        depths = [leaf.depth for leaf in self.leaves()]
        return max(depths) if depths else 0
def unmark_all(self):
"""
Unmark all cells in mesh
"""
if self.has_children():
for child in self.children.itervalues():
child.unmark_all()
def refine(self):
"""
Refine mesh by splitting marked cells.
"""
leaves = self.leaves()
for leaf in leaves:
if leaf.flag:
leaf.split()
leaf.unmark()
self.__balanced = False
def coarsen(self):
"""
Coarsen mesh by collapsing marked cells
"""
leaves = self.leaves()
for leaf in leaves:
parent = leaf.parent
if parent.flag:
parent.children.clear()
self.remove_supports()
self.__balanced = False
def balance_tree(self):
"""
Ensure the 2:1 rule holds
"""
leaves = self.leaves()
leaf_dict = {'N': ['SE', 'SW'], 'S': ['NE', 'NW'],
'E': ['NW', 'SW'], 'W': ['NE', 'SE']}
while len(leaves) > 0:
leaf = leaves.pop()
flag = False
#
# Check if leaf needs to be split
#
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb == None:
pass
elif nb.type == 'LEAF':
pass
else:
for pos in leaf_dict[direction]:
#
# If neighor's children nearest to you aren't LEAVES,
# then split and add children to list of leaves!
#
if nb.children[pos].type != 'LEAF':
leaf.mark()
leaf.split()
for child in leaf.children.itervalues():
child.mark_support_cell()
leaves.append(child)
#
# Check if there are any neighbors that should
# now also be split.
#
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb != None and nb.depth < leaf.depth:
leaves.append(nb)
flag = True
break
if flag:
break
self.__balanced = True
def remove_supports(self):
"""
Remove the supporting cells
"""
leaves = self.leaves()
while len(leaves) > 0:
leaf = leaves.pop()
if leaf.support_cell:
#
# Check whether its safe to delete the support cell
#
safe_to_coarsen = True
for direction in ['N', 'S', 'E', 'W']:
nb = leaf.find_neighbor(direction)
if nb.has_children():
safe_to_coarsen = False
break
if safe_to_coarsen:
parent = leaf.parent
for child in parent.children.itervalues():
#
# Delete cells individually
#
del child
parent.children.clear()
leaves.append(parent)
self.__balanced = False
def triangulate(self):
"""
Generate triangulation of mesh:
balance if necessary
populate cells with triangles
generate connectivity matrix.
#TODO: unfinished
"""
triangles = []
if not self.__balanced:
#
# Balance mesh first
#
self.balance_tree()
for leaf in self.leaves():
v = leaf.vertices
#
# Determine whether Steiner Point is necessary
#
if any([v.has_key(direction) for direction in ['N','S','E','W']]):
#
# Add Steiner vertex
#
x0, x1, y0, y1 = leaf.box()
vm = Vertex((0.5*(x0 + x1), 0.5*(y0 + y1)))
leaf.vertices['M'] = vm
sub_edge_dict = {'S': ['SW','S','SE'], \
'E': ['NE','E','SE'], \
'N': ['NE','N','NW'], \
'W': ['NW','W','SW']}
for direction in ['S','E','N','W']:
se = sub_edge_dict[direction]
if v.has_key(direction):
#
# Midpoint on this edge
#
tri = [Triangle([v[se[0]],v[se[1]],vm],parent_cell=leaf),
Triangle([v[se[1]],v[se[2]],vm],parent_cell=leaf)]
else:
#
# No midpoint
#
tri = [Triangle([v[se[0]],v[se[2]],vm],parent_cell=leaf)]
triangles.extend(tri)
else:
#
# No Steiner vertex - simple triangulation
#
tri = [Triangle([v['SW'],v['SE'],v['NE']], parent_cell=leaf), \
Triangle([v['NE'],v['NW'],v['SW']], parent_cell=leaf)]
triangles.extend(tri)
self.__triangles = triangles
def build_connectivity(self):
"""
Returns the connectivity matrix for the tree
"""
# TODO: FIX build_connectivity
econn = []
num_vertices = len(self.vertex_list)
#
# Balance tree first
#
#self.balance_tree()
for leaf in self.leaves():
add_steiner_pt = False
#
# Get global indices for each corner vertex
#
gi = {}
for pos in ['NW', 'SW', 'NE', 'SE']:
gi[pos] = leaf.vertices[pos].node_number
edges = {'S': [[gi['SW'], gi['SE']]], 'N': [[gi['NE'], gi['NW']]],
'W': [[gi['NW'], gi['SW']]], 'E': [[gi['SE'], gi['NE']]] }
opposite_direction = {'N': 'S', 'S': 'N', 'W': 'E', 'E': 'W'}
for direction in ['S', 'N', 'E', 'W']:
neighbor = leaf.find_neighbor(direction)
if neighbor != None and neighbor.type != 'LEAF':
# If neighbor has children, then add the midpoint to
# your list of vertices, update the list of edges and
# remember to add the Steiner point later on.
#
od = opposite_direction[direction]
leaf.vertices[direction] = neighbor.vertices[od]
gi[direction] = leaf.vertices[direction].node_number
add_steiner_pt = True
edges[direction] = [[edges[direction][0][0], gi[direction]],
[gi[direction], edges[direction][0][1]]]
#
# Add the Triangles to connectivity
#
if not add_steiner_pt:
#
# Simple Triangulation
#
econn.extend([[gi['SW'], gi['SE'], gi['NE']],
[gi['NE'], gi['NW'], gi['SW']]] )
elif not leaf.vertices.has_key('M') or leaf.vertices['M'] == None:
#
# Add Steiner Vertex
#
x0, x1, y0, y1 = leaf.box()
vm = Vertex((0.5*(x0 + x1), 0.5*(y0 + y1)), node_number=num_vertices)
leaf.vertices['M'] = vm
gi['M'] = vm.node_number
self.vertex_list.append(vm)
num_vertices += 1
for direction in ['N', 'S', 'E', 'W']:
for sub_edge in edges[direction]:
econn.append([sub_edge[0], sub_edge[1], gi['M']])
return econn
def plot_quadmesh(self, ax, name=None, show=True, set_axis=True):
'''
Plot the current quadmesh
'''
if self.has_children():
if set_axis:
x0, x1, y0, y1 = self.bounding_box
hx = x1 - x0
hy = y1 - y0
ax.set_xlim(x0-0.1*hx, x1+0.1*hx)
ax.set_ylim(y0-0.1*hy, y1+0.1*hy)
for child in self.children.itervalues():
ax = child.plot(ax, set_axis=False)
else:
x0, y0 = self.vertices['SW'].coordinate
x1, y1 = self.vertices['NE'].coordinate
# Plot current cell
plt.plot([x0, x0, x1, x1],[y0, y1, y0, y1],'r.')
points = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
if self.flag:
rect = plt.Polygon(points, fc='r', edgecolor='k')
else:
rect = plt.Polygon(points, fc='w', edgecolor='k')
ax.add_patch(rect)
return ax
def plot_trimesh(self, ax):
"""
Plot triangular mesh
"""
e_conn = self.build_connectivity()
for element in e_conn:
points = []
for node_num in element:
x, y = self.vertex_list[node_num].coordinate
points.append([x,y])
triangle = plt.Polygon(points, fc='w', ec='k')
ax.add_patch(triangle)
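if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. It assumes the
    # Cell API from grid.cell (mark(), split(), plot(), ...) behaves as it is
    # used by the Mesh methods above.
    mesh = Mesh(box=[0., 1., 0., 1.], nx=2, ny=2)
    for leaf in mesh.leaves():
        leaf.mark()           # flag every leaf for refinement
    mesh.refine()             # split the flagged cells
    mesh.balance_tree()       # enforce the 2:1 rule
    fig, ax = plt.subplots()
    mesh.plot_quadmesh(ax)
    plt.show()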
| mit |
rubenlorenzo/fab-city-dashboard | app/views.py | 1 | 1785 | # -*- encoding: utf-8 -*-
from app import app
from flask import Flask, render_template, jsonify
import pandas as pd
import makerlabs.fablabs_io as fio
from werkzeug.routing import Rule
# import global variables for Z2N
from .scripts.app_vars import title, metas, description, subtitle, version, authors, license, static_dir, URLroot_, fabcities
# gather global names
global_names = {
'titleApp': title, # name/brand of the app
'subtitleApp': subtitle, # explanation of what the app does
'metas': metas, # meta for referencing
'description': description, # description of the app
'version': version, # explanation of what the app does
'authors': authors, # authors in metas
'license': license
}
@app.route('/')
@app.route('/index')
def index():
print '-' * 10, 'VIEW INDEX', '-' * 50
return render_template(
"index.html",
index=True,
glob=global_names, )
# Tests by massimo
@app.route("/api/cities")
def fabicites_list():
return jsonify(fabcities)
@app.route("/api/labs")
def labs_map():
labs_geojson = fio.get_labs(format="geojson")
return labs_geojson
@app.route("/oecd/regional-data")
def regional_data():
regional_data = pd.read_csv(
app.static_folder + "/data_custom/json_stats/OECD/regional.csv",
encoding="utf-8")
# return regional_data.to_html()
return regional_data.to_json(orient='records')
@app.route("/oecd/national-data")
def national_data():
national_data = pd.read_csv(
app.static_folder + "/data_custom/json_stats/OECD/national.csv",
encoding="utf-8")
# return national_data.to_html()
return national_data.to_json(orient='records')
@app.route('/viz_format')
def info():
return render_template('modules/mod_viz.html')
| agpl-3.0 |
stevenjoelbrey/PMFutures | Python/average6HourlyData.py | 1 | 14668 | #!/usr/bin/env python2
###############################################################################
# ------------------------- Description ---------------------------------------
###############################################################################
# This script will be used to generate daily met fields from hourly nc files.
# The 6-houly data to average live in /barnes-scratch/sbrey/era_interim_nc_6_hourly
# Follows ----------------------------------------
# - get_ERA_Interim_data.py
# Precedes
# - merge_yearly_nc.py
# TODO: Handle fg10 (wind gust) daily value creation.
###############################################################################
# ---------------------- Set analysis variables--------------------------------
###############################################################################
import sys
import cesm_nc_manager as cnm
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
if len(sys.argv) != 1:
print 'Using arguments passed via command line.'
hourlyVAR = str(sys.argv[1]) # e.g. 'z'
startYear = int(sys.argv[2])
endYear = int(sys.argv[3])
else:
	# Development environment. Set variables manually here.
hourlyVAR = 'fg10'
startYear = 1992
endYear = 2016
drive = cnm.getDrive()
dataDir = drive + "era_interim_nc_6_hourly"
outputDir = drive + "era_interim_nc_daily"
print '----------------------------------------------------------------------'
print 'Working on variable: ' + hourlyVAR + ' for years: ' + str(startYear) +\
'-'+ str(endYear)
print '----------------------------------------------------------------------'
# Import the required modules
import os
import numpy as np
from mpl_toolkits.basemap import Basemap, cm
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy.ma as ma
from datetime import date
from datetime import timedelta
import matplotlib.ticker as tkr
import datetime
import time as timer
import os.path
# Start the timer on this code
timeStart = timer.time()
# Loop over the selected years
if startYear == 'all':
years = ['all']
else:
years = np.arange(startYear, endYear+1)
ii = 0 # for counting total iterations
for year in years:
year = str(year)
print '---------------------------------------------------------------------'
print 'Working on : ' + year
print '---------------------------------------------------------------------'
# Load 6-hourly data
HourlyFile = os.path.join(dataDir, hourlyVAR + "_" + year + ".nc")
print HourlyFile
nc = Dataset(HourlyFile, 'r')
VAR = nc.variables[hourlyVAR]
time = nc.variables['time']
time_hour = np.array(time[:], dtype=int)
lon = nc.variables['longitude']
lat = nc.variables['latitude']
# Some variables live on (t, level, lat, lon) grids, others (t, lat, lon)
# Find out which one using dimension keys
# e.g. [u'longitude', u'latitude', u'time'] for 'sp'
# [u'longitude', u'latitude', u'level', u'time'] for 'z'
dims = nc.dimensions.keys()
if len(dims) == 4:
level = nc.variables['level']
#######################################################################
# Handle date from hourly time dimensions
#######################################################################
if time.units == 'hours since 1900-01-01 00:00:0.0':
# For time datetime array
t0 = datetime.datetime(year=1900, month=1, day=1,\
hour=0, minute=0, second=0)
# For simply getting dates
date0 = date(year=1900, month=1, day=1)
else:
raise ValueError('Unknown time origin! Code will not work.')
# Create arrays to store datetime objects
t = []
dates = []
yearList = []
monthList = []
hourList = []
for i in range(len(time_hour)):
dt = timedelta(hours=time_hour[i])
date_new = date0 + dt
t_new = t0 + dt
year_new = t_new.year
t.append(t_new)
dates.append(date_new)
yearList.append(year_new)
monthList.append(t_new.month)
hourList.append(t_new.hour)
t = np.array(t)
dates = np.array(dates)
dateYears = np.array(yearList)
dateMonths = np.array(monthList)
dateHours = np.array(hourList)
# NOTE: Accumulation parameters (total precip (tp) and e) represent
# NOTE: accumulated values from intitialization time. For these data those
# NOTE: times are 00:00:00 and 12:00:00. I downloaded the data in 12 hour steps.
# NOTE: So for these parameters, each time in the data represents a total for the
# NOTE: previous 12 hours. This is why time series start at 12:00:00 for
# NOTE: these fields and 00:00:00 for analysis fields.
# NOTE: For maximum in time period quantity, e.g. wind gust (fg10), the time step
# NOTE: is three hourly and starts at 03:00:00. The units of wind gusts are
# NOTE: "10 meter wind gusts since previous post processing".
# Get all values numpy array into workspace
print '---------------------------------------------------'
print 'Loading the large variable array into the workspace'
print '---------------------------------------------------'
VAR_array = VAR[:]
print 'Working on the large loop averaging 6-hourly values for each day'
if (hourlyVAR != 'tp') & (hourlyVAR != 'e') & (hourlyVAR != 'fg10'):
# these are the analysis variables that always require averages for a
# given calendar date.
print '---------------------------------------------------------------------'
print 'Working with an analysis parameter whos first hour is 0. '
print '---------------------------------------------------------------------'
if dateHours[0] != 0:
raise ValueError('The first hour of analysis field was not 0Z.')
# Create structure to save daily data and do the averaging
unique_dates = np.unique(dates)
nDays = len(unique_dates) # might have to do a - 1 here now. Or search for feb 29th and set length based on that.
nLon = len(lon)
nLat = len(lat)
# Create array to store daily averaged data, based on dimensions
if len(dims) == 4:
nLevel= len(level)
dailyVAR = np.zeros((nDays, nLevel, nLat, nLon))
else:
dailyVAR = np.zeros((nDays, nLat, nLon))
for i in range(nDays):
# find unique day to work on
indexMask = np.where(dates == unique_dates[i])[0]
if len(dims) == 4:
VAR_array_subset = VAR_array[indexMask, :, :, :]
day_time_mean = np.mean(VAR_array_subset, 0)
dailyVAR[i, :, : , :] = day_time_mean
else:
# Non-precip variables of this size need an average. These are analysis variables
VAR_array_subset = VAR_array[indexMask, :, :]
day_time_mean = np.mean(VAR_array_subset, 0)
dailyVAR[i, :, : ] = day_time_mean
elif (hourlyVAR == 'fg10'):
print "Handling precip. Going to follow ecmwf guidelines for getting daily max value. "
# Create structure to save daily data and do the max value finding
unique_dates = np.unique(dates)
nDays = len(unique_dates) - 1 # last day (3 hour chunk) goes into next year. Discard that data
nLon = len(lon)
nLat = len(lat)
dailyVAR = np.zeros((nDays, nLat, nLon))
for i in range(nDays):
indexMask = np.where(dates == unique_dates[i])[0]
VAR_array_subset = VAR_array[indexMask, :, :]
# find 0:6 index of maximum value in each lat lon coordinate position array
dailyMaxValArray = np.amax(VAR_array_subset, axis=0)
# TODO: ensure that this is the max value for each coord!
dailyVAR[i,:,:] = dailyMaxValArray
	elif (dateHours[0] == 12) & (dateHours[-1] == 0) & (dateYears[-1] > int(year)):
print '---------------------------------------------------------------------'
print 'Working with an accumulation parameter with start time hour == 12. '
print '---------------------------------------------------------------------'
# These strange time conditions are all true when we are working with
# tp and e accumulation forecast fields.
# Precip units of m per 12 hour window. Requires a sum NOT an average.
# Need matching dates noon and next dates midnight to get a days total.
# e.g. total precip for Jan 1 is sum of tp at 01-01-year 12:00:00 AND
# 01-02-year 00:00:00.
# the last date in the time array will be the next year, since midnight or
# 0Z.
# In order for the code to work for these variables the same as the
# analysis fields, we are going to subtract 12 hours from each time
# element.
t = t - timedelta(hours=12)
nTime = len(t)
if nTime % 2 != 0:
raise ValueError("There is something wrong. Somehow there is a date without two 23 hour chuncks. ")
nDays = len(t)/2
nLon = len(lon)
nLat = len(lat)
# To make a mask of unique dates, we need to make timetime.datetime objects
# a more simple datetime.date object.
dates = []
for i in range(nDays*2):
dates.append(t[i].date())
dates = np.array(dates)
unique_dates = np.unique(dates)
# Now that these strange time contrains have been met, we know we can
# sum the values of every other. Create an array to store daily data in.
dailyVAR = np.zeros((nDays, nLat, nLon))
for j in range(nDays):
# The hours that match our date.
indexMask = np.where(dates == unique_dates[j])[0]
if (dateHours[indexMask[0]] == 12) & (dateHours[indexMask[1]] == 0):
# Subset the dataframe to include the two twelve hour slices we
# want.
timeSlice = VAR[indexMask, :, :]
# This statement makes sure we are really getting a time slice with
# a dimension of 2, e.g. 2 12 hour segments.
if timeSlice.shape[0] == 2:
dailySum = np.sum(timeSlice, axis=0)
dailyVAR[j,:,:] = dailySum
else:
raise ValueError("The time size was not two deep in time dim.")
# if the sum of the dailyVAR array for this date is still zero,
# no data was assigned.
if np.sum(dailyVAR[j, :,:]) == 0:
raise ValueError("No data was assigned to day index j = " + str(j))
meansCompleteTime = timer.time()
dt = (meansCompleteTime - timeStart) / 60.
print '---------------------------------------------------------------------'
print 'It took ' + str(dt) + ' minutes to create daily averages.'
print '---------------------------------------------------------------------'
# Check to see if the total amount of precip was conserved.
if hourlyVAR == 'tp':
originalSum = np.sum(VAR, axis=0)
dailySum = np.sum(dailyVAR, axis=0)
dtp = np.abs(originalSum - dailySum)
# ideally dtp is all zero. With float rounding issues it could be slightly
# different. This matrix needs to be examined.
maxDiff = np.max(dtp)
print '------------------------------------------------------------------'
print 'Maximum annual difference in rainfall is: ' + str(maxDiff)
print '------------------------------------------------------------------'
if maxDiff > 1e-10:
raise ValueError("Total rainfall depth in meters not conserved within tolerance")
#print 'The min value of dailyVAR is: ' + str(np.min(dailyVAR))
#print 'The max value of dailyVAR is: ' + str(np.max(dailyVAR))
###############################################################################
# Write the new netcdf data with the exact same formatting as the
# data read here.
# Create the Save name and make sure that this file does not exist
###############################################################################
outputFile = os.path.join(outputDir, hourlyVAR + "_" + year + ".nc")
# See if the file already exists
# os.path.isfile(outPutSavename):
print '----------------------------------------------------------------------'
print 'outputFile used:'
print outputFile
print '----------------------------------------------------------------------'
###############################################################################
# Write the daily averaged netCDF data
###############################################################################
ncFile = Dataset(outputFile, 'w', format='NETCDF4')
ncFile.description = 'Daily average of 6 hourly data'
ncFile.location = 'Global'
ncFile.createDimension('time', nDays )
ncFile.createDimension('latitude', nLat )
ncFile.createDimension('longitude', nLon )
# Create variables on the dimension they live on
if len(dims) == 4:
ncFile.createDimension('level', nLevel )
dailyVAR_ = ncFile.createVariable(hourlyVAR,'f4',
('time','level','latitude','longitude'))
# While here create the level dimesion
level_ = ncFile.createVariable('level', 'i4', ('level',))
level_.units = level.units
else:
dailyVAR_ = ncFile.createVariable(hourlyVAR,
'f4',('time','latitude','longitude'))
# Assign the same units as the loaded file to the main variable
dailyVAR_.units = VAR.units
# Create time variable
time_ = ncFile.createVariable('time', 'i4', ('time',))
time_.units = time.units
time_.calendar = time.calendar
# create lat variable
latitude_ = ncFile.createVariable('latitude', 'f4', ('latitude',))
latitude_.units = lat.units
# create longitude variable
longitude_ = ncFile.createVariable('longitude', 'f4', ('longitude',))
longitude_.units = lon.units
# Write the actual data to these dimensions
dailyVAR_[:] = dailyVAR
latitude_[:] = lat[:]
longitude_[:] = lon[:]
if len(dims) == 4:
level_[:] = level[:]
# NOTE: In general, every 4th element, starting at 0th, since there
# NOTE: are 4 analysis snapshots space by 6 hours for any given date.
# NOTE: However, tp (total precip) only has two chunks of 12 hourly data per
# NOTE: day so this needs to be handled seperately. Because tp and e fields
# NOTE: time were adjusted by minus 12 hours, all daily mean or sum fields
# NOTE: have a time stamp of the 0Z for the date of the data.
# NOTE: fg divides days into 3 hour analysis periods and they start at 03:00:00Z
# NOTE: for a given date. I save the largest wind gust of those 8 3 hour analysis
# NOTE: periods. So I need to subtract 3 from time for fg in order to get date
# NOTE: for a day to be saved as a consistent 00:00:00Z time for a given date.
if (hourlyVAR == 'tp') | (hourlyVAR == 'e'):
time = time[:] - 12
elif (hourlyVAR == 'fg10'):
time = time[:] - 3
tstep = len(time) / nDays
time_[:] = time[0::tstep]
# The difference in each time_[:] element in hours must be 24 or
# this is not working.
	if np.any(np.diff(time_[:]) != 24):
		raise ValueError('Difference in hours between days not all 24! Broken.')
# The data is written, close the ncFile and move on to the next year!
ncFile.close()
dt = (timer.time() - timeStart) / 60.
print '----------------------------------------------------------------------'
print 'It took ' + str(dt) + ' minutes to run entire script.'
print '----------------------------------------------------------------------' | mit |
dmargala/qusp | examples/analysis_prep.py | 1 | 5041 | #!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import h5py
import qusp
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# def sum_chunk(x, chunk_size, axis=-1):
# shape = x.shape
# if axis < 0:
# axis += x.ndim
# shape = shape[:axis] + (-1, chunk_size) + shape[axis+1:]
# x = x.reshape(shape)
# return x.sum(axis=axis+1)
def combine_pixels(loglam, flux, ivar, num_combine, trim_front=True):
"""
Combines neighboring pixels of inner most axis using an ivar weighted average
"""
shape = flux.shape
num_pixels = flux.shape[-1]
assert len(loglam) == num_pixels
ndim = flux.ndim
new_shape = shape[:ndim-1] + (-1, num_combine)
num_leftover = num_pixels % num_combine
    # handle num_leftover == 0 explicitly (slice(0, -0) would drop every pixel)
    s = slice(num_leftover, None) if trim_front else slice(0, num_pixels - num_leftover)
flux = flux[...,s].reshape(new_shape)
ivar = ivar[...,s].reshape(new_shape)
loglam = loglam[s].reshape(-1, num_combine)
flux, ivar = ma.average(flux, weights=ivar, axis=ndim, returned=True)
loglam = ma.average(loglam, axis=1)
return loglam, flux, ivar
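# Illustrative sketch (not part of the original script): combining 10 pixels in
# groups of 3 with trim_front=True drops the single leading leftover pixel and
# returns ivar-weighted means on the coarser grid, e.g.
#   loglam = np.linspace(3.55, 3.56, 10)
#   flux = ma.MaskedArray(np.ones((4, 10)), mask=np.zeros((4, 10), bool))
#   ivar = ma.MaskedArray(np.ones((4, 10)), mask=np.zeros((4, 10), bool))
#   ll, f, iv = combine_pixels(loglam, flux, ivar, num_combine=3)
#   # ll.shape == (3,), f.shape == (4, 3), and iv sums the weights per bin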
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
## targets to fit
parser.add_argument("-i", "--input", type=str, default=None,
help="target list")
parser.add_argument("-o", "--output", type=str, default=None,
help="output file name")
parser.add_argument("--num-combine", type=int, default=3,
help="number of pixels to combine")
parser.add_argument("--wave-min", type=float, default=3600,
help="minimum observed wavelength")
args = parser.parse_args()
# import data
skim = h5py.File(args.input, 'r')
skim_norm = skim['norm'][:][:,np.newaxis]
assert not np.any(skim_norm <= 0)
skim_flux = np.ma.MaskedArray(skim['flux'][:], mask=skim['mask'][:])/skim_norm
skim_ivar = np.ma.MaskedArray(skim['ivar'][:], mask=skim['mask'][:])*skim_norm*skim_norm
skim_loglam = skim['loglam'][:]
skim_wave = np.power(10.0, skim_loglam)
good_waves = skim_wave > args.wave_min
print 'Combining input pixels...'
loglam, flux, ivar = combine_pixels(skim_loglam[good_waves], skim_flux[:,good_waves], skim_ivar[:,good_waves], args.num_combine)
wave = np.power(10.0, loglam)
outfile = h5py.File(args.output+'.hdf5', 'w')
# save pixel flux, ivar, and mask
outfile.create_dataset('flux', data=flux.data, compression="gzip")
outfile.create_dataset('ivar', data=ivar.data, compression="gzip")
outfile.create_dataset('mask', data=ivar.mask, compression="gzip")
# save uniform wavelength grid
outfile.create_dataset('loglam', data=loglam, compression="gzip")
# save redshifts from input target list
outfile.copy(skim['z'], 'z')
# save additional quantities
outfile.copy(skim['norm'], 'norm')
# save meta data
outfile.copy(skim['meta'], 'meta')
# copy attrs
for attr_key in skim.attrs:
outfile.attrs[attr_key] = skim.attrs[attr_key]
outfile.attrs['coeff0'] = loglam[0]
outfile.attrs['coeff1'] = args.num_combine*1e-4
outfile.attrs['max_fid_index'] = len(loglam)
outfile.attrs['wave_min'] = args.wave_min
outfile.close()
# verify combined pixels
print 'Computing mean and variance of input pixels...'
skim_flux_mean = np.ma.average(skim_flux, axis=0, weights=skim_ivar)
skim_flux_var = np.ma.average((skim_flux-skim_flux_mean)**2, axis=0, weights=skim_ivar)
print 'Computing mean and variance of combined pixels...'
flux_mean = np.ma.average(flux, axis=0, weights=ivar)
flux_var = np.ma.average((flux-flux_mean)**2, axis=0, weights=ivar)
print 'Making comparison plots...'
plt.figure(figsize=(12,9))
plt.plot(skim_wave, skim_flux_mean, label='Pipeline pixels')
plt.plot(wave, flux_mean, label='Analysis pixels')
plt.ylim(0.5, 1.5)
plt.ylabel(r'Normalized Flux Mean (arb. units)')
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.legend()
plt.grid()
plt.savefig(args.output+'-flux-mean.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,9))
plt.plot(skim_wave, skim_flux_var, label='Pipeline pixels')
plt.plot(wave, flux_var, label='Analysis pixels')
plt.ylim(0, 0.45)
plt.ylabel('Normalized Flux Variance (arb. units)')
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.legend()
plt.grid()
plt.savefig(args.output+'-flux-var.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,9))
plt.plot(skim_wave, np.sum(skim_ivar, axis=0), label='Pipeline pixels')
plt.plot(wave, np.sum(ivar, axis=0), label='Analysis pixels')
plt.ylabel('Inv. Var. Total (arb. units)')
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.legend()
plt.grid()
plt.savefig(args.output+'-ivar-total.png', dpi=100, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
ky822/nyu_ml_lectures | notebooks/figures/plot_digits_datasets.py | 19 | 2750 | # Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.figure()
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.figure()
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.show()
| cc0-1.0 |
rhyolight/nupic.research | projects/sequence_prediction/reberGrammar/reberSequence_CompareTMvsLSTM.py | 13 | 2320 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.ion()
rcParams.update({'figure.autolayout': True})
def plotResult():
resultTM = np.load('result/reberSequenceTM.npz')
resultLSTM = np.load('result/reberSequenceLSTM.npz')
plt.figure()
plt.hold(True)
plt.subplot(2,2,1)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['correctRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['correctRateAll'],1),'-s',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate (Best Match) (%)')
plt.subplot(2,2,4)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['missRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['missRateAll'],1),'-*',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['fpRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['fpRateAll'],1),'-*',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_CompareTM&LSTMperformance.pdf')
if __name__ == "__main__":
plotResult()
| gpl-3.0 |
Intel-Corporation/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ChanderG/scipy | scipy/spatial/_plotutils.py | 53 | 4034 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
for simplex in hull.simplices:
ax.plot(hull.points[simplex,0], hull.points[simplex,1], 'k-')
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-')
ptp_bound = vor.points.ptp(axis=0)
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
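            # extend the ridge from its finite vertex, directed away from the
            # centroid of the input points, far enough to leave the plot bounds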
far_point = vor.vertices[i] + direction * ptp_bound.max()
ax.plot([vor.vertices[i,0], far_point[0]],
[vor.vertices[i,1], far_point[1]], 'k--')
_adjust_bounds(ax, vor.points)
return ax.figure
| bsd-3-clause |
librosa/librosa | librosa/util/utils.py | 1 | 64787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions"""
import warnings
import scipy.ndimage
import scipy.sparse
import numpy as np
import numba
from numpy.lib.stride_tricks import as_strided
from .._cache import cache
from .exceptions import ParameterError
# Constrain STFT block sizes to 256 KB
MAX_MEM_BLOCK = 2 ** 8 * 2 ** 10
__all__ = [
"MAX_MEM_BLOCK",
"frame",
"pad_center",
"fix_length",
"valid_audio",
"valid_int",
"valid_intervals",
"fix_frames",
"axis_sort",
"localmax",
"localmin",
"normalize",
"peak_pick",
"sparsify_rows",
"shear",
"stack",
"fill_off_diagonal",
"index_to_slice",
"sync",
"softmask",
"buf_to_float",
"tiny",
"cyclic_gradient",
"dtype_r2c",
"dtype_c2r",
]
def frame(x, frame_length, hop_length, axis=-1):
"""Slice a data array into (overlapping) frames.
This implementation uses low-level stride manipulation to avoid
making a copy of the data. The resulting frame representation
is a new view of the same input data.
However, if the input data is not contiguous in memory, a warning
will be issued and the output will be a full copy, rather than
a view of the input data.
For example, a one-dimensional input ``x = [0, 1, 2, 3, 4, 5, 6]``
can be framed with frame length 3 and hop length 2 in two ways.
The first (``axis=-1``), results in the array ``x_frames``::
[[0, 2, 4],
[1, 3, 5],
[2, 4, 6]]
where each column ``x_frames[:, i]`` contains a contiguous slice of
the input ``x[i * hop_length : i * hop_length + frame_length]``.
The second way (``axis=0``) results in the array ``x_frames``::
[[0, 1, 2],
[2, 3, 4],
[4, 5, 6]]
where each row ``x_frames[i]`` contains a contiguous slice of the input.
This generalizes to higher dimensional inputs, as shown in the examples below.
In general, the framing operation increments by 1 the number of dimensions,
adding a new "frame axis" either to the end of the array (``axis=-1``)
or the beginning of the array (``axis=0``).
Parameters
----------
x : np.ndarray
Array to frame
frame_length : int > 0 [scalar]
Length of the frame
hop_length : int > 0 [scalar]
Number of steps to advance between frames
axis : 0 or -1
The axis along which to frame.
If ``axis=-1`` (the default), then ``x`` is framed along its last dimension.
``x`` must be "F-contiguous" in this case.
If ``axis=0``, then ``x`` is framed along its first dimension.
``x`` must be "C-contiguous" in this case.
Returns
-------
x_frames : np.ndarray [shape=(..., frame_length, N_FRAMES) or (N_FRAMES, frame_length, ...)]
A framed view of ``x``, for example with ``axis=-1`` (framing on the last dimension)::
x_frames[..., j] == x[..., j * hop_length : j * hop_length + frame_length]
If ``axis=0`` (framing on the first dimension), then::
x_frames[j] = x[j * hop_length : j * hop_length + frame_length]
Raises
------
ParameterError
If ``x`` is not an `np.ndarray`.
If ``x.shape[axis] < frame_length``, there is not enough data to fill one frame.
If ``hop_length < 1``, frames cannot advance.
If ``axis`` is not 0 or -1. Framing is only supported along the first or last axis.
See Also
--------
numpy.asfortranarray : Convert data to F-contiguous representation
numpy.ascontiguousarray : Convert data to C-contiguous representation
numpy.ndarray.flags : information about the memory layout of a numpy `ndarray`.
Examples
--------
Extract 2048-sample frames from monophonic signal with a hop of 64 samples per frame
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
>>> frames
array([[-1.407e-03, -2.604e-02, ..., -1.795e-05, -8.108e-06],
[-4.461e-04, -3.721e-02, ..., -1.573e-05, -1.652e-05],
...,
[ 7.960e-02, -2.335e-01, ..., -6.815e-06, 1.266e-05],
[ 9.568e-02, -1.252e-01, ..., 7.397e-06, -1.921e-05]],
dtype=float32)
>>> y.shape
(117601,)
>>> frames.shape
(2048, 1806)
Or frame along the first axis instead of the last:
>>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64, axis=0)
>>> frames.shape
(1806, 2048)
Frame a stereo signal:
>>> y, sr = librosa.load(librosa.ex('trumpet', hq=True), mono=False)
>>> y.shape
(2, 117601)
    >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
    >>> frames.shape
    (2, 2048, 1806)
Carve an STFT into fixed-length patches of 32 frames with 50% overlap
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = np.abs(librosa.stft(y))
>>> S.shape
(1025, 230)
>>> S_patch = librosa.util.frame(S, frame_length=32, hop_length=16)
>>> S_patch.shape
(1025, 32, 13)
>>> # The first patch contains the first 32 frames of S
>>> np.allclose(S_patch[:, :, 0], S[:, :32])
True
>>> # The second patch contains frames 16 to 16+32=48, and so on
>>> np.allclose(S_patch[:, :, 1], S[:, 16:48])
True
"""
if not isinstance(x, np.ndarray):
raise ParameterError(
"Input must be of type numpy.ndarray, " "given type(x)={}".format(type(x))
)
if x.shape[axis] < frame_length:
raise ParameterError(
"Input is too short (n={:d})"
" for frame_length={:d}".format(x.shape[axis], frame_length)
)
if hop_length < 1:
raise ParameterError("Invalid hop_length: {:d}".format(hop_length))
if axis == -1 and not x.flags["F_CONTIGUOUS"]:
warnings.warn(
"librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(axis)
)
x = np.asfortranarray(x)
elif axis == 0 and not x.flags["C_CONTIGUOUS"]:
warnings.warn(
"librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(axis)
)
x = np.ascontiguousarray(x)
n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
strides = np.asarray(x.strides)
new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize
if axis == -1:
shape = list(x.shape)[:-1] + [frame_length, n_frames]
strides = list(strides) + [hop_length * new_stride]
elif axis == 0:
shape = [n_frames, frame_length] + list(x.shape)[1:]
strides = [hop_length * new_stride] + list(strides)
else:
raise ParameterError("Frame axis={} must be either 0 or -1".format(axis))
return as_strided(x, shape=shape, strides=strides)
@cache(level=20)
def valid_audio(y, mono=True):
"""Determine whether a variable contains valid audio data.
If ``mono=True``, then ``y`` is only considered valid if it has shape
``(N,)`` (number of samples).
If ``mono=False``, then ``y`` may be either monophonic, or have shape
``(2, N)`` (stereo) or ``(K, N)`` for ``K>=2`` for general multi-channel.
Parameters
----------
y : np.ndarray
The input data to validate
mono : bool
Whether or not to require monophonic audio
Returns
-------
valid : bool
True if all tests pass
Raises
------
ParameterError
In any of these cases:
- ``type(y)`` is not ``np.ndarray``
- ``y.dtype`` is not floating-point
- ``mono == True`` and ``y.ndim`` is not 1
- ``mono == False`` and ``y.ndim`` is not 1 or 2
- ``mono == False`` and ``y.ndim == 2`` but ``y.shape[0] == 1``
- ``np.isfinite(y).all()`` is False
Notes
-----
This function caches at level 20.
Examples
--------
>>> # By default, valid_audio allows only mono signals
>>> filepath = librosa.ex('trumpet', hq=True)
>>> y_mono, sr = librosa.load(filepath, mono=True)
>>> y_stereo, _ = librosa.load(filepath, mono=False)
>>> librosa.util.valid_audio(y_mono), librosa.util.valid_audio(y_stereo)
True, False
>>> # To allow stereo signals, set mono=False
>>> librosa.util.valid_audio(y_stereo, mono=False)
True
See also
--------
numpy.float32
"""
if not isinstance(y, np.ndarray):
raise ParameterError("Audio data must be of type numpy.ndarray")
if not np.issubdtype(y.dtype, np.floating):
raise ParameterError("Audio data must be floating-point")
if mono and y.ndim != 1:
raise ParameterError(
"Invalid shape for monophonic audio: "
"ndim={:d}, shape={}".format(y.ndim, y.shape)
)
elif y.ndim > 2 or y.ndim == 0:
raise ParameterError(
"Audio data must have shape (samples,) or (channels, samples). "
"Received shape={}".format(y.shape)
)
elif y.ndim == 2 and y.shape[0] < 2:
raise ParameterError(
"Mono data must have shape (samples,). " "Received shape={}".format(y.shape)
)
if not np.isfinite(y).all():
raise ParameterError("Audio buffer is not finite everywhere")
return True
def valid_int(x, cast=None):
"""Ensure that an input value is integer-typed.
This is primarily useful for ensuring integrable-valued
array indices.
Parameters
----------
x : number
A scalar value to be cast to int
cast : function [optional]
A function to modify ``x`` before casting.
Default: `np.floor`
Returns
-------
x_int : int
``x_int = int(cast(x))``
Raises
------
ParameterError
If ``cast`` is provided and is not callable.
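    Examples
    --------
    For instance, flooring (the default cast) versus rounding up:
    >>> librosa.util.valid_int(33.3)
    33
    >>> librosa.util.valid_int(33.3, cast=np.ceil)
    34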
"""
if cast is None:
cast = np.floor
if not callable(cast):
raise ParameterError("cast parameter must be callable")
return int(cast(x))
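# Illustrative note (added comment, not part of the original librosa source):
# valid_int(5.7) applies the default np.floor and returns 5, while
# valid_int(5.2, cast=np.ceil) returns 6; passing a non-callable ``cast``
# raises ParameterError.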
def valid_intervals(intervals):
"""Ensure that an array is a valid representation of time intervals:
- intervals.ndim == 2
- intervals.shape[1] == 2
- intervals[i, 0] <= intervals[i, 1] for all i
Parameters
----------
intervals : np.ndarray [shape=(n, 2)]
set of time intervals
Returns
-------
valid : bool
True if ``intervals`` passes validation.
"""
if intervals.ndim != 2 or intervals.shape[-1] != 2:
raise ParameterError("intervals must have shape (n, 2)")
if np.any(intervals[:, 0] > intervals[:, 1]):
raise ParameterError(
"intervals={} must have non-negative durations".format(intervals)
)
return True
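# Illustrative note (added comment, not part of the original librosa source):
# valid_intervals(np.array([[0.0, 1.5], [1.5, 3.0]])) returns True, whereas an
# array whose start time exceeds its end time, or one that is not of shape
# (n, 2), raises ParameterError.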
def pad_center(data, size, axis=-1, **kwargs):
"""Pad an array to a target length along a target axis.
This differs from `np.pad` by centering the data prior to padding,
analogous to `str.center`
Examples
--------
>>> # Generate a vector
>>> data = np.ones(5)
>>> librosa.util.pad_center(data, 10, mode='constant')
array([ 0., 0., 1., 1., 1., 1., 1., 0., 0., 0.])
>>> # Pad a matrix along its first dimension
>>> data = np.ones((3, 5))
>>> librosa.util.pad_center(data, 7, axis=0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # Or its second dimension
>>> librosa.util.pad_center(data, 7, axis=1)
array([[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.]])
Parameters
----------
data : np.ndarray
Vector to be padded and centered
size : int >= len(data) [scalar]
Length to pad ``data``
axis : int
Axis along which to pad and center the data
kwargs : additional keyword arguments
arguments passed to `np.pad`
Returns
-------
data_padded : np.ndarray
``data`` centered and padded to length ``size`` along the
specified axis
Raises
------
ParameterError
If ``size < data.shape[axis]``
See Also
--------
numpy.pad
"""
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
lpad = int((size - n) // 2)
lengths = [(0, 0)] * data.ndim
lengths[axis] = (lpad, int(size - n - lpad))
if lpad < 0:
raise ParameterError(
("Target size ({:d}) must be " "at least input size ({:d})").format(size, n)
)
return np.pad(data, lengths, **kwargs)
def fix_length(data, size, axis=-1, **kwargs):
"""Fix the length an array ``data`` to exactly ``size`` along a target axis.
If ``data.shape[axis] < n``, pad according to the provided kwargs.
By default, ``data`` is padded with trailing zeros.
Examples
--------
>>> y = np.arange(7)
>>> # Default: pad with zeros
>>> librosa.util.fix_length(y, 10)
array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> # Trim to a desired length
>>> librosa.util.fix_length(y, 5)
array([0, 1, 2, 3, 4])
>>> # Use edge-padding instead of zeros
>>> librosa.util.fix_length(y, 10, mode='edge')
array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])
Parameters
----------
data : np.ndarray
array to be length-adjusted
size : int >= 0 [scalar]
desired length of the array
axis : int, <= data.ndim
axis along which to fix length
kwargs : additional keyword arguments
Parameters to ``np.pad``
Returns
-------
data_fixed : np.ndarray [shape=data.shape]
``data`` either trimmed or padded to length ``size``
along the specified axis.
See Also
--------
numpy.pad
"""
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
if n > size:
slices = [slice(None)] * data.ndim
slices[axis] = slice(0, size)
return data[tuple(slices)]
elif n < size:
lengths = [(0, 0)] * data.ndim
lengths[axis] = (0, size - n)
return np.pad(data, lengths, **kwargs)
return data
def fix_frames(frames, x_min=0, x_max=None, pad=True):
"""Fix a list of frames to lie within [x_min, x_max]
Examples
--------
>>> # Generate a list of frame indices
>>> frames = np.arange(0, 1000.0, 50)
>>> frames
array([ 0., 50., 100., 150., 200., 250., 300., 350.,
400., 450., 500., 550., 600., 650., 700., 750.,
800., 850., 900., 950.])
>>> # Clip to span at most 250
>>> librosa.util.fix_frames(frames, x_max=250)
array([ 0, 50, 100, 150, 200, 250])
>>> # Or pad to span up to 2500
>>> librosa.util.fix_frames(frames, x_max=2500)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 500, 550, 600, 650, 700, 750, 800, 850,
900, 950, 2500])
>>> librosa.util.fix_frames(frames, x_max=2500, pad=False)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 800, 850, 900, 950])
>>> # Or starting away from zero
>>> frames = np.arange(200, 500, 33)
>>> frames
array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames, x_max=500)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497,
500])
Parameters
----------
frames : np.ndarray [shape=(n_frames,)]
List of non-negative frame indices
x_min : int >= 0 or None
Minimum allowed frame index
x_max : int >= 0 or None
Maximum allowed frame index
pad : boolean
If ``True``, then ``frames`` is expanded to span the full range
``[x_min, x_max]``
Returns
-------
fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
Fixed frame indices, flattened and sorted
Raises
------
ParameterError
If ``frames`` contains negative values
"""
frames = np.asarray(frames)
if np.any(frames < 0):
raise ParameterError("Negative frame index detected")
if pad and (x_min is not None or x_max is not None):
frames = np.clip(frames, x_min, x_max)
if pad:
pad_data = []
if x_min is not None:
pad_data.append(x_min)
if x_max is not None:
pad_data.append(x_max)
frames = np.concatenate((pad_data, frames))
if x_min is not None:
frames = frames[frames >= x_min]
if x_max is not None:
frames = frames[frames <= x_max]
return np.unique(frames).astype(int)
def axis_sort(S, axis=-1, index=False, value=None):
"""Sort an array along its rows or columns.
Examples
--------
Visualize NMF output for a spectrogram S
>>> # Sort the columns of W by peak frequency bin
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> S = np.abs(librosa.stft(y))
>>> W, H = librosa.decompose.decompose(S, n_components=64)
>>> W_sort = librosa.util.axis_sort(W)
Or sort by the lowest frequency bin
>>> W_sort = librosa.util.axis_sort(W, value=np.argmin)
Or sort the rows instead of the columns
>>> W_sort_rows = librosa.util.axis_sort(W, axis=0)
Get the sorting index also, and use it to permute the rows of H
>>> W_sort, idx = librosa.util.axis_sort(W, index=True)
>>> H_sort = H[idx, :]
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, ncols=2)
>>> img_w = librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max),
... y_axis='log', ax=ax[0, 0])
>>> ax[0, 0].set(title='W')
>>> ax[0, 0].label_outer()
>>> img_act = librosa.display.specshow(H, x_axis='time', ax=ax[0, 1])
>>> ax[0, 1].set(title='H')
>>> ax[0, 1].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(W_sort,
... ref=np.max),
... y_axis='log', ax=ax[1, 0])
>>> ax[1, 0].set(title='W sorted')
>>> librosa.display.specshow(H_sort, x_axis='time', ax=ax[1, 1])
>>> ax[1, 1].set(title='H sorted')
>>> ax[1, 1].label_outer()
>>> fig.colorbar(img_w, ax=ax[:, 0], orientation='horizontal')
>>> fig.colorbar(img_act, ax=ax[:, 1], orientation='horizontal')
Parameters
----------
S : np.ndarray [shape=(d, n)]
Array to be sorted
axis : int [scalar]
The axis along which to compute the sorting values
- ``axis=0`` to sort rows by peak column index
- ``axis=1`` to sort columns by peak row index
index : boolean [scalar]
If true, returns the index array as well as the permuted data.
value : function
function to return the index corresponding to the sort order.
Default: `np.argmax`.
Returns
-------
S_sort : np.ndarray [shape=(d, n)]
``S`` with the columns or rows permuted in sorting order
idx : np.ndarray (optional) [shape=(d,) or (n,)]
If ``index == True``, the sorting index used to permute ``S``.
Length of ``idx`` corresponds to the selected ``axis``.
Raises
------
ParameterError
If ``S`` does not have exactly 2 dimensions (``S.ndim != 2``)
"""
if value is None:
value = np.argmax
if S.ndim != 2:
raise ParameterError("axis_sort is only defined for 2D arrays")
bin_idx = value(S, axis=np.mod(1 - axis, S.ndim))
idx = np.argsort(bin_idx)
sort_slice = [slice(None)] * S.ndim
sort_slice[axis] = idx
if index:
return S[tuple(sort_slice)], idx
else:
return S[tuple(sort_slice)]
@cache(level=40)
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
"""Normalize an array along a chosen axis.
Given a norm (described below) and a target axis, the input
array is scaled so that::
norm(S, axis=axis) == 1
For example, ``axis=0`` normalizes each column of a 2-d array
by aggregating over the rows (0-axis).
Similarly, ``axis=1`` normalizes each row of a 2-d array.
This function also supports thresholding small-norm slices:
any slice (i.e., row or column) with norm below a specified
``threshold`` can be left un-normalized, set to all-zeros, or
filled with uniform non-zero values that normalize to 1.
Note: the semantics of this function differ from
`scipy.linalg.norm` in two ways: multi-dimensional arrays
are supported, but matrix-norms are not.
Parameters
----------
S : np.ndarray
The matrix to normalize
norm : {np.inf, -np.inf, 0, float > 0, None}
- `np.inf` : maximum absolute value
- `-np.inf` : minimum absolute value
- `0` : number of non-zeros (the support)
- float : corresponding l_p norm
See `scipy.linalg.norm` for details.
- None : no normalization is performed
axis : int [scalar]
Axis along which to compute the norm.
threshold : number > 0 [optional]
Only the columns (or rows) with norm at least ``threshold`` are
normalized.
By default, the threshold is determined from
the numerical precision of ``S.dtype``.
fill : None or bool
If None, then columns (or rows) with norm below ``threshold``
are left as is.
If False, then columns (rows) with norm below ``threshold``
are set to 0.
If True, then columns (rows) with norm below ``threshold``
are filled uniformly such that the corresponding norm is 1.
.. note:: ``fill=True`` is incompatible with ``norm=0`` because
no uniform vector exists with l0 "norm" equal to 1.
Returns
-------
S_norm : np.ndarray [shape=S.shape]
Normalized array
Raises
------
ParameterError
If ``norm`` is not among the valid types defined above
If ``S`` is not finite
If ``fill=True`` and ``norm=0``
See Also
--------
scipy.linalg.norm
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct an example matrix
>>> S = np.vander(np.arange(-2.0, 2.0))
>>> S
array([[-8., 4., -2., 1.],
[-1., 1., -1., 1.],
[ 0., 0., 0., 1.],
[ 1., 1., 1., 1.]])
>>> # Max (l-infinity)-normalize the columns
>>> librosa.util.normalize(S)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # Max (l-infinity)-normalize the rows
>>> librosa.util.normalize(S, axis=1)
array([[-1. , 0.5 , -0.25 , 0.125],
[-1. , 1. , -1. , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 1. , 1. , 1. , 1. ]])
>>> # l1-normalize the columns
>>> librosa.util.normalize(S, norm=1)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
>>> # l2-normalize the columns
>>> librosa.util.normalize(S, norm=2)
array([[-0.985, 0.943, -0.816, 0.5 ],
[-0.123, 0.236, -0.408, 0.5 ],
[ 0. , 0. , 0. , 0.5 ],
[ 0.123, 0.236, 0.408, 0.5 ]])
>>> # Thresholding and filling
>>> S[:, -1] = 1e-308
>>> S
array([[ -8.000e+000, 4.000e+000, -2.000e+000,
1.000e-308],
[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.000e+000, 1.000e+000, 1.000e+000,
1.000e-308]])
>>> # By default, small-norm columns are left untouched
>>> librosa.util.normalize(S)
array([[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ -1.250e-001, 2.500e-001, -5.000e-001,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.250e-001, 2.500e-001, 5.000e-001,
1.000e-308]])
>>> # Small-norm columns can be zeroed out
>>> librosa.util.normalize(S, fill=False)
array([[-1. , 1. , -1. , 0. ],
[-0.125, 0.25 , -0.5 , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0.125, 0.25 , 0.5 , 0. ]])
>>> # Or set to constant with unit-norm
>>> librosa.util.normalize(S, fill=True)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # With an l1 norm instead of max-norm
>>> librosa.util.normalize(S, norm=1, fill=True)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
"""
# Avoid div-by-zero
if threshold is None:
threshold = tiny(S)
elif threshold <= 0:
raise ParameterError(
"threshold={} must be strictly " "positive".format(threshold)
)
if fill not in [None, False, True]:
raise ParameterError("fill={} must be None or boolean".format(fill))
if not np.all(np.isfinite(S)):
raise ParameterError("Input must be finite")
# All norms only depend on magnitude, let's do that first
mag = np.abs(S).astype(np.float)
# For max/min norms, filling with 1 works
fill_norm = 1
if norm == np.inf:
length = np.max(mag, axis=axis, keepdims=True)
elif norm == -np.inf:
length = np.min(mag, axis=axis, keepdims=True)
elif norm == 0:
if fill is True:
raise ParameterError("Cannot normalize with norm=0 and fill=True")
length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)
elif np.issubdtype(type(norm), np.number) and norm > 0:
length = np.sum(mag ** norm, axis=axis, keepdims=True) ** (1.0 / norm)
if axis is None:
fill_norm = mag.size ** (-1.0 / norm)
else:
fill_norm = mag.shape[axis] ** (-1.0 / norm)
elif norm is None:
return S
else:
raise ParameterError("Unsupported norm: {}".format(repr(norm)))
# indices where norm is below the threshold
small_idx = length < threshold
Snorm = np.empty_like(S)
if fill is None:
# Leave small indices un-normalized
length[small_idx] = 1.0
Snorm[:] = S / length
elif fill:
# If we have a non-zero fill value, we locate those entries by
# doing a nan-divide.
# If S was finite, then length is finite (except for small positions)
length[small_idx] = np.nan
Snorm[:] = S / length
Snorm[np.isnan(Snorm)] = fill_norm
else:
# Set small values to zero by doing an inf-divide.
# This is safe (by IEEE-754) as long as S is finite.
length[small_idx] = np.inf
Snorm[:] = S / length
return Snorm
def localmax(x, axis=0):
"""Find local maxima in an array
An element ``x[i]`` is considered a local maximum if the following
conditions are met:
- ``x[i] > x[i-1]``
- ``x[i] >= x[i+1]``
Note that the first condition is strict, and that the first element
``x[0]`` will never be considered as a local maximum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmax(x)
array([False, False, False, True, False, True, False, True], dtype=bool)
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmax(x, axis=0)
array([[False, False, False],
[ True, False, False],
[False, True, True]], dtype=bool)
>>> librosa.util.localmax(x, axis=1)
array([[False, False, True],
[False, False, True],
[False, False, True]], dtype=bool)
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local maximality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local maximality along ``axis``
See Also
--------
localmin
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode="edge")
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x > x_pad[tuple(inds1)]) & (x >= x_pad[tuple(inds2)])
def localmin(x, axis=0):
"""Find local minima in an array
An element ``x[i]`` is considered a local minimum if the following
conditions are met:
- ``x[i] < x[i-1]``
- ``x[i] <= x[i+1]``
Note that the first condition is strict, and that the first element
``x[0]`` will never be considered as a local minimum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmin(x)
array([False, True, False, False, True, False, True, False])
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmin(x, axis=0)
array([[False, False, False],
[False, True, True],
[False, False, False]])
>>> librosa.util.localmin(x, axis=1)
array([[False, True, False],
[False, True, False],
[False, True, False]])
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local minimality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local minimality along ``axis``
See Also
--------
localmax
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode="edge")
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x < x_pad[tuple(inds1)]) & (x <= x_pad[tuple(inds2)])
def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
"""Uses a flexible heuristic to pick peaks in a signal.
    A sample n is selected as a peak if the corresponding ``x[n]``
fulfills the following three conditions:
1. ``x[n] == max(x[n - pre_max:n + post_max])``
2. ``x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta``
3. ``n - previous_n > wait``
where ``previous_n`` is the last sample picked as a peak (greedily).
This implementation is based on [#]_ and [#]_.
.. [#] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
"Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
2012.
.. [#] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py
Parameters
----------
x : np.ndarray [shape=(n,)]
input signal to peak picks from
pre_max : int >= 0 [scalar]
number of samples before ``n`` over which max is computed
post_max : int >= 1 [scalar]
number of samples after ``n`` over which max is computed
pre_avg : int >= 0 [scalar]
number of samples before ``n`` over which mean is computed
post_avg : int >= 1 [scalar]
number of samples after ``n`` over which mean is computed
delta : float >= 0 [scalar]
threshold offset for mean
wait : int >= 0 [scalar]
number of samples to wait after picking a peak
Returns
-------
peaks : np.ndarray [shape=(n_peaks,), dtype=int]
indices of peaks in ``x``
Raises
------
ParameterError
If any input lies outside its defined range
Examples
--------
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... hop_length=512,
... aggregate=np.median)
>>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
>>> peaks
array([ 3, 27, 40, 61, 72, 88, 103])
>>> import matplotlib.pyplot as plt
>>> times = librosa.times_like(onset_env, sr=sr, hop_length=512)
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> D = np.abs(librosa.stft(y))
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time', ax=ax[1])
>>> ax[0].plot(times, onset_env, alpha=0.8, label='Onset strength')
>>> ax[0].vlines(times[peaks], 0,
... onset_env.max(), color='r', alpha=0.8,
... label='Selected peaks')
>>> ax[0].legend(frameon=True, framealpha=0.8)
>>> ax[0].label_outer()
"""
if pre_max < 0:
raise ParameterError("pre_max must be non-negative")
if pre_avg < 0:
raise ParameterError("pre_avg must be non-negative")
if delta < 0:
raise ParameterError("delta must be non-negative")
if wait < 0:
raise ParameterError("wait must be non-negative")
if post_max <= 0:
raise ParameterError("post_max must be positive")
if post_avg <= 0:
raise ParameterError("post_avg must be positive")
if x.ndim != 1:
raise ParameterError("input array must be one-dimensional")
# Ensure valid index types
pre_max = valid_int(pre_max, cast=np.ceil)
post_max = valid_int(post_max, cast=np.ceil)
pre_avg = valid_int(pre_avg, cast=np.ceil)
post_avg = valid_int(post_avg, cast=np.ceil)
wait = valid_int(wait, cast=np.ceil)
# Get the maximum of the signal over a sliding window
max_length = pre_max + post_max
max_origin = np.ceil(0.5 * (pre_max - post_max))
# Using mode='constant' and cval=x.min() effectively truncates
# the sliding window at the boundaries
mov_max = scipy.ndimage.filters.maximum_filter1d(
x, int(max_length), mode="constant", origin=int(max_origin), cval=x.min()
)
# Get the mean of the signal over a sliding window
avg_length = pre_avg + post_avg
avg_origin = np.ceil(0.5 * (pre_avg - post_avg))
# Here, there is no mode which results in the behavior we want,
# so we'll correct below.
mov_avg = scipy.ndimage.filters.uniform_filter1d(
x, int(avg_length), mode="nearest", origin=int(avg_origin)
)
# Correct sliding average at the beginning
n = 0
# Only need to correct in the range where the window needs to be truncated
while n - pre_avg < 0 and n < x.shape[0]:
# This just explicitly does mean(x[n - pre_avg:n + post_avg])
# with truncation
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start : n + post_avg])
n += 1
# Correct sliding average at the end
n = x.shape[0] - post_avg
# When post_avg > x.shape[0] (weird case), reset to 0
n = n if n > 0 else 0
while n < x.shape[0]:
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start : n + post_avg])
n += 1
# First mask out all entries not equal to the local max
detections = x * (x == mov_max)
# Then mask out all entries less than the thresholded average
detections = detections * (detections >= (mov_avg + delta))
# Initialize peaks array, to be filled greedily
peaks = []
# Remove onsets which are close together in time
last_onset = -np.inf
for i in np.nonzero(detections)[0]:
        # Only report an onset if at least "wait" samples have passed since the last one
if i > last_onset + wait:
peaks.append(i)
# Save last reported onset
last_onset = i
return np.array(peaks)
@cache(level=40)
def sparsify_rows(x, quantile=0.01, dtype=None):
"""Return a row-sparse matrix approximating the input
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of ``x``
dtype : np.dtype, optional
The dtype of the output array.
If not provided, then ``x.dtype`` will be used.
Returns
-------
x_sparse : ``scipy.sparse.csr_matrix`` [shape=x.shape]
Row-sparsified approximation of ``x``
If ``x.ndim == 1``, then ``x`` is interpreted as a row vector,
and ``x_sparse.shape == (1, len(x))``.
Raises
------
ParameterError
If ``x.ndim > 2``
If ``quantile`` lies outside ``[0, 1.0)``
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
"""
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError(
"Input must have 2 or fewer dimensions. "
"Provided x.shape={}.".format(x.shape)
)
if not 0.0 <= quantile < 1:
raise ParameterError("Invalid quantile {:.2f}".format(quantile))
if dtype is None:
dtype = x.dtype
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr()
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in ``x``
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1.0 / float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = "<i{:d}".format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
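# Illustrative note (added comment, not part of the original librosa source):
# for a little-endian 16-bit PCM buffer such as
# np.array([0, 16384, -32768], dtype='<i2').tobytes(), buf_to_float scales by
# 1 / 32768 and returns approximately [0.0, 0.5, -1.0] as float32.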
def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True):
"""Generate a slice array from an index array.
Parameters
----------
idx : list-like
Array of index boundaries
idx_min, idx_max : None or int
Minimum and maximum allowed indices
step : None or int
Step size for each slice. If `None`, then the default
step of 1 is used.
pad : boolean
If `True`, pad ``idx`` to span the range ``idx_min:idx_max``.
Returns
-------
slices : list of slice
``slices[i] = slice(idx[i], idx[i+1], step)``
Additional slice objects may be added at the beginning or end,
depending on whether ``pad==True`` and the supplied values for
``idx_min`` and ``idx_max``.
See Also
--------
fix_frames
Examples
--------
>>> # Generate slices from spaced indices
>>> librosa.util.index_to_slice(np.arange(20, 100, 15))
[slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None),
slice(80, 95, None)]
>>> # Pad to span the range (0, 100)
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100)
[slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None),
slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)]
>>> # Use a step of 5 for each slice
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100, step=5)
[slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5),
slice(80, 95, 5), slice(95, 100, 5)]
"""
# First, normalize the index set
idx_fixed = fix_frames(idx, idx_min, idx_max, pad=pad)
# Now convert the indices to slices
return [slice(start, end, step) for (start, end) in zip(idx_fixed, idx_fixed[1:])]
@cache(level=40)
def sync(data, idx, aggregate=None, pad=True, axis=-1):
"""Synchronous aggregation of a multi-dimensional array between boundaries
.. note::
In order to ensure total coverage, boundary points may be added
to ``idx``.
If synchronizing a feature matrix against beat tracker output, ensure
that frame index numbers are properly aligned and use the same hop length.
Parameters
----------
data : np.ndarray
multi-dimensional array of features
idx : iterable of ints or slices
Either an ordered array of boundary indices, or
an iterable collection of slice objects.
aggregate : function
aggregation function (default: `np.mean`)
pad : boolean
If `True`, ``idx`` is padded to span the full range ``[0, data.shape[axis]]``
axis : int
The axis along which to aggregate data
Returns
-------
data_sync : ndarray
``data_sync`` will have the same dimension as ``data``, except that the ``axis``
coordinate will be reduced according to ``idx``.
For example, a 2-dimensional ``data`` with ``axis=-1`` should satisfy::
data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1)
Raises
------
ParameterError
If the index set is not of consistent type (all slices or all integers)
Notes
-----
This function caches at level 40.
Examples
--------
Beat-synchronous CQT spectra
>>> y, sr = librosa.load(librosa.ex('choice'))
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
By default, use mean aggregation
>>> C_avg = librosa.util.sync(C, beats)
Use median-aggregation instead of mean
>>> C_med = librosa.util.sync(C, beats,
... aggregate=np.median)
Or sub-beat synchronization
>>> sub_beats = librosa.segment.subsegment(C, beats)
>>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])
>>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median)
Plot the results
>>> import matplotlib.pyplot as plt
>>> beat_t = librosa.frames_to_time(beats, sr=sr)
>>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)
>>> fig, ax = plt.subplots(nrows=3, sharex=True, sharey=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... x_axis='time', ax=ax[0])
>>> ax[0].set(title='CQT power, shape={}'.format(C.shape))
>>> ax[0].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med,
... ref=np.max),
... x_coords=beat_t, x_axis='time', ax=ax[1])
>>> ax[1].set(title='Beat synchronous CQT power, '
... 'shape={}'.format(C_med.shape))
>>> ax[1].label_outer()
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,
... ref=np.max),
... x_coords=subbeat_t, x_axis='time', ax=ax[2])
>>> ax[2].set(title='Sub-beat synchronous CQT power, '
... 'shape={}'.format(C_med_sub.shape))
"""
if aggregate is None:
aggregate = np.mean
shape = list(data.shape)
if np.all([isinstance(_, slice) for _ in idx]):
slices = idx
elif np.all([np.issubdtype(type(_), np.integer) for _ in idx]):
slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad)
else:
raise ParameterError("Invalid index set: {}".format(idx))
agg_shape = list(shape)
agg_shape[axis] = len(slices)
data_agg = np.empty(
agg_shape, order="F" if np.isfortran(data) else "C", dtype=data.dtype
)
idx_in = [slice(None)] * data.ndim
idx_agg = [slice(None)] * data_agg.ndim
for (i, segment) in enumerate(slices):
idx_in[axis] = segment
idx_agg[axis] = i
data_agg[tuple(idx_agg)] = aggregate(data[tuple(idx_in)], axis=axis)
return data_agg
def softmask(X, X_ref, power=1, split_zeros=False):
"""Robustly compute a soft-mask operation.
``M = X**power / (X**power + X_ref**power)``
Parameters
----------
X : np.ndarray
The (non-negative) input array corresponding to the positive mask elements
X_ref : np.ndarray
The (non-negative) array of reference or background elements.
Must have the same shape as ``X``.
power : number > 0 or np.inf
If finite, returns the soft mask computed in a numerically stable way
If infinite, returns a hard (binary) mask equivalent to ``X > X_ref``.
Note: for hard masks, ties are always broken in favor of ``X_ref`` (``mask=0``).
split_zeros : bool
If `True`, entries where ``X`` and ``X_ref`` are both small (close to 0)
will receive mask values of 0.5.
Otherwise, the mask is set to 0 for these entries.
Returns
-------
mask : np.ndarray, shape=X.shape
The output mask array
Raises
------
ParameterError
If ``X`` and ``X_ref`` have different shapes.
If ``X`` or ``X_ref`` are negative anywhere
If ``power <= 0``
Examples
--------
>>> X = 2 * np.ones((3, 3))
>>> X_ref = np.vander(np.arange(3.0))
>>> X
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]])
>>> X_ref
array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]])
>>> librosa.util.softmask(X, X_ref, power=1)
array([[ 1. , 1. , 0.667],
[ 0.667, 0.667, 0.667],
[ 0.333, 0.5 , 0.667]])
>>> librosa.util.softmask(X_ref, X, power=1)
array([[ 0. , 0. , 0.333],
[ 0.333, 0.333, 0.333],
[ 0.667, 0.5 , 0.333]])
>>> librosa.util.softmask(X, X_ref, power=2)
array([[ 1. , 1. , 0.8],
[ 0.8, 0.8, 0.8],
[ 0.2, 0.5, 0.8]])
>>> librosa.util.softmask(X, X_ref, power=4)
array([[ 1. , 1. , 0.941],
[ 0.941, 0.941, 0.941],
[ 0.059, 0.5 , 0.941]])
>>> librosa.util.softmask(X, X_ref, power=100)
array([[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 7.889e-31, 5.000e-01, 1.000e+00]])
>>> librosa.util.softmask(X, X_ref, power=np.inf)
array([[ True, True, True],
[ True, True, True],
[False, False, True]], dtype=bool)
"""
if X.shape != X_ref.shape:
raise ParameterError("Shape mismatch: {}!={}".format(X.shape, X_ref.shape))
if np.any(X < 0) or np.any(X_ref < 0):
raise ParameterError("X and X_ref must be non-negative")
if power <= 0:
raise ParameterError("power must be strictly positive")
# We're working with ints, cast to float.
dtype = X.dtype
if not np.issubdtype(dtype, np.floating):
dtype = np.float32
# Re-scale the input arrays relative to the larger value
Z = np.maximum(X, X_ref).astype(dtype)
bad_idx = Z < np.finfo(dtype).tiny
Z[bad_idx] = 1
# For finite power, compute the softmask
if np.isfinite(power):
mask = (X / Z) ** power
ref_mask = (X_ref / Z) ** power
good_idx = ~bad_idx
mask[good_idx] /= mask[good_idx] + ref_mask[good_idx]
# Wherever energy is below energy in both inputs, split the mask
if split_zeros:
mask[bad_idx] = 0.5
else:
mask[bad_idx] = 0.0
else:
# Otherwise, compute the hard mask
mask = X > X_ref
return mask
def tiny(x):
"""Compute the tiny-value corresponding to an input's data type.
This is the smallest "usable" number representable in ``x.dtype``
(e.g., float32).
This is primarily useful for determining a threshold for
numerical underflow in division or multiplication operations.
Parameters
----------
x : number or np.ndarray
The array to compute the tiny-value for.
All that matters here is ``x.dtype``
Returns
-------
tiny_value : float
The smallest positive usable number for the type of ``x``.
If ``x`` is integer-typed, then the tiny value for ``np.float32``
is returned instead.
See Also
--------
numpy.finfo
Examples
--------
For a standard double-precision floating point number:
>>> librosa.util.tiny(1.0)
2.2250738585072014e-308
Or explicitly as double-precision
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float64))
2.2250738585072014e-308
Or complex numbers
>>> librosa.util.tiny(1j)
2.2250738585072014e-308
Single-precision floating point:
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float32))
1.1754944e-38
Integer
>>> librosa.util.tiny(5)
1.1754944e-38
"""
# Make sure we have an array view
x = np.asarray(x)
# Only floating types generate a tiny
if np.issubdtype(x.dtype, np.floating) or np.issubdtype(
x.dtype, np.complexfloating
):
dtype = x.dtype
else:
dtype = np.float32
return np.finfo(dtype).tiny
def fill_off_diagonal(x, radius, value=0):
"""Sets all cells of a matrix to a given ``value``
if they lie outside a constraint region.
In this case, the constraint region is the
Sakoe-Chiba band which runs with a fixed ``radius``
along the main diagonal.
    When ``x.shape[0] != x.shape[1]``, the radius will be
    expanded so that the corner entry ``x[-1, -1]`` always lies inside the
    band (i.e., it is never overwritten).
``x`` will be modified in place.
Parameters
----------
x : np.ndarray [shape=(N, M)]
Input matrix, will be modified in place.
radius : float
The band radius (1/2 of the width) will be
``int(radius*min(x.shape))``
value : int
``x[n, m] = value`` when ``(n, m)`` lies outside the band.
Examples
--------
>>> x = np.ones((8, 8))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> x = np.ones((8, 12))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
"""
    nx, ny = x.shape
    # Calculate the radius in indices, rather than proportion
    radius = np.round(radius * np.min(x.shape))
    offset = np.abs(x.shape[0] - x.shape[1])
if nx < ny:
idx_u = np.triu_indices_from(x, k=radius + offset)
idx_l = np.tril_indices_from(x, k=-radius)
else:
idx_u = np.triu_indices_from(x, k=radius)
idx_l = np.tril_indices_from(x, k=-radius - offset)
# modify input matrix
x[idx_u] = value
x[idx_l] = value
def cyclic_gradient(data, edge_order=1, axis=-1):
"""Estimate the gradient of a function over a uniformly sampled,
periodic domain.
This is essentially the same as `np.gradient`, except that edge effects
are handled by wrapping the observations (i.e. assuming periodicity)
rather than extrapolation.
Parameters
----------
data : np.ndarray
The function values observed at uniformly spaced positions on
a periodic domain
edge_order: {1, 2}
The order of the difference approximation used for estimating
the gradient
axis : int
The axis along which gradients are calculated.
Returns
-------
grad : np.ndarray like ``data``
The gradient of ``data`` taken along the specified axis.
See Also
--------
numpy.gradient
Examples
--------
This example estimates the gradient of cosine (-sine) from 64
samples using direct (aperiodic) and periodic gradient
calculation.
>>> import matplotlib.pyplot as plt
>>> x = 2 * np.pi * np.linspace(0, 1, num=64, endpoint=False)
>>> y = np.cos(x)
>>> grad = np.gradient(y)
>>> cyclic_grad = librosa.util.cyclic_gradient(y)
>>> true_grad = -np.sin(x) * 2 * np.pi / len(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, true_grad, label='True gradient', linewidth=5,
... alpha=0.35)
>>> ax.plot(x, cyclic_grad, label='cyclic_gradient')
>>> ax.plot(x, grad, label='np.gradient', linestyle=':')
>>> ax.legend()
>>> # Zoom into the first part of the sequence
>>> ax.set(xlim=[0, np.pi/16], ylim=[-0.025, 0.025])
"""
# Wrap-pad the data along the target axis by `edge_order` on each side
padding = [(0, 0)] * data.ndim
padding[axis] = (edge_order, edge_order)
data_pad = np.pad(data, padding, mode="wrap")
# Compute the gradient
grad = np.gradient(data_pad, edge_order=edge_order, axis=axis)
# Remove the padding
slices = [slice(None)] * data.ndim
slices[axis] = slice(edge_order, -edge_order)
return grad[tuple(slices)]
@numba.jit(nopython=True, cache=True)
def __shear_dense(X, factor=+1, axis=-1):
"""Numba-accelerated shear for dense (ndarray) arrays"""
if axis == 0:
X = X.T
X_shear = np.empty_like(X)
for i in range(X.shape[1]):
X_shear[:, i] = np.roll(X[:, i], factor * i)
if axis == 0:
X_shear = X_shear.T
return X_shear
def __shear_sparse(X, factor=+1, axis=-1):
"""Fast shearing for sparse matrices
Shearing is performed using CSC array indices,
and the result is converted back to whatever sparse format
the data was originally provided in.
"""
fmt = X.format
if axis == 0:
X = X.T
# Now we're definitely rolling on the correct axis
X_shear = X.tocsc(copy=True)
# The idea here is to repeat the shear amount (factor * range)
# by the number of non-zeros for each column.
# The number of non-zeros is computed by diffing the index pointer array
roll = np.repeat(factor * np.arange(X_shear.shape[1]), np.diff(X_shear.indptr))
# In-place roll
np.mod(X_shear.indices + roll, X_shear.shape[0], out=X_shear.indices)
if axis == 0:
X_shear = X_shear.T
# And convert back to the input format
return X_shear.asformat(fmt)
def shear(X, factor=1, axis=-1):
"""Shear a matrix by a given factor.
The column ``X[:, n]`` will be displaced (rolled)
by ``factor * n``
This is primarily useful for converting between lag and recurrence
representations: shearing with ``factor=-1`` converts the main diagonal
to a horizontal. Shearing with ``factor=1`` converts a horizontal to
a diagonal.
Parameters
----------
X : np.ndarray [ndim=2] or scipy.sparse matrix
The array to be sheared
factor : integer
The shear factor: ``X[:, n] -> np.roll(X[:, n], factor * n)``
axis : integer
The axis along which to shear
Returns
-------
X_shear : same type as ``X``
The sheared matrix
Examples
--------
>>> E = np.eye(3)
>>> librosa.util.shear(E, factor=-1, axis=-1)
array([[1., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]])
>>> librosa.util.shear(E, factor=-1, axis=0)
array([[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]])
>>> librosa.util.shear(E, factor=1, axis=-1)
array([[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.]])
"""
if not np.issubdtype(type(factor), np.integer):
raise ParameterError("factor={} must be integer-valued".format(factor))
if scipy.sparse.isspmatrix(X):
return __shear_sparse(X, factor=factor, axis=axis)
else:
return __shear_dense(X, factor=factor, axis=axis)
def stack(arrays, axis=0):
"""Stack one or more arrays along a target axis.
This function is similar to `np.stack`, except that memory contiguity is
retained when stacking along the first dimension.
This is useful when combining multiple monophonic audio signals into a
multi-channel signal, or when stacking multiple feature representations
to form a multi-dimensional array.
Parameters
----------
arrays : list
one or more `np.ndarray`
axis : integer
The target axis along which to stack. ``axis=0`` creates a new first axis,
and ``axis=-1`` creates a new last axis.
Returns
-------
arr_stack : np.ndarray [shape=(len(arrays), array_shape) or shape=(array_shape, len(arrays))]
The input arrays, stacked along the target dimension.
If ``axis=0``, then ``arr_stack`` will be F-contiguous.
Otherwise, ``arr_stack`` will be C-contiguous by default, as computed by
`np.stack`.
Raises
------
ParameterError
- If ``arrays`` do not all have the same shape
- If no ``arrays`` are given
See Also
--------
numpy.stack
numpy.ndarray.flags
frame
Examples
--------
Combine two buffers into a contiguous arrays
>>> y_left = np.ones(5)
>>> y_right = -np.ones(5)
>>> y_stereo = librosa.util.stack([y_left, y_right], axis=0)
>>> y_stereo
array([[ 1., 1., 1., 1., 1.],
[-1., -1., -1., -1., -1.]])
>>> y_stereo.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
Or along the trailing axis
>>> y_stereo = librosa.util.stack([y_left, y_right], axis=-1)
>>> y_stereo
array([[ 1., -1.],
[ 1., -1.],
[ 1., -1.],
[ 1., -1.],
[ 1., -1.]])
>>> y_stereo.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
"""
shapes = {arr.shape for arr in arrays}
if len(shapes) > 1:
raise ParameterError("all input arrays must have the same shape")
elif len(shapes) < 1:
raise ParameterError("at least one input array must be provided for stack")
shape_in = shapes.pop()
if axis != 0:
return np.stack(arrays, axis=axis)
else:
# If axis is 0, enforce F-ordering
shape = tuple([len(arrays)] + list(shape_in))
# Find the common dtype for all inputs
dtype = np.find_common_type([arr.dtype for arr in arrays], [])
# Allocate an empty array of the right shape and type
result = np.empty(shape, dtype=dtype, order="F")
# Stack into the preallocated buffer
np.stack(arrays, axis=axis, out=result)
return result
def dtype_r2c(d, default=np.complex64):
"""Find the complex numpy dtype corresponding to a real dtype.
This is used to maintain numerical precision and memory footprint
when constructing complex arrays from real-valued data
(e.g. in a Fourier transform).
A `float32` (single-precision) type maps to `complex64`,
while a `float64` (double-precision) maps to `complex128`.
Parameters
----------
d : np.dtype
The real-valued dtype to convert to complex.
If ``d`` is a complex type already, it will be returned.
default : np.dtype, optional
The default complex target type, if ``d`` does not match a
known dtype
Returns
-------
d_c : np.dtype
The complex dtype
See Also
--------
dtype_c2r
numpy.dtype
Examples
--------
>>> librosa.util.dtype_r2c(np.float32)
dtype('complex64')
>>> librosa.util.dtype_r2c(np.int16)
dtype('complex64')
>>> librosa.util.dtype_r2c(np.complex128)
dtype('complex128')
"""
mapping = {
np.dtype(np.float32): np.complex64,
np.dtype(np.float64): np.complex128,
np.dtype(np.float): np.complex,
}
# If we're given a complex type already, return it
dt = np.dtype(d)
if dt.kind == "c":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(dt, default))
def dtype_c2r(d, default=np.float32):
"""Find the real numpy dtype corresponding to a complex dtype.
This is used to maintain numerical precision and memory footprint
when constructing real arrays from complex-valued data
(e.g. in an inverse Fourier transform).
A `complex64` (single-precision) type maps to `float32`,
while a `complex128` (double-precision) maps to `float64`.
Parameters
----------
d : np.dtype
The complex-valued dtype to convert to real.
If ``d`` is a real (float) type already, it will be returned.
default : np.dtype, optional
The default real target type, if ``d`` does not match a
known dtype
Returns
-------
d_r : np.dtype
The real dtype
See Also
--------
dtype_r2c
numpy.dtype
Examples
--------
    >>> librosa.util.dtype_c2r(np.complex64)
    dtype('float32')
    >>> librosa.util.dtype_c2r(np.float32)
    dtype('float32')
    >>> librosa.util.dtype_c2r(np.int16)
    dtype('float32')
    >>> librosa.util.dtype_c2r(np.complex128)
    dtype('float64')
"""
mapping = {
np.dtype(np.complex64): np.float32,
np.dtype(np.complex128): np.float64,
np.dtype(np.complex): np.float,
}
# If we're given a real type already, return it
dt = np.dtype(d)
if dt.kind == "f":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(np.dtype(d), default))
| isc |
blab/stability | augur/src/HI_predictability.py | 2 | 26929 | ######
# script that explores the predictive power of inferred antigenic change
# It uses tree and mutation models inferred in intervals of 10 years for H3N2
#
######
from collections import defaultdict
from diagnostic_figures import large_effect_mutations, figheight
from itertools import izip
from H3N2_process import H3N2_process, virus_config
from diagnostic_figures import get_slope
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from scipy.stats import ks_2samp
from fitness_tolerance import *
import seaborn as sns
plt.ion()
fig_fontsize=14
fs =fig_fontsize
params = {
'lam_HI':1.0,
'lam_avi':2.0,
'lam_pot':0.3,
'prefix':'H3N2_',
'serum_Kc':0.003,
}
def add_panel_label(ax,label, x_offset=-0.1):
'''add one letter labels to the upper left corner of a figure A, B, C etc '''
ax.text(x_offset, 0.95, label, transform=ax.transAxes, fontsize=fig_fontsize*1.5)
def select_nodes_in_season(tree, interval):
'''mark all nodes in a time interval specified by decimalized years, e.g. 2012.34 '''
for node in tree.leaf_iter(): # mark leafs
if node.num_date>=interval[0] and node.num_date<interval[1]:
node.alive=True
node.n_alive = 1
else:
node.alive=False
node.n_alive = 0
for node in tree.postorder_internal_node_iter(): # go over all internal nodes: alive iff at least one child alive
node.alive = any([n.alive for n in node.child_nodes()])
node.n_alive = np.sum([n.n_alive for n in node.child_nodes()])
def calc_LBI(tree, LBI_tau = 0.0005, attr = 'lb'):
'''
traverses the tree in postorder and preorder to calculate the
up and downstream tree length exponentially weighted by distance.
then adds them as LBI
tree -- dendropy tree for whose node the LBI is being computed
attr -- the attribute name used to store the result
'''
min_bl = 0.00005
# traverse the tree in postorder (children first) to calculate msg to parents
for node in tree.postorder_node_iter():
node.down_polarizer = 0
node.up_polarizer = 0
for child in node.child_nodes():
node.up_polarizer += child.up_polarizer
bl = max(min_bl, node.edge_length)/LBI_tau
node.up_polarizer *= np.exp(-bl)
if node.alive: node.up_polarizer += LBI_tau*(1-np.exp(-bl))
# traverse the tree in preorder (parents first) to calculate msg to children
for node in tree.preorder_internal_node_iter():
for child1 in node.child_nodes():
child1.down_polarizer = node.down_polarizer
for child2 in node.child_nodes():
if child1!=child2:
child1.down_polarizer += child2.up_polarizer
bl = max(min_bl, child1.edge_length)/LBI_tau
child1.down_polarizer *= np.exp(-bl)
if child1.alive: child1.down_polarizer += LBI_tau*(1-np.exp(-bl))
# go over all nodes and calculate the LBI (can be done in any order)
max_LBI = 0
for node in tree.postorder_node_iter():
tmp_LBI = node.down_polarizer
for child in node.child_nodes():
tmp_LBI += child.up_polarizer
node.__setattr__(attr, tmp_LBI)
if tmp_LBI>max_LBI:
max_LBI=tmp_LBI
return max_LBI
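# Illustrative usage sketch (added comment, not part of the original script):
# a season-specific LBI could be computed by first marking the alive nodes and
# then running the polarizer recursion defined above, e.g.
# select_nodes_in_season(tree, (2013.9, 2014.4))
# calc_LBI(tree, LBI_tau=0.0005, attr='lb')
# after which every node carries an ``lb`` attribute; the interval and tau
# shown here are placeholder values, not ones taken from the analysis below.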
''' mutation model
goes over different intervals and fits the HI model
'''
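# Added descriptive comment (not part of the original script): for each
# ~10-year window below, the HI model is refit, and every mutation's inferred
# antigenic effect is stored together with its global frequency trajectory so
# that effect size can later be compared between mutations that rise above a
# frequency threshold and those that fail to.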
mut_models = True
save_figs = True
if mut_models:
resolutions = ['1985to1995','1990to2000','1995to2005','2000to2010','2005to2016']
fig, axs = plt.subplots(1,len(resolutions), sharey=True, figsize=(4*figheight, 1.3*figheight))
cols={}
HI_distributions_mutations = []
#### make a plot of trajectories colored by HI effect
for res,ax in izip(resolutions,axs):
params['pivots_per_year'] = 6.0
params['resolution']=res
params['time_interval'] = map(float, res.split('to'))
#params['time_interval'] = [2015.8-int(res[:-1]), 2015.8]
if params['time_interval'][1]>2015:
params['time_interval'][1]=2015.8
# add all arguments to virus_config (possibly overriding)
virus_config.update(params)
# pass all these arguments to the processor: will be passed down as kwargs through all classes
myH3N2 = H3N2_process(**virus_config)
myH3N2.run(['HI'],
lam_HI = virus_config['lam_HI'],
lam_avi = virus_config['lam_avi'],
lam_pot = virus_config['lam_pot'],
)
cols = large_effect_mutations(myH3N2, ax, cols) # plot the mutation trajectories into a multi panel figure
for mut in myH3N2.mutation_effects: # for each mutation, make a list of mutation, effect and frequency trajectory
HI = myH3N2.mutation_effects[mut]
mutlabel = mut[0]+':'+mut[1][1:]
if mutlabel in myH3N2.frequencies["mutations"]["global"]:
HI_distributions_mutations.append([res, mut, HI, np.array(myH3N2.frequencies["mutations"]["global"][mutlabel])])
else:
print("no frequencies for ",mut, 'HI', HI)
continue
print(len(HI_distributions_mutations))
if save_figs:
plt.savefig("prediction_figures/"+"trajectories_mutations.pdf")
### make cumulative distribution of HI titers that fix or don't
freq_thres = 0.5 # minimal freq
HI_threshold =0.1 # minimal HI effect
fixed = np.array([ HI for res, mut, HI, freq in HI_distributions_mutations
if freq[0]<0.1 and freq.max()>freq_thres and HI>HI_threshold]) # condition on initially rare
failed = np.array([ HI for res, mut, HI, freq in HI_distributions_mutations
if freq[0]<0.1 and freq.max()<freq_thres and HI>HI_threshold])
D, p = ks_2samp(fixed, failed)
print("HI distribution of fixed and failed, KS stat:", D, "p-val:",p)
# plt cumulative distributions
plt.figure()
plt.plot(sorted(fixed), np.linspace(0,1,len(fixed)), label = '>'+str(freq_thres)+' n='+str(len(fixed)))
plt.plot(sorted(failed), np.linspace(0,1,len(failed)), label = '<'+str(freq_thres)+' n='+str(len(failed)))
plt.xlabel('HI effect')
plt.ylabel('cumulative distribution')
plt.legend(loc=4)
if save_figs:
plt.savefig("prediction_figures/"+"cumulative_HI_mutations.pdf")
################################################################
##### fraction successful
################################################################
plt.figure(figsize = (1.6*figheight, figheight))
ax3 = plt.subplot(1,1,1)
HI_max = np.array([[HI, freq.max()] for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()>0.1])
nreps=100
HI_threshold = np.array([0.0, 0.3, 0.7, 1.2, 4]) #defining HI categories
HI_binc = 0.5*(HI_threshold[:-1] + HI_threshold[1:])
for fi,freq_thres in enumerate([0.25, 0.5, 0.75, 0.95]):
frac_success = []
stddev_success = []
for HI_lower, HI_upper in zip(HI_threshold[:-1], HI_threshold[1:]):
ind = (HI_max[:,0]>=HI_lower)&(HI_max[:,0]<HI_upper)
vals = HI_max[ind,1]
tmp = []
for rep in xrange(nreps):
tmp_vals = vals[np.random.randint(len(vals), size=len(vals)/2)]
tmp.append((tmp_vals>freq_thres).mean())
stddev_success.append(np.std(tmp))
print(HI_lower, ind.sum())
frac_success.append((HI_max[ind,1]>freq_thres).mean())
ax3.errorbar(np.arange(len(frac_success))+0.5+0.03*fi, frac_success,stddev_success, label = "max freq >"+str(freq_thres), lw=2)
ax3.set_xlabel('HI effect', fontsize=fs)
ax3.set_ylabel('fraction reaching frequency threshold', fontsize=fs)
ax3.tick_params(labelsize=fs)
ax3.set_xticks(np.arange(len(HI_binc))+0.5)
ax3.set_xticklabels([str(lower)+'-'+str(upper) for lower, upper in zip(HI_threshold[:-1], HI_threshold[1:])])
plt.legend(loc=8, fontsize=fs)
plt.ylim([0,1])
plt.xlim([0,len(HI_binc)])
plt.tight_layout()
if save_figs:
plt.savefig("prediction_figures/"+'fraction_successful.pdf')
### make cumulative HI on backbone
HI_backbone = np.array([HI for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()>0.75])
HI_backbone.sort()
plt.figure(figsize = (1.2*figheight, figheight))
cumHI = HI_backbone.cumsum()
plt.plot(HI_backbone, cumHI/cumHI[-1])
plt.ylabel('fraction of cHI due to effects < cutoff', fontsize = fs)
plt.xlabel('effect size', fontsize = fs)
plt.tick_params(labelsize=fs)
plt.tight_layout()
if save_figs:
plt.savefig("prediction_figures/"+'cumulative_HI_effects.pdf')
''' analyze the tree model
This uses the 30 year span and investigates whether antigenic advance (as measured by cHI)
is predictive of clade success.
'''
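# Added descriptive comment (not part of the original script): the block below
# loads a single 1985-2016 tree model, attaches to each internal node the
# antigenic change (cHI) accumulated over the preceding dt=0.5 years, clusters
# similar clade frequency trajectories, and compares the cHI of clades that
# eventually exceed a frequency threshold with those that do not.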
res = '1985to2016'
params['pivots_per_year'] = 3.0
params['resolution']=res
params['time_interval'] = map(float, res.split('to'))
if params['time_interval'][1]>2015:
params['time_interval'][1]=2015.8 # set the upper time limit to past the last sequence
# add all arguments to virus_config (possibly overriding)
virus_config.update(params)
# pass all these arguments to the processor: will be passed down as kwargs through all classes
myH3N2 = H3N2_process(**virus_config)
myH3N2.load()
# assign dates to internal nodes as minimum for children. this is needed to calculate the recent
for node in myH3N2.tree.postorder_internal_node_iter():
node.num_date = np.min([c.num_date for c in node.child_nodes()])
assign_fitness(myH3N2.tree)
dates_fitness = np.array([(n.num_date, n.tol) for n in myH3N2.tree.postorder_internal_node_iter()])
dates_fitness_term = np.array([(n.num_date, n.tol) for n in myH3N2.tree.leaf_iter()])
pivots = myH3N2.tree.seed_node.pivots
HI_vs_max_freq_tree = []
dt=0.5
for node in myH3N2.tree.postorder_internal_node_iter():
if node.num_date<1987:
continue
if node.freq["global"] is not None and node.freq["global"].max()>0.05:
p = node.parent_node
cHI = node.dHI
while p is not None and (node.num_date - p.num_date)<dt:
cHI += p.dHI
p = p.parent_node
ind = (dates_fitness_term[:,0]<=node.num_date)&(dates_fitness_term[:,0]>node.num_date-dt)
HI_vs_max_freq_tree.append((cHI, np.array(node.freq["global"])))
freq_clusters = []
globbing_thres=0.25
print("combining trajectories")
for cHI, freq in HI_vs_max_freq_tree:
found = False
for fi, (cHIs, cfreqs) in enumerate(freq_clusters):
max_freq = np.mean(cfreqs, axis=0).max()+0.1
if np.max(np.abs(freq - np.mean(cfreqs, axis=0)))/max_freq<globbing_thres:
freq_clusters[fi][1].append(freq)
freq_clusters[fi][0].append(cHI)
found=True
if not found:
print ("adding new cluster")
freq_clusters.append([[cHI], [freq]])
print("generated",len(freq_clusters), " trajectory clusters")
freq_thres = 0.75
fixed = np.array([ max(HI) for HI, freqs in freq_clusters
if np.max(np.mean(freqs, axis=0))>freq_thres and max(HI)>0.01])
failed = np.array([ max(HI) for HI, freqs in freq_clusters
if np.max(np.mean(freqs, axis=0))<freq_thres and max(HI)>0.01])
print("split into lists of failed and successful trajectories")
D, p = ks_2samp(fixed, failed)
print("KS stat:", D, "p-val:",p)
plt.figure()
plt.plot(sorted(fixed), np.linspace(0,1,len(fixed)), label = '>'+str(freq_thres)+' n='+str(len(fixed)))
plt.plot(sorted(failed), np.linspace(0,1,len(failed)), label = '<'+str(freq_thres)+' n='+str(len(failed)))
plt.xlabel('HI effect')
plt.ylabel('cumulative distribution')
plt.legend(loc=4)
if save_figs:
plt.savefig("prediction_figures/"+"cumulative_HI_tree.pdf")
################################################################
#### plot tree frequencies
################################################################
fs=14
HI_cutoff=0.1
mycmap = cm.cool
plt.figure(figsize=(3*figheight, figheight))
ax1 = plt.subplot(1,1,1)
for cHIs, cfreqs in freq_clusters:
if max(cHIs)>HI_cutoff:
ax1.plot(pivots,np.mean(cfreqs, axis=0), c=mycmap(np.sqrt(np.max(cHIs))/2))
sm = plt.cm.ScalarMappable(cmap=mycmap, norm=plt.Normalize(vmin=0, vmax=2))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
cb = plt.colorbar(sm)
cb.set_ticks(np.sqrt([0, 0.3, 1, 2,4]))
cb.set_ticklabels(map(str, [0, 0.3, 1, 2,4]))
cb.set_label('HI effect', fontsize=fs)
ax1.set_ylabel('frequency', fontsize=fs)
ax1.set_xlabel('year', fontsize=fs)
ax1.set_xlim([1985,2016])
ax1.tick_params(labelsize=fs)
plt.tight_layout()
add_panel_label(ax1, "A", x_offset=-0.07)
if save_figs:
plt.savefig("prediction_figures/"+"trajectories_tree.pdf")
'''
the following is obsolete code that was used to plot the fraction of successful clades
vs their antigenic effect
'''
################################################################
##### add fraction successful
################################################################
#plt.figure(figsize=(2.4*figheight, figheight))
#ax2 = plt.subplot2grid((1,2),(0,0))
#plt.title("tree model", fontsize=fs)
##HI_threshold = np.array([0.1, 0.3, 0.8, 1.5, 4])
#HI_threshold = np.array([0.0, 0.3, 0.8, 1.5, 4])
#HI_binc = 0.5*(HI_threshold[:-1]+HI_threshold[1:])
#HI_max = np.array([[np.max(HI), np.max(np.mean(freqs, axis=0))] for HI, freqs in freq_clusters])
#for freq_thres in [0.5, 0.75, 0.95]:
# frac_success = []
# for HI_lower, HI_upper in zip(HI_threshold[:-1], HI_threshold[1:]):
# ind = (HI_max[:,0]>=HI_lower)&(HI_max[:,0]<HI_upper)
# print(HI_lower, ind.sum())
# frac_success.append((HI_max[ind,1]>freq_thres).mean())
# ax2.plot(np.arange(len(frac_success))+0.5, frac_success, 'o-', label = "max freq >"+str(freq_thres))
#
#ax2.set_xlabel('HI effect', fontsize=fs)
#ax2.set_ylabel('fraction reaching frequency threshold', fontsize=fs)
#ax2.tick_params(labelsize=fs)
#ax2.set_xticks(np.arange(len(HI_binc))+0.5)
#ax2.set_xticklabels([str(lower)+'-'+str(upper) for lower, upper in zip(HI_threshold[:-1], HI_threshold[1:])])
#plt.legend(loc=4, fontsize=fs)
#plt.ylim([0,1])
#plt.xlim([0,len(HI_binc)])
#
#ax3 = plt.subplot2grid((1,2),(0,1))
#plt.title("mutation model", fontsize=fs)
#HI_max = np.array([[HI, freq.max()] for res, mut, HI, freq in HI_distributions_mutations if freq[0]<0.1 and freq.max()>0.01])
#for freq_thres in [0.25, 0.5, 0.75, 0.95]:
# frac_success = []
# for HI_lower, HI_upper in zip(HI_threshold[:-1], HI_threshold[1:]):
# ind = (HI_max[:,0]>=HI_lower)&(HI_max[:,0]<HI_upper)
# print(HI_lower, ind.sum())
# frac_success.append((HI_max[ind,1]>freq_thres).mean())
# ax3.plot(np.arange(len(frac_success))+0.5, frac_success, 'o-', label = "max freq >"+str(freq_thres))
#
#ax3.set_xlabel('HI effect', fontsize=fs)
#ax3.set_ylabel('fraction reaching frequency threshold', fontsize=fs)
#ax3.tick_params(labelsize=fs)
#ax3.set_xticks(np.arange(len(HI_binc))+0.5)
#ax3.set_xticklabels([str(lower)+'-'+str(upper) for lower, upper in zip(HI_threshold[:-1], HI_threshold[1:])])
#plt.legend(loc=4, fontsize=fs)
#plt.ylim([0,1])
#plt.xlim([0,len(HI_binc)])
#
#plt.tight_layout()
#if save_figs:
# plt.savefig("prediction_figures/"+'combined_HI_dynamics.pdf')
#
#
#########################################################################
##### the following implements bona fide prediction by estimating HI models
##### and LBI for viruses sampled up to a time cutoff and examining the next season
#########################################################################
gof_by_year = []
alpha = 'ACGT'
def allele_freqs(seqs):
''' alignment -> nucleotide frequencies '''
tmp_seqs = np.array([np.fromstring(seq, 'S1') for seq in seqs])
af = np.zeros((4,tmp_seqs.shape[1]))
for ni,nuc in enumerate(alpha):
af[ni,:] = (tmp_seqs==nuc).mean(axis=0)
return af
def af_dist(af1, af2):
    ''' average distance between two populations characterized by nucleotide frequencies af1 and af2'''
return 1-(af1*af2).sum(axis=0).mean(axis=0) # distance == 1-probability of being the same
def seq_dist(seq, af):
    ''' average distance between a population characterized by nucleotide frequencies and a sequence'''
ind = np.array([alpha.index(nuc) for nuc in seq]) #calculate the indices in the frequency array that correspond to the sequence state
return 1.0-np.mean(af[ind, np.arange(len(seq))]) #1 - probability of being the same
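# The two helpers above treat distance as "1 - probability that a randomly drawn
# character matches". As an illustrative sanity check (an editor's sketch, not part
# of the original analysis): a sequence has distance 0 to the allele-frequency
# profile built from copies of itself, and two disjoint single-sequence profiles
# are at distance 1, e.g.
#   seq_dist('ACGT', allele_freqs(['ACGT', 'ACGT']))        # -> 0.0
#   af_dist(allele_freqs(['AAAA']), allele_freqs(['CCCC'])) # -> 1.0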
# frequency cutoffs for a clade to be included
cutoffs = [0.01, 0.03, 0.05, 0.1]
# set up dictionaries to remember the highest scoring clades for sets defined by each of the cutoffs
LBI_HI_by_date = {cutoff:[] for cutoff in cutoffs}
best_scores = {cutoff:[] for cutoff in cutoffs}
best_LBI = {cutoff:[] for cutoff in cutoffs}
best_HI = {cutoff:[] for cutoff in cutoffs}
best_HI_vs_HI_of_best = {cutoff:[] for cutoff in cutoffs}
# loop over all years we want to include
for year in range(1990, 2015):
print("#############################################")
print("### YEAR:",year)
print("#############################################")
# train the HI model and remember some basic figures about the fit
myH3N2.map_HI(training_fraction = 1.0, method = 'nnl1reg', lam_HI=params['lam_HI'], map_to_tree = True,
lam_pot = params['lam_pot'], lam_avi = params['lam_avi'], cutoff_date = year+2./12.0, subset_strains = False, force_redo = True)
gof_by_year.append((year, myH3N2.fit_error, myH3N2.tree_graph.shape[0]))
    # take an allele frequency snapshot of the future season Sept until June
select_nodes_in_season(myH3N2.tree, (year+9.0/12, year+18.0/12))
future_seqs = [node.seq for node in myH3N2.tree.leaf_iter() if node.alive]
future_af = allele_freqs(future_seqs)
    # current season May until Feb (all previously selected nodes will be erased)
select_nodes_in_season(myH3N2.tree, (year-7.0/12, year+2.0/12))
af = allele_freqs([node.seq for node in myH3N2.tree.leaf_iter() if node.alive])
avg_dist = af_dist(af, future_af)
max_LBI = calc_LBI(myH3N2.tree, LBI_tau = 0.001)
total_alive = 1.0*myH3N2.tree.seed_node.n_alive
# loop over the different frequency cut-offs
for cutoff in cutoffs:
# make a list of nodes (clades) that are used for prediction. frequency needs to be >cutoff and <0.95
nodes = [node for node in myH3N2.tree.postorder_node_iter()
if node.alive and node.n_alive/total_alive>cutoff and node.n_alive/total_alive<0.95]
# determine the minimal distance to future, the average cumulative antigenic advance, and the best possible node
all_distance_to_future = np.array([seq_dist(node.seq, future_af) for node in nodes])
best = np.argmin(all_distance_to_future)
min_dist = all_distance_to_future[best]
current_cHI = np.mean([n.cHI for n in nodes])
# determine the nodes with the highest LBI and the highest HI
all_LBI = np.array([n.lb for n in nodes])
best_LBI_node = nodes[np.argmax(all_LBI)]
best_HI_node = nodes[np.argmax([n.cHI for n in nodes])]
# remember the LBI and HI of the best node
best_scores[cutoff].append([year, nodes[best].lb/max_LBI, nodes[best].cHI-current_cHI])
# remember the LBI and HI, normalized (d/avg_d), standardized (d-min_d)/(avg_d-min_d), and min_d, avg_d for node with highest LBI
best_LBI[cutoff].append((year, best_LBI_node.lb/max_LBI, best_LBI_node.cHI-current_cHI,
(seq_dist(best_LBI_node.seq, future_af)-min_dist)/(avg_dist-min_dist),
seq_dist(best_LBI_node.seq, future_af)/avg_dist, min_dist, avg_dist))
# remember the LBI and HI, normalized (d/avg_d), standardized (d-min_d)/(avg_d-min_d), and min_d, avg_d for node with highest HI
best_HI[cutoff].append((year, best_HI_node.lb/max_LBI, best_HI_node.cHI-current_cHI,
(seq_dist(best_HI_node.seq, future_af)-min_dist)/(avg_dist-min_dist),
seq_dist(best_HI_node.seq, future_af)/avg_dist, min_dist, avg_dist))
        # remember HI of the node closest to the future and the HI of the node with the highest HI
best_HI_vs_HI_of_best[cutoff].append((year, best_HI_node.cHI - current_cHI, nodes[best].cHI - current_cHI))
print(year, "avg_dist", avg_dist)
# make a list of the LBI, cHI, and distances for every node in the set belonging to cutoffs. (used for scattering LBI vs HI)
for node in nodes:
node_freq = node.n_alive/total_alive
if node.freq["global"] is not None:
tmp_freq = np.array(node.freq["global"])
ii = pivots.searchsorted(year+0.2)
nii= pivots.searchsorted(year+1.0)
LBI_HI_by_date[cutoff].append((node, year, node.lb/max_LBI, node.cHI-current_cHI,
(seq_dist(node.seq, future_af)-min_dist)/(avg_dist-min_dist),node_freq,
tmp_freq[ii], tmp_freq[nii], tmp_freq))
else:
#print("missing frequency", year, node.n_alive)
LBI_HI_by_date[cutoff].append((node, year, node.lb/max_LBI, node.cHI-current_cHI,
(seq_dist(node.seq, future_af)-min_dist)/(avg_dist-min_dist), node_freq,
0,0,0))
# make an array out of all values for slicing and plotting
clades = {}
for cutoff in cutoffs:
best_scores[cutoff] = np.array(best_scores[cutoff])
best_LBI[cutoff] = np.array(best_LBI[cutoff])
best_HI[cutoff] = np.array(best_HI[cutoff])
best_HI_vs_HI_of_best[cutoff] = np.array(best_HI_vs_HI_of_best[cutoff])
tmp = []
for cl in LBI_HI_by_date[cutoff]:
tmp.append(cl[1:-1]) # exclude node (entry 0) and frequencies (entry -1) since they aren't numbers
clades[cutoff] = np.array(tmp)
############
# make figure that shows the distance to future season for all years
# comparing LBI and HI at different cutoffs
############
cutoff = 0.01
plt.figure(figsize=(2.4*figheight,figheight))
ax=plt.subplot(121)
# -2 == min_dist, -1 == avg_dist, -3 == dist/avg_dist
ax.plot(best_HI[cutoff][:,0],best_HI[cutoff][:,-2]/best_HI[cutoff][:,-1], label='best',lw=2, c='k')
ax.plot(best_LBI[cutoff][:,0],best_LBI[cutoff][:,-3], label='LBI',lw=2)
ax.plot(best_HI[0.05][:,0],best_HI[0.05][:,-3], label='cHI >0.05',lw=2)
ax.plot(best_HI[0.01][:,0],best_HI[0.01][:,-3], label='cHI >0.01',lw=2)
ax.plot([1990, 2015], [1.0, 1.0], lw=3, c='k', ls='--')
ax.tick_params(labelsize=fs)
add_panel_label(ax, "B", x_offset=-0.12)
ax.set_xlabel('year', fontsize=fs)
ax.set_ylabel('distance to season year/year+1', fontsize=fs)
ax.set_yticks([0,0.5, 1.0, 1.5])
plt.legend(loc=2, fontsize=fs)
############
# second panel showing the distribution of HI values of the best nodes
############
cols = sns.color_palette(n_colors=5)
symbs = ['o','d','v','^','<']
ax = plt.subplot(122)
ax.hist(best_scores[0.05][:,-1])
add_panel_label(ax, "C")
ax.set_yticks([0,2,4,6, 8])
ax.set_ylim([0,10])
ax.set_ylabel("#years", fontsize=fs)
ax.set_xlabel(r'$cHI-\langle cHI\rangle_{year}$', fontsize=fs)
ax.tick_params(labelsize=fs)
plt.tight_layout()
if save_figs:
plt.savefig("prediction_figures/"+'LBI_and_HI_vs_distance.pdf')
#fig, axs = plt.subplots(1,3, figsize=(3.0*figheight, figheight))
#lbi_cutoff = 0.2
#for ax, qi in izip(axs,[1,2]):
# for yi,year in enumerate(range(1990,2015)):
# ind = (clades[:,0]==year)&((clades[:,-3]>lbi_cutoff)|(clades[:,2]>.5)) #restrict to clades larger than cutoff
# if ind.sum()==0:
# continue
# lstr = str(year) if (year<1998 and qi==1) or (year>=1998 and year<2006 and qi==2) else None
# ax.scatter(clades[ind,qi], clades[ind,3], c=cols[yi%5], marker=symbs[yi//5], s=50, label=lstr)
# print(cols[yi%5])
# x_str = r'$cHI-\langle cHI\rangle_{year}$' if qi==2 else r'$LBI/\max(LBI)$'
# ax.set_xlabel(x_str, fontsize=fs)
# ax.tick_params(labelsize=fs)
# ax.set_xlim((0.2,1.4) if qi==1 else (-3,3))
# ax.set_xticks((0.25,0.5, 0.75, 1.0) if qi==1 else (-2,-1,0,1,2))
# ax.set_ylim((-0.2,2.5))
# if qi<3:
# ax.set_yticks([0, 0.5,1.0, 1.5, 2.0])
# ax.set_ylabel(r'distance to season year/year+1', fontsize=fs)
# ax.legend()
# add_panel_label(ax, "C" if qi==2 else "B", x_offset=-0.12)
#
#ax = axs[2]
#for yi, (year, lbi, cHI) in enumerate(best_scores):
# lstr = str(year) if (year>=2006) else None
# ax.scatter([lbi], [cHI], c=cols[yi%5], marker=symbs[yi//5], s=50, label=lstr)
#ax.set_xlabel(r'$LBI/\max(LBI)$', fontsize=fs)
#ax.set_ylabel(r'$cHI-\langle cHI\rangle_{year}$', fontsize=fs)
#ax.set_xlim([0, 1.1])
#ax.set_xticks([0, 0.25, 0.5, 0.75, 1])
#ax.set_yticks([-0.5, 0, 0.5, 1, 1.5])
#ax.legend()
#################################################################
### plot best HI vs HI of best
################################################################
plt.figure(figsize = (1.2*figheight, figheight))
ax=plt.subplot(111)
for col, cutoff in zip(['b', 'g'], [0.01, 0.05]):
plt.scatter(best_HI_vs_HI_of_best[cutoff][:,1],
best_HI_vs_HI_of_best[cutoff][:,2], label = '>'+str(cutoff), s=50, c=col) #, s=50*best_HI[:,-3])
plt.plot([0,3],[0,3])
plt.tick_params(labelsize=fs)
plt.xlabel(r'maximal $cHI-\langle cHI\rangle_{year}$', fontsize=fs)
plt.ylabel(r'successful $cHI-\langle cHI\rangle_{year}$', fontsize=fs)
plt.xticks([0,1,2,3,4])
plt.yticks([-1, 0,1,2,3])
plt.legend(loc=2)
plt.tight_layout()
if save_figs:
plt.savefig("prediction_figures/"+'best_HI_vs_HI_of_best.pdf')
################################################################
### scatter plot of LBI vs HI
################################################################
plt.figure(figsize=(2.4*figheight, figheight))
mycmap=cm.Set1
cutoff = 0.01
for li,lbi_cutoff in enumerate([0.2, 0.1]):
ax = plt.subplot(1,2,li+1)
ind = clades[cutoff][:,-3]>lbi_cutoff #restrict to clades larger than cutoff
if ind.sum()==0:
continue
ax.set_title('clades >'+str(lbi_cutoff)+' frequency')
ax.scatter(clades[cutoff][ind,1], clades[cutoff][ind,2], c=mycmap((clades[cutoff][ind,0]-1990)/25.0), s=80*(1-clades[cutoff][ind,3])**2)
ax.set_ylabel(r'$cHI-\langle cHI\rangle_{year}$', fontsize=fs)
ax.set_xlabel(r'$LBI/\max(LBI)$', fontsize=fs)
ax.tick_params(labelsize=fs)
ax.set_yticks([-3,-2,-1,0,1,2])
add_panel_label(ax, "C" if li else "B", x_offset=-0.15)
if li:
sm = plt.cm.ScalarMappable(cmap=mycmap, norm=plt.Normalize(vmin=1990, vmax=2015))
sm._A = []
cb = plt.colorbar(sm)
cb.set_ticks([1990,1995,2000, 2005, 2010, 2015])
cb.set_label('year', fontsize=fs)
plt.tight_layout()
if save_figs:
plt.savefig("prediction_figures/"+'LBI_HI.pdf')
| agpl-3.0 |
adykstra/mne-python | tutorials/epochs/plot_visualize_epochs.py | 10 | 5143 | """
.. _tut_viz_epochs:
Visualize Epochs data
=====================
"""
# sphinx_gallery_thumbnail_number = 7
import os.path as op
import mne
data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')
raw = mne.io.read_raw_fif(
op.join(data_path, 'sample_audvis_raw.fif'), preload=True)
raw.load_data().filter(None, 9, fir_design='firwin')
raw.set_eeg_reference('average', projection=True) # set EEG average reference
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'button': 32}
events = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5)
###############################################################################
# This tutorial focuses on visualization of epoched data. All of the functions
# introduced here are basically high-level matplotlib functions with built-in
# intelligence to work with epoched data. All the methods return a handle to a
# matplotlib figure instance.
#
# Events used for constructing the epochs here are the triggers for the subject
# being presented a smiley face at the center of the visual field. More details on
# the paradigm can be found at :ref:`BABDHIFJ`.
#
# All plotting functions start with ``plot``. Let's start with the most
# obvious. :func:`mne.Epochs.plot` offers an interactive browser that allows
# rejection by hand when called in combination with a keyword ``block=True``.
# This blocks the execution of the script until the browser window is closed.
epochs.plot(block=True)
###############################################################################
# The numbers at the top refer to the event id of the epoch. The number at the
# bottom is the running numbering for the epochs.
#
# Since we did no artifact correction or rejection, there are epochs
# contaminated with blinks and saccades. For instance, epoch number 1 seems to
# be contaminated by a blink (scroll to the bottom to view the EOG channel).
# This epoch can be marked for rejection by clicking on top of the browser
# window. The epoch should turn red when you click it. This means that it will
# be dropped as the browser window is closed.
#
# It is possible to plot event markers on epoched data by passing ``events``
# keyword to the epochs plotter. The events are plotted as vertical lines and
# they follow the same coloring scheme as :func:`mne.viz.plot_events`. The
# events plotter gives you all the events with a rough idea of the timing.
# Since the colors are the same, the event plotter can also function as a
# legend for the epochs plotter events. It is also possible to pass your own
# colors via ``event_colors`` keyword. Here we can plot the reaction times
# between seeing the smiley face and the button press (event 32).
#
# When events are passed, the epoch numbering at the bottom is switched off by
# default to avoid overlaps. You can turn it back on via the settings dialog by
# pressing the `o` key. You should check out `help` at the lower left corner of the
# window for more information about the interactive features.
events = mne.pick_events(events, include=[5, 32])
mne.viz.plot_events(events)
epochs['smiley'].plot(events=events)
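# The following line is an illustrative sketch of the ``event_colors`` keyword
# mentioned above; the color choices are arbitrary and not part of the original
# tutorial.
epochs['smiley'].plot(events=events, event_colors={5: 'green', 32: 'red'})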
###############################################################################
# To plot individual channels as an image, where you see all the epochs at one
# glance, you can use function :func:`mne.Epochs.plot_image`. It shows the
# amplitude of the signal over all the epochs plus an average (evoked response)
# of the activation. We explicitly set interactive colorbar on (it is also on
# by default for plotting functions with a colorbar except the topo plots). In
# interactive mode you can scale and change the colormap with mouse scroll and
# up/down arrow keys. You can also drag the colorbar with left/right mouse
# button. Hitting space bar resets the scale.
epochs.plot_image(278, cmap='interactive', sigma=1., vmin=-250, vmax=250)
###############################################################################
# We can also give an overview of all channels by calculating the global
# field power (or other aggregation methods). However, combining
# multiple channel types (e.g., MEG and EEG) in this way is not sensible.
# Instead, we can use the ``group_by`` parameter. Setting ``group_by`` to
# 'type' combines channels by type.
# ``group_by`` can also be used to group channels into arbitrary groups, e.g.
# regions of interests, by providing a dictionary containing
# group name -> channel indices mappings.
epochs.plot_image(combine='gfp', group_by='type', sigma=2., cmap="YlGnBu_r")
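# As a minimal sketch of the dictionary form of ``group_by`` described above
# (the group name and channel indices are illustrative placeholders, not a
# meaningful region of interest):
epochs.plot_image(group_by={'custom ROI': [0, 1, 2, 3, 4]}, combine='gfp', sigma=1.)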
###############################################################################
# You also have functions for plotting channelwise information arranged into a
# shape of the channel array. The image plotting uses automatic scaling by
# default, but noisy channels and different channel types can cause the scaling
# to be a bit off. Here we define the limits by hand.
epochs.plot_topo_image(vmin=-250, vmax=250, title='ERF images', sigma=2.,
fig_facecolor='w', font_color='k')
| bsd-3-clause |
zorojean/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
vibhorag/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
ndingwall/scikit-learn | examples/feature_selection/plot_feature_selection.py | 18 | 3371 | """
============================
Univariate Feature Selection
============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first four are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest, f_classif
# #############################################################################
# Import some data to play with
# The iris dataset
X, y = load_iris(return_X_y=True)
# Some noisy data not correlated
E = np.random.RandomState(42).uniform(0, 0.1, size=(X.shape[0], 20))
# Add the noisy data to the informative features
X = np.hstack((X, E))
# Split dataset to select feature and evaluate the classifier
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=0
)
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
# #############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function to select the four
# most significant features
selector = SelectKBest(f_classif, k=4)
selector.fit(X_train, y_train)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)')
# #############################################################################
# Compare to the weights of an SVM
clf = make_pipeline(MinMaxScaler(), LinearSVC())
clf.fit(X_train, y_train)
print('Classification accuracy without selecting features: {:.3f}'
.format(clf.score(X_test, y_test)))
svm_weights = np.abs(clf[-1].coef_).sum(axis=0)
svm_weights /= svm_weights.sum()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight')
clf_selected = make_pipeline(
SelectKBest(f_classif, k=4), MinMaxScaler(), LinearSVC()
)
clf_selected.fit(X_train, y_train)
print('Classification accuracy after univariate feature selection: {:.3f}'
.format(clf_selected.score(X_test, y_test)))
svm_weights_selected = np.abs(clf_selected[-1].coef_).sum(axis=0)
svm_weights_selected /= svm_weights_selected.sum()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
deepesch/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
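# Illustrative behaviour of the normalization above (an added sketch, not part of
# the original module): case is lowered, spaces become dashes, and parentheses and
# dots are stripped, e.g.
#   mldata_filename('Whistler Daily Snowfall')  -> 'whistler-daily-snowfall'
#   mldata_filename('datasets-UCI iris')        -> 'datasets-uci-iris'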
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/cognitive_mapping_and_planning/tfcode/cmp_summary.py | 14 | 8338 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for setting up summaries for CMP.
"""
import sys, os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
import logging
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from src import utils
import src.file_utils as fu
import tfcode.nav_utils as nu
def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N):
# outputs is [gt_map, pred_map]:
if N >= 0:
outputs = outputs[:N]
N = len(outputs)
plt.set_cmap('jet')
fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*2), (5,5))
axes = axes.ravel()[::-1].tolist()
for i in range(N):
gt_map, pred_map = outputs[i]
for j in [0]:
for k in range(gt_map.shape[4]):
# Display something like the midpoint of the trajectory.
id = np.int(gt_map.shape[1]/2)
ax = axes.pop();
ax.imshow(gt_map[j,id,:,:,k], origin='lower', interpolation='none',
vmin=0., vmax=1.)
ax.set_axis_off();
if i == 0: ax.set_title('gt_map')
ax = axes.pop();
ax.imshow(pred_map[j,id,:,:,k], origin='lower', interpolation='none',
vmin=0., vmax=1.)
ax.set_axis_off();
if i == 0: ax.set_title('pred_map')
file_name = os.path.join(output_dir, 'readout_map_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
def _vis(outputs, global_step, output_dir, metric_summary, N):
  # Plot the value map and goal for the various maps to see if the model is
# learning anything useful.
#
# outputs is [values, goals, maps, occupancy, conf].
#
if N >= 0:
outputs = outputs[:N]
N = len(outputs)
plt.set_cmap('jet')
fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*5), (5,5))
axes = axes.ravel()[::-1].tolist()
for i in range(N):
values, goals, maps, occupancy, conf = outputs[i]
for j in [0]:
for k in range(values.shape[4]):
# Display something like the midpoint of the trajectory.
id = np.int(values.shape[1]/2)
ax = axes.pop();
ax.imshow(goals[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('goal')
ax = axes.pop();
ax.imshow(occupancy[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('occupancy')
ax = axes.pop();
ax.imshow(conf[j,id,:,:,k], origin='lower', interpolation='none',
vmin=0., vmax=1.)
ax.set_axis_off();
if i == 0: ax.set_title('conf')
ax = axes.pop();
ax.imshow(values[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('value')
ax = axes.pop();
ax.imshow(maps[j,id,:,:,k], origin='lower', interpolation='none')
ax.set_axis_off();
if i == 0: ax.set_title('incr map')
file_name = os.path.join(output_dir, 'value_vis_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
def _summary_vis(m, batch_size, num_steps, arop_full_summary_iters):
arop = []; arop_summary_iters = []; arop_eval_fns = [];
vis_value_ops = []; vis_goal_ops = []; vis_map_ops = [];
vis_occupancy_ops = []; vis_conf_ops = [];
for i, val_op in enumerate(m.value_ops):
vis_value_op = tf.reduce_mean(tf.abs(val_op), axis=3, keep_dims=True)
vis_value_ops.append(vis_value_op)
vis_occupancy_op = tf.reduce_mean(tf.abs(m.occupancys[i]), 3, True)
vis_occupancy_ops.append(vis_occupancy_op)
vis_conf_op = tf.reduce_max(tf.abs(m.confs[i]), axis=3, keep_dims=True)
vis_conf_ops.append(vis_conf_op)
ego_goal_imgs_i_op = m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)]
vis_goal_op = tf.reduce_max(ego_goal_imgs_i_op, 4, True)
vis_goal_ops.append(vis_goal_op)
vis_map_op = tf.reduce_mean(tf.abs(m.ego_map_ops[i]), 4, True)
vis_map_ops.append(vis_map_op)
vis_goal_ops = tf.concat(vis_goal_ops, 4)
vis_map_ops = tf.concat(vis_map_ops, 4)
vis_value_ops = tf.concat(vis_value_ops, 3)
vis_occupancy_ops = tf.concat(vis_occupancy_ops, 3)
vis_conf_ops = tf.concat(vis_conf_ops, 3)
sh = tf.unstack(tf.shape(vis_value_ops))[1:]
vis_value_ops = tf.reshape(vis_value_ops, shape=[batch_size, -1] + sh)
sh = tf.unstack(tf.shape(vis_conf_ops))[1:]
vis_conf_ops = tf.reshape(vis_conf_ops, shape=[batch_size, -1] + sh)
sh = tf.unstack(tf.shape(vis_occupancy_ops))[1:]
vis_occupancy_ops = tf.reshape(vis_occupancy_ops, shape=[batch_size,-1] + sh)
# Save memory, only return time steps that need to be visualized, factor of
# 32 CPU memory saving.
id = np.int(num_steps/2)
vis_goal_ops = tf.expand_dims(vis_goal_ops[:,id,:,:,:], axis=1)
vis_map_ops = tf.expand_dims(vis_map_ops[:,id,:,:,:], axis=1)
vis_value_ops = tf.expand_dims(vis_value_ops[:,id,:,:,:], axis=1)
vis_conf_ops = tf.expand_dims(vis_conf_ops[:,id,:,:,:], axis=1)
vis_occupancy_ops = tf.expand_dims(vis_occupancy_ops[:,id,:,:,:], axis=1)
arop += [[vis_value_ops, vis_goal_ops, vis_map_ops, vis_occupancy_ops,
vis_conf_ops]]
arop_summary_iters += [arop_full_summary_iters]
arop_eval_fns += [_vis]
return arop, arop_summary_iters, arop_eval_fns
def _summary_readout_maps(m, num_steps, arop_full_summary_iters):
arop = []; arop_summary_iters = []; arop_eval_fns = [];
id = np.int(num_steps-1)
vis_readout_maps_gt = m.readout_maps_gt
vis_readout_maps_prob = tf.reshape(m.readout_maps_probs,
shape=tf.shape(vis_readout_maps_gt))
vis_readout_maps_gt = tf.expand_dims(vis_readout_maps_gt[:,id,:,:,:], 1)
vis_readout_maps_prob = tf.expand_dims(vis_readout_maps_prob[:,id,:,:,:], 1)
arop += [[vis_readout_maps_gt, vis_readout_maps_prob]]
arop_summary_iters += [arop_full_summary_iters]
arop_eval_fns += [_vis_readout_maps]
return arop, arop_summary_iters, arop_eval_fns
def _add_summaries(m, args, summary_mode, arop_full_summary_iters):
task_params = args.navtask.task_params
summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op] + \
m.loss_ops + m.acc_ops
summarize_names = ['lr', 'global_step', 'sample_gt_prob_op'] + \
m.loss_ops_names + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))]
to_aggregate = [0, 0, 0] + [1]*len(m.loss_ops_names) + [1]*len(m.acc_ops)
scope_name = 'summary'
with tf.name_scope(scope_name):
s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters,
summarize_ops, summarize_names,
to_aggregate, m.action_prob_op,
m.input_tensors, scope_name=scope_name)
if summary_mode == 'val':
arop, arop_summary_iters, arop_eval_fns = _summary_vis(
m, task_params.batch_size, task_params.num_steps,
arop_full_summary_iters)
s_ops.additional_return_ops += arop
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
if args.arch.readout_maps:
arop, arop_summary_iters, arop_eval_fns = _summary_readout_maps(
m, task_params.num_steps, arop_full_summary_iters)
s_ops.additional_return_ops += arop
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
return s_ops
| bsd-2-clause |
nils-wisiol/pypuf | pypuf/studies/why_attackers_lose/fig_04_a.py | 1 | 3434 | """
Figure 4 (a) of "Why attackers lose: design and security analysis of arbitrarily large
XOR arbiter PUFs", Accepted 26 Feb 2019 Journal of Cryptographic Engineering.
This study examines the minimum number of votes needed such
that for a uniformly random challenge c we have Pr[Stab(c) ≥ 95%] ≥
80% for different k, as determined by a simulation (Sect. 6.2). The
simulation uses arbiter chain length of n = 32; however, we showed
that the results are independent of n. This log–log graph confirms the
result that the number of votes required grows polynomially.
"""
from matplotlib import pyplot
from matplotlib.ticker import FixedLocator, ScalarFormatter
from seaborn import lineplot, scatterplot
from pypuf.experiments.experiment.majority_vote import ExperimentMajorityVoteFindVotes, Parameters
from pypuf.studies.base import Study
class NumberOfVotesRequiredStudy(Study):
"""
Generates Figure 4 (a) of "Why attackers lose: design and security analysis of arbitrarily large XOR arbiter PUFs"
"""
SHUFFLE = True
COMPRESSION = True
RESTARTS = 200
K_RANGE = 2
K_MAX = 32
LOWERCASE_N = 32
UPPERCASE_N = 2000
S_RATIO = .033
ITERATIONS = 10
SEED_CHALLENGES = 0xf000
STAB_C = .95
STAB_ALL = .80
def experiments(self):
e = []
for i in range(self.RESTARTS):
for k in range(self.K_RANGE, self.K_MAX + 1, self.K_RANGE):
e.append(ExperimentMajorityVoteFindVotes(
progress_log_prefix=None,
parameters=Parameters(
n=self.LOWERCASE_N,
k=k,
challenge_count=self.UPPERCASE_N,
seed_instance=0xC0DEBA5E + i,
seed_instance_noise=0xdeadbeef + i,
transformation='id',
combiner='xor',
mu=0,
sigma=1,
sigma_noise_ratio=self.S_RATIO,
seed_challenges=self.SEED_CHALLENGES + i,
desired_stability=self.STAB_C,
overall_desired_stability=self.STAB_ALL,
minimum_vote_count=1,
iterations=self.ITERATIONS,
bias=None
)
))
return e
def plot(self):
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set(xscale='log', yscale='log')
ax.xaxis.set_major_locator(FixedLocator([2, 4, 6, 8, 12, 16, 20, 24, 28, 32]))
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_major_locator(FixedLocator([1, 2, 5, 10, 20, 50]))
ax.yaxis.set_major_formatter(ScalarFormatter())
r = self.experimenter.results[['k', 'vote_count']].groupby(['k']).mean().reset_index()
lineplot(
x='k', y='vote_count', data=r,
ax=ax, estimator=None, ci=None
)
scatterplot(
x='k', y='vote_count', data=r, ax=ax
)
fig = ax.get_figure()
fig.set_size_inches(6, 2.5)
ax.set_xlabel('number of arbiter chains in the MV XOR Arbiter PUF')
ax.set_ylabel('number of votes')
ax.set_title('Number of votes required for Pr[Stab(c)>95%] > 80%')
fig.savefig('figures/{}.pdf'.format(self.name()), bbox_inches='tight', pad_inches=0)
| gpl-3.0 |
NeuroDataDesign/pan-synapse | pipeline_1/code/tests/clusterTests.py | 1 | 2720 | import sys
sys.path.insert(0, '../functions/')
from epsilonDifference import epsilonDifference as floatEq
from cluster import Cluster
import epsilonDifference as epDiff
import matplotlib.pyplot as plt
import connectLib as cLib
import pickle
testData1 = [[3,3,3], [3,3,2], [3,3,4], [3,2,3], [3,4,3], [2,3,3], [4,3,3]]
testData2 = [[3,3,3], [3,3,4], [3,4,4], [3,4,5], [3,5,5], [4,5,5], [4,5,6]]
testData3 = [[0, 0, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
testData4 = pickle.load(open('synthDat/exponDecayIndexList.synth', 'r'))
testData5 = pickle.load(open('synthDat/smallTwoGaussian.synth', 'r'))
print 'Cluster in cluster.py'
testCluster1 = Cluster(testData1)
testCluster2 = Cluster(testData2)
testCluster3 = Cluster(testData3)
testCluster4 = Cluster(testData4)
#test the centroid method
print '\tTest 1: ', testCluster1.getCentroid() == [3., 3., 3.],'\n\t\tExpected: [3, 3, 3]\tResult: ', testCluster1.getCentroid()
print '\tTest 2: ', epDiff.epsilonDifference(3.28571429, testCluster2.getCentroid()[0], .001) and epDiff.epsilonDifference(4.14285714, testCluster2.getCentroid()[1], .001) and epDiff.epsilonDifference(4.57142857, testCluster2.getCentroid()[2], .001),'\n\t\tExpected: [3.28571429, 4.14285714, 4.57142857]\tResult: ', testCluster2.getCentroid()
print '\tTest 3: ', testCluster3.getCentroid() == [0.5, 0.5, 0.5],'\n\t\tExpected: [0.5, 0.5, 0.5]\tResult: ', testCluster3.getCentroid()
#test the std distance method
print '\tTest 4: ', testCluster3.getStdDeviation() == 0,'\n\t\tExpected: 0\tResult: ', testCluster3.getStdDeviation()
print '\tTest 5: ', epDiff.epsilonDifference(testCluster1.getStdDeviation(), 0.3499271),'\n\t\tExpected: 0.3499271\tResult: ', testCluster1.getStdDeviation()
#test the getVolume method
print '\tTest 6: ', testCluster1.getVolume() == 7,'\n\t\tExpected: 7\tResult: ', testCluster1.getVolume()
print '\tTest 7: ', testCluster2.getVolume() == 7,'\n\t\tExpected: 7\tResult: ', testCluster2.getVolume()
print '\tTest 8: ', testCluster3.getVolume() == 8,'\n\t\tExpected: 8\tResult: ', testCluster3.getVolume()
#test the densityOfSlice method
#NOTE:slicing from 1 to remove background cluster
clusterList = cLib.connectedComponents(cLib.otsuVox(testData5))[1:]
test10 = cLib.densityOfSlice(clusterList, 0, 5, 0, 5, 0, 5)
print '\tTest 10: ', epDiff.epsilonDifference(test10, 2.22222222),'\n\t\tExpected: 2.22222\tResult: ', test10
test11 = cLib.densityOfSlice(clusterList, 0, 2, 0, 2, 0, 2)
print '\tTest 11: ', epDiff.epsilonDifference(test11, 17.3611),'\n\t\tExpected: 17.3611\tResult: ', test11
test12 = cLib.densityOfSlice(clusterList, 2, 3, 2, 3, 2, 3)
print '\tTest 12: ', test12 == 0.,'\n\t\tExpected: 0.\tResult: ', test12
| apache-2.0 |
tylerbrazier/archive | datamining/assign3Kmeans/kmeans.py | 1 | 3230 | #!/usr/bin/python2
# Not very optimized
import sys
import math
import random
import matplotlib.pyplot as plot
import numpy
def dist(pointA, pointB):
"pointA and pointB should be lists"
total = 0
for i in range(0, len(pointA)):
total += (pointA[i] - pointB[i])**2
return math.sqrt(total)
def findClosest(point, meanPoints):
'''
returns the index of the mean point in meanPoints that the
point argument is closest to.
'''
index = 0
shortestDist = dist(point, meanPoints[0])
for i in range(1, len(meanPoints)):
currentDist = dist(point, meanPoints[i])
if currentDist < shortestDist:
shortestDist = currentDist
index = i
return index
def findCentroid(points):
"argument is a list of lists; returns a list (point)"
totals = [0 for attr in points[0]] # holds total for each point attribute
for point in points:
for i in range(0, len(point)):
totals[i] += point[i]
centroid = []
for i in range(0, len(totals)):
centroid.append(totals[i] / len(points))
return centroid
''' old implementation
totalX = 0
totalY = 0
for point in points:
totalX += point.x
totalY += point.y
return Point( (totalX / len(points)), (totalY / len(points)) )
'''
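# Usage: ./kmeans.py <datafile> <k>, where <datafile> is a whitespace-delimited
# numeric table readable by numpy.loadtxt (one point per row).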
filename = sys.argv[1]
k = int(sys.argv[2])
data = numpy.loadtxt(filename)
meanPoints = []
# find initial random means
maxAttrs = [0 for i in data[1]]
for point in data:
for i in range(0, len(point)):
if point[i] > maxAttrs[i]:
maxAttrs[i] = point[i]
for i in range(0, k):
randPoint = []
for maxAttr in maxAttrs:
randPoint.append(random.random() * maxAttr)
meanPoints.append(randPoint)
maxIterations = 20
epsilonThreshold = 0.00001
delta = 1
iterations = 0
while iterations < maxIterations and delta > epsilonThreshold:
delta = 0
# assign points to means
memberships = [ [] for i in range(0, k) ] # [ [], [] ] when k = 2
membersToPrint = [] # for the report of which points belong where
for point in data:
memberships[findClosest(point, meanPoints)].append(point)
membersToPrint.append(findClosest(point, meanPoints))
# update mean points
previousMeanPoints = meanPoints
meanPoints = []
for group in memberships:
if len(group) != 0:
meanPoints.append(findCentroid(group))
# calculate delta
for i in range(0, len(meanPoints)):
delta += dist(meanPoints[i], previousMeanPoints[i])
iterations += 1
# report
print "mean points :", meanPoints
for i in range(0, len(memberships)):
print "number of points in cluster", i, ":", len(memberships[i])
print "number of iterations :", iterations
print "delta :", delta
print "membership :", membersToPrint
# plot 2d data
if data.shape[1] == 2:
xs = []
ys = []
for point in data:
xs.append(point[0])
ys.append(point[1])
meanXs = []
meanYs = []
for point in meanPoints:
meanXs.append(point[0])
meanYs.append(point[1])
plot.plot(xs, ys, 'ro', meanXs, meanYs, 'bs')
plot.axis([0, round(max(xs)) + 1, 0, round(max(ys)) + 1])
plot.show()
| mit |
pbrod/scipy | scipy/spatial/_plotutils.py | 23 | 5505 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
return func(obj, ax=ax, **kw)
# As of matplotlib 2.0, the "hold" mechanism is deprecated.
# When matplotlib 1.x is no longer supported, this check can be removed.
was_held = ax.ishold()
if was_held:
return func(obj, ax=ax, **kw)
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
x, y = tri.points.T
ax.plot(x, y, 'o')
ax.triplot(x, y, tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
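# Minimal usage sketch (assumes scipy.spatial and matplotlib are installed):
#
#     import numpy as np
#     from scipy.spatial import Delaunay
#     tri = Delaunay(np.random.rand(30, 2))
#     delaunay_plot_2d(tri)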
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
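# Minimal usage sketch:
#
#     import numpy as np
#     from scipy.spatial import ConvexHull
#     hull = ConvexHull(np.random.rand(30, 2))
#     convex_hull_plot_2d(hull)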
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_points: bool, optional
Add the Voronoi points to the plot.
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
line_colors : string, optional
Specifies the line color for polygon boundaries
line_width : float, optional
Specifies the line width for polygon boundaries
line_alpha: float, optional
Specifies the line alpha for polygon boundaries
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if kw.get('show_points', True):
ax.plot(vor.points[:,0], vor.points[:,1], '.')
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_colors = kw.get('line_colors', 'k')
line_width = kw.get('line_width', 1.0)
line_alpha = kw.get('line_alpha', 1.0)
center = vor.points.mean(axis=0)
ptp_bound = vor.points.ptp(axis=0)
finite_segments = []
infinite_segments = []
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
finite_segments.append(vor.vertices[simplex])
else:
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
infinite_segments.append([vor.vertices[i], far_point])
ax.add_collection(LineCollection(finite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='solid'))
ax.add_collection(LineCollection(infinite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
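# Minimal usage sketch (the show_points/show_vertices/line_* kwargs are optional):
#
#     import numpy as np
#     from scipy.spatial import Voronoi
#     vor = Voronoi(np.random.rand(30, 2))
#     voronoi_plot_2d(vor, show_vertices=False)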
| bsd-3-clause |
arabenjamin/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
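# X now has 150 samples and 24 features: the 4 informative iris measurements
# followed by the 20 noise columns added above.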
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tests/test_generic.py | 2 | 53567 | # -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (Index, Series, DataFrame, Panel,
isnull, notnull, date_range, period_range)
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_almost_equal,
assert_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
#------------------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
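        # e.g. self._construct(4, value=9) yields a length-4 Series, a 4x4
        # DataFrame or a 4x4x4 Panel (depending on the subclass) filled with 9;
        # value='empty' builds an empty object instead.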
if isinstance(shape,int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name,None)
else:
arr = np.empty(shape,dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape/arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr,new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr,dtype=dtype,**kwargs)
def _compare(self, result, expected):
self._comparator(result,expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = { axis : list('ABCD') }
obj = self._construct(4,**kwargs)
# no values passed
#self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{ axis : str.lower })
expected = obj.copy()
setattr(expected,axis,list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = { }
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n,**kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n,value='empty',**kwargs)
self._compare(result,expected)
# get the bool data
arr = np.array([True,True,False,True])
o = self._construct(n,value=arr,**kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so we can't test for non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i,d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda : bool(obj == 0))
self.assertRaises(ValueError, lambda : bool(obj == 1))
self.assertRaises(ValueError, lambda : bool(obj))
obj = self._construct(shape=4,value=1)
self.assertRaises(ValueError, lambda : bool(obj == 0))
self.assertRaises(ValueError, lambda : bool(obj == 1))
self.assertRaises(ValueError, lambda : bool(obj))
obj = self._construct(shape=4,value=np.nan)
self.assertRaises(ValueError, lambda : bool(obj == 0))
self.assertRaises(ValueError, lambda : bool(obj == 1))
self.assertRaises(ValueError, lambda : bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda : bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4,value=1)
obj2 = self._construct(shape=4,value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda : obj1 and obj2)
self.assertRaises(ValueError, lambda : obj1 or obj2)
self.assertRaises(ValueError, lambda : not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min','max','max','var','std','prod','sum','cumsum','cumprod',
'median','skew','kurt','compound','cummax','cummin','all','any']:
f = getattr(np,op,None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x,m,None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y,m,None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in [ '__add__','__sub__','__truediv__','__mul__' ]:
result = getattr(o,op)(1)
self.check_metadata(o,result)
# ops with like
for op in [ '__add__','__sub__','__truediv__','__mul__' ]:
try:
result = getattr(o,op)(o)
self.check_metadata(o,result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in [ '__eq__','__le__', '__ge__' ]:
v1 = getattr(o,op)(o)
self.check_metadata(o,v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o,result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in [ '__eq__','__le__', '__ge__' ]:
# this is a name matching op
v1 = getattr(o,op)(o)
v2 = getattr(o,op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [ tm.makeFloatIndex, tm.makeIntIndex,
tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
axis = o._get_axis_name(0)
setattr(o,axis,index(len(getattr(o,axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[:])
self._compare(o.tail(0), o.iloc[0:])
# bounded
self._compare(o.head(len(o)+1), o)
self._compare(o.tail(len(o)+1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10**len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o,5)) == 5)
self.assertTrue(len(np.array_split(o,2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x,y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11,21,31],
index=MultiIndex.from_tuples([("A",x) for x in ["a","B","c"]]))
result = s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1,2,3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1,'2',3.])
result = o._get_numeric_data()
expected = Series([],dtype=object)
self._compare(result, expected)
o = Series([True,False,True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True,False,True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101',periods=3))
result = o._get_numeric_data()
expected = Series([],dtype='M8[ns]')
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [ Series([np.nan]), Series([pd.NaT]), Series([True]), Series([False]) ]:
self.assertRaises(ValueError, lambda : bool(s))
for s in [ Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda : s.bool())
# multiple bool are still an error
for s in [Series([True,True]), Series([False, False])]:
self.assertRaises(ValueError, lambda : bool(s))
self.assertRaises(ValueError, lambda : s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]),
Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda : bool(s))
self.assertRaises(ValueError, lambda : s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3),range(3))
o.name = 'foo'
o2 = Series(range(3),range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o,result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101',periods=1000,freq='s'),
name='foo')
result = ts.resample('1T')
self.check_metadata(ts,result)
result = ts.resample('1T',how='min')
self.check_metadata(ts,result)
result = ts.resample('1T',how=lambda x: x.sum())
self.check_metadata(ts,result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name','filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([ getattr(o,name) for o in other.objs if getattr(o,name,None) ])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
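        # With this override, concat joins the custom `filename` metadata of its
        # inputs ('foo' + 'bar' -> 'foo+bar'), which the assertions below check.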
result = pd.concat([o, o2])
self.assertEqual(result.filename,'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.], index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_percentiles(self):
with tm.assert_produces_warning(FutureWarning):
desc = self.series.describe(percentile_width=50)
assert '75%' in desc.index
assert '25%' in desc.index
with tm.assert_produces_warning(FutureWarning):
desc = self.series.describe(percentile_width=95)
assert '97.5%' in desc.index
assert '2.5%' in desc.index
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
assert_series_equal(noneSeries.describe(),
Series([0, 0], index=['count', 'unique']))
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x,y)
def test_rename_mi(self):
df = DataFrame([11,21,31],
index=MultiIndex.from_tuples([("A",x) for x in ["a","B","c"]]))
result = df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda : df.bool())
self.assertRaises(ValueError, lambda : bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5], 'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.], 'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5], 'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3,'A'] = 3
expected.loc[5,'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5], 'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.], 'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5], 'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.])
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2,'A'] = 3
expected.ix[5,'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
result = df.interpolate(method='pchip')
expected.ix[2,'A'] = 3
expected.ix[5,'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3,1] = 5
expected.loc[0,2] = 3
expected.loc[1,3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7], 'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array([1, 2, 3, 4], dtype='float64'),
'B': np.array([1, 2, 3, 4], dtype='int64'),
'C': np.array([1., 2., 3, 4.], dtype='float64'),
'D': np.array([1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
desc = tm.makeDataFrame().describe()
desc = tm.makeMixedDataFrame().describe()
desc = tm.makeTimeDataFrame().describe()
def test_describe_percentiles(self):
with tm.assert_produces_warning(FutureWarning):
desc = tm.makeDataFrame().describe(percentile_width=50)
assert '75%' in desc.index
assert '25%' in desc.index
with tm.assert_produces_warning(FutureWarning):
desc = tm.makeDataFrame().describe(percentile_width=95)
assert '97.5%' in desc.index
assert '2.5%' in desc.index
def test_describe_quantiles_both(self):
with tm.assertRaises(ValueError):
tm.makeDataFrame().describe(percentile_width=50,
percentiles=[25, 75])
def test_describe_percentiles_percent_or_raw(self):
df = tm.makeDataFrame()
with tm.assertRaises(ValueError):
df.describe(percentiles=[10, 50, 100])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD',]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC',1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal( descD.ts, df.ts.describe())
desc = df.describe(include=['object','number', 'datetime'])
assert_frame_equal(desc.loc[:,["numC","numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:,["catA","catB"]].dropna(), descC)
        descDs = descD.sort_index()  # the index order changes for mixed-types
assert_frame_equal(desc.loc[:,"ts":].dropna().sort_index(), descDs)
desc = df.loc[:,'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:,'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles = [], include='all')
cnt = Series(data=[4,4,6,6,6], index=['catA','catB','numC','numD','ts'])
assert_series_equal( desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles = [], include='all')
assert_series_equal( desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC","numD"], 1).describe(percentiles = [], include='all')
assert_series_equal( desc.count(), cnt.drop(["numC","numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24)%20,"D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta("8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22, 3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df,result)
# resample
df = DataFrame(np.random.randn(1000,2),
index=date_range('20130101',periods=1000,freq='s'))
result = df.resample('1T')
self.check_metadata(df,result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right, name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename,'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([ getattr(o,name) for o in other.objs if getattr(o,name,None) ])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename,'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
for fn in ['tz_localize', 'tz_convert']:
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
self.assertTrue(df1.index.equals(l0_expected))
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5),
MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
self.assertFalse(df3.index.levels[0].equals(l0))
self.assertTrue(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1))
self.assertFalse(df3.index.levels[1].equals(l1_expected))
df3 = getattr(df2, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df5 = getattr(df4, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
# Bad Inputs
for fn in ['tz_localize', 'tz_convert']:
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with tm.assertRaisesRegexp(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x':[1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert_equal(df.y, 5)
assert_series_equal(df['y'], Series([2, 4, 6]))
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
class TestNDFrame(tm.TestCase):
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [ tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries() ]:
tm.assert_series_equal(s.squeeze(),s)
for df in [ tm.makeTimeDataFrame() ]:
tm.assert_frame_equal(df.squeeze(),df)
for p in [ tm.makePanel() ]:
tm.assert_panel_equal(p.squeeze(),p)
for p4d in [ tm.makePanel4D() ]:
tm.assert_panel4d_equal(p4d.squeeze(),p4d)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(),df['A'])
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(),p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'],minor_axis=['A'])
tm.assert_series_equal(p.squeeze(),p.ix['ItemA',:,'A'])
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(),p4d['label1'])
p4d = tm.makePanel4D().reindex(labels=['label1'],items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(),p4d.ix['label1','ItemA'])
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s1[1] = 99
self.assertFalse(s1.equals(s2))
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s2[0] = 9.9
self.assertFalse(s1.equals(s2))
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(np.random.random(10,), index=index, columns=['floats'])
df1['text'] = 'the sky is so blue. we could use more chocolate.'.split()
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.ix[::2] = nan
df2 = df1.copy()
self.assertTrue(df1['text'].equals(df2['text']))
self.assertTrue(df1['start'].equals(df2['start']))
self.assertTrue(df1['end'].equals(df2['end']))
self.assertTrue(df1['diff'].equals(df2['diff']))
self.assertTrue(df1['bool'].equals(df2['bool']))
self.assertTrue(df1.equals(df2))
self.assertFalse(df1.equals(object))
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
self.assertFalse(df1.equals(different))
# different index
different_index = -index
different = df2.set_index(different_index)
self.assertFalse(df1.equals(different))
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
self.assertFalse(df1.equals(different))
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
self.assertTrue(df1.equals(df2))
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
self.assertTrue(df3.equals(df2))
df2 = df1.set_index(['floats'], append=True)
self.assertFalse(df3.equals(df2))
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
self.assertTrue(df3.equals(df2))
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
self.assertTrue(a.equals(a))
self.assertTrue(a.equals(b))
self.assertTrue(a.equals(c))
self.assertTrue(a.equals(d))
self.assertFalse(a.equals(e))
self.assertTrue(e.equals(f))
def test_describe_raises(self):
with tm.assertRaises(NotImplementedError):
tm.makePanel().describe()
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
Nacturne/CoreNLP_copy | python_tools/lexical/stats_gen.py | 1 | 1460 | from Core import TreeClass as tc
import pandas as pd
file_original = open('input/test.txt', 'r')
file_predicted = open('input/full_hid25_re0-0007-test.txt', 'r')
original_temp = []
predicted_temp = []
for line in file_original:
original_tree = tc.ScoreTree(line)
for node in original_tree.allNodes():
if not node.isLeaf():
data_entry = [] # will be populated with ['score', 'l_child_score', 'r_child_score', 'phrase_length']
data_entry.append(int(node.label))
data_entry.append(int(node.children[0].label))
data_entry.append(int(node.children[1].label))
data_entry.append(node.num_phrases())
original_temp.append(data_entry)
for line in file_predicted:
predicted_tree = tc.ScoreTree(line)
for node in predicted_tree.allNodes():
if not node.isLeaf():
data_entry = [] # will be populated with ['pred_score', 'pred_phrase_length']
data_entry.append(int(node.label))
data_entry.append(node.num_phrases())
predicted_temp.append(data_entry)
original_frame = pd.DataFrame(original_temp, columns=['score', 'l_child_score', 'r_child_score', 'phrase_length'])
predicted_frame = pd.DataFrame(predicted_temp, columns=['pred_score', 'pred_phrase_length'])
result = pd.concat([original_frame, predicted_frame], axis=1)
result.to_csv('output/stats.txt',sep='\t')
file_original.close()
file_predicted.close() | gpl-2.0 |
cactusbin/nyt | matplotlib/examples/pylab_examples/line_collection2.py | 9 | 1327 | from pylab import *
from matplotlib.collections import LineCollection
# In order to efficiently plot many lines in a single set of axes,
# Matplotlib has the ability to add the lines all at once. Here is a
# simple example showing how it is done.
N = 50
x = arange(N)
# Here are many sets of y to plot vs x
ys = [x+i for i in x]
# We need to set the plot limits, they will not autoscale
ax = axes()
ax.set_xlim((amin(x),amax(x)))
ax.set_ylim((amin(amin(ys)),amax(amax(ys))))
# colors is sequence of rgba tuples
# linestyle is a string or dash tuple. Legal string values are
# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)
# where onoffseq is an even length tuple of on and off ink in points.
# If linestyle is omitted, 'solid' is used
# See matplotlib.collections.LineCollection for more information
line_segments = LineCollection([list(zip(x,y)) for y in ys], # Make a sequence of x,y pairs
linewidths = (0.5,1,1.5,2),
linestyles = 'solid')
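# set_array supplies one scalar per line segment; the colorbar below then maps
# each line's value (here its index in x) through the current colormap.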
line_segments.set_array(x)
ax.add_collection(line_segments)
fig = gcf()
axcb = fig.colorbar(line_segments)
axcb.set_label('Line Number')
ax.set_title('Line Collection with mapped colors')
sci(line_segments) # This allows interactive changing of the colormap.
show()
| unlicense |
wilsonkichoi/zipline | zipline/finance/trading.py | 3 | 18887 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import logbook
import datetime
import pandas as pd
import numpy as np
from six import string_types
from sqlalchemy import create_engine
from zipline.assets import AssetDBWriter, AssetFinder
from zipline.data.loader import load_market_data
from zipline.utils import tradingcalendar
from zipline.errors import (
NoFurtherDataError
)
from zipline.utils.memoize import remember_last, lazyval
log = logbook.Logger('Trading')
class TradingEnvironment(object):
"""
The financial simulations in zipline depend on information
about the benchmark index and the risk free rates of return.
The benchmark index defines the benchmark returns used in
the calculation of performance metrics such as alpha/beta. Many
components, including risk, performance, transforms, and
batch_transforms, need access to a calendar of trading days and
market hours. The TradingEnvironment maintains two time keeping
facilities:
- a DatetimeIndex of trading days for calendar calculations
- a timezone name, which should be local to the exchange
hosting the benchmark index. All dates are normalized to UTC
for serialization and storage, and the timezone is used to
ensure proper rollover through daylight savings and so on.
User code will not normally need to use TradingEnvironment
directly. If you are extending zipline's core financial
components and need to use the environment, you must import the module and
build a new TradingEnvironment object, then pass that TradingEnvironment as
the 'env' arg to your TradingAlgorithm.
Parameters
----------
load : callable, optional
The function that returns benchmark returns and treasury curves.
The treasury curves are expected to be a DataFrame with an index of
dates and columns of the curve names, e.g. '10year', '1month', etc.
bm_symbol : str, optional
The benchmark symbol
exchange_tz : tz-coercable, optional
The timezone of the exchange.
min_date : datetime, optional
The oldest date that we know about in this environment.
max_date : datetime, optional
The most recent date that we know about in this environment.
env_trading_calendar : pd.DatetimeIndex, optional
The calendar of datetimes that define our market hours.
asset_db_path : str or sa.engine.Engine, optional
The path to the assets db or sqlalchemy Engine object to use to
construct an AssetFinder.
"""
# Token used as a substitute for pickling objects that contain a
# reference to a TradingEnvironment
PERSISTENT_TOKEN = "<TradingEnvironment>"
def __init__(self,
load=None,
bm_symbol='^GSPC',
exchange_tz="US/Eastern",
min_date=None,
max_date=None,
env_trading_calendar=tradingcalendar,
asset_db_path=':memory:'):
self.trading_day = env_trading_calendar.trading_day.copy()
# `tc_td` is short for "trading calendar trading days"
tc_td = env_trading_calendar.trading_days
self.trading_days = tc_td[tc_td.slice_indexer(min_date, max_date)]
self.first_trading_day = self.trading_days[0]
self.last_trading_day = self.trading_days[-1]
self.early_closes = env_trading_calendar.get_early_closes(
self.first_trading_day, self.last_trading_day)
self.open_and_closes = env_trading_calendar.open_and_closes.loc[
self.trading_days]
self.bm_symbol = bm_symbol
if not load:
load = load_market_data
self.benchmark_returns, self.treasury_curves = \
load(self.trading_day, self.trading_days, self.bm_symbol)
if max_date:
tr_c = self.treasury_curves
# Mask the treasury curves down to the current date.
# In the case of live trading, the last date in the treasury
# curves would be the day before the date considered to be
# 'today'.
self.treasury_curves = tr_c[tr_c.index <= max_date]
self.exchange_tz = exchange_tz
if isinstance(asset_db_path, string_types):
asset_db_path = 'sqlite:///%s' % asset_db_path
self.engine = engine = create_engine(asset_db_path)
else:
self.engine = engine = asset_db_path
if engine is not None:
AssetDBWriter(engine).init_db()
self.asset_finder = AssetFinder(engine)
else:
self.asset_finder = None
@lazyval
def market_minutes(self):
return self.minutes_for_days_in_range(self.first_trading_day,
self.last_trading_day)
def write_data(self, **kwargs):
"""Write data into the asset_db.
Parameters
----------
**kwargs
Forwarded to AssetDBWriter.write
"""
AssetDBWriter(self.engine).write(**kwargs)
def normalize_date(self, test_date):
test_date = pd.Timestamp(test_date, tz='UTC')
return pd.tseries.tools.normalize_date(test_date)
def utc_dt_in_exchange(self, dt):
return pd.Timestamp(dt).tz_convert(self.exchange_tz)
def exchange_dt_in_utc(self, dt):
return pd.Timestamp(dt, tz=self.exchange_tz).tz_convert('UTC')
def is_market_hours(self, test_date):
if not self.is_trading_day(test_date):
return False
mkt_open, mkt_close = self.get_open_and_close(test_date)
return test_date >= mkt_open and test_date <= mkt_close
def is_trading_day(self, test_date):
dt = self.normalize_date(test_date)
return (dt in self.trading_days)
def next_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=1)
while dt <= self.last_trading_day:
dt += delta
if dt in self.trading_days:
return dt
return None
def previous_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=-1)
while self.first_trading_day < dt:
dt += delta
if dt in self.trading_days:
return dt
return None
def add_trading_days(self, n, date):
"""
Adds n trading days to date. If this would fall outside of the
trading calendar, a NoFurtherDataError is raised.
:Arguments:
n : int
The number of days to add to date, this can be positive or
negative.
date : datetime
The date to add to.
:Returns:
new_date : datetime
n trading days added to date.
"""
if n == 1:
return self.next_trading_day(date)
if n == -1:
return self.previous_trading_day(date)
idx = self.get_index(date) + n
if idx < 0 or idx >= len(self.trading_days):
raise NoFurtherDataError(
msg='Cannot add %d days to %s' % (n, date)
)
return self.trading_days[idx]
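# Illustrative only (dates are assumptions): this walks the trading_days index
# by n entries in either direction, raising NoFurtherDataError past either end.
#
#     env.add_trading_days(2, pd.Timestamp('2006-01-03', tz='UTC'))   # two sessions later
#     env.add_trading_days(-2, pd.Timestamp('2006-01-09', tz='UTC'))  # two sessions earlier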
def days_in_range(self, start, end):
start_date = self.normalize_date(start)
end_date = self.normalize_date(end)
mask = ((self.trading_days >= start_date) &
(self.trading_days <= end_date))
return self.trading_days[mask]
def opens_in_range(self, start, end):
return self.open_and_closes.market_open.loc[start:end]
def closes_in_range(self, start, end):
return self.open_and_closes.market_close.loc[start:end]
def minutes_for_days_in_range(self, start, end):
"""
Get all market minutes for the days between start and end, inclusive.
"""
start_date = self.normalize_date(start)
end_date = self.normalize_date(end)
o_and_c = self.open_and_closes[
self.open_and_closes.index.slice_indexer(start_date, end_date)]
opens = o_and_c.market_open
closes = o_and_c.market_close
one_min = pd.Timedelta(1, unit='m')
all_minutes = []
for i in range(0, len(o_and_c.index)):
market_open = opens[i]
market_close = closes[i]
day_minutes = np.arange(market_open, market_close + one_min,
dtype='datetime64[m]')
all_minutes.append(day_minutes)
# Concatenate all minutes and truncate minutes before start/after end.
return pd.DatetimeIndex(
np.concatenate(all_minutes), copy=False, tz='UTC',
)
def next_open_and_close(self, start_date):
"""
Given the start_date, returns the next open and close of
the market.
"""
next_open = self.next_trading_day(start_date)
if next_open is None:
raise NoFurtherDataError(
msg=("Attempt to backtest beyond available history. "
"Last known date: %s" % self.last_trading_day)
)
return self.get_open_and_close(next_open)
def previous_open_and_close(self, start_date):
"""
Given the start_date, returns the previous open and close of the
market.
"""
previous = self.previous_trading_day(start_date)
if previous is None:
raise NoFurtherDataError(
msg=("Attempt to backtest beyond available history. "
"First known date: %s" % self.first_trading_day)
)
return self.get_open_and_close(previous)
def next_market_minute(self, start):
"""
Get the next market minute after @start. This is either the immediate
next minute, the open of the same day if @start is before the market
open on a trading day, or the open of the next market day after @start.
"""
if self.is_trading_day(start):
market_open, market_close = self.get_open_and_close(start)
# If start before market open on a trading day, return market open.
if start < market_open:
return market_open
# If start is during trading hours, then get the next minute.
elif start < market_close:
return start + datetime.timedelta(minutes=1)
# If start is not in a trading day, or is after the market close
# then return the open of the *next* trading day.
return self.next_open_and_close(start)[0]
@remember_last
def previous_market_minute(self, start):
"""
Get the previous market minute before @start. This is either the immediate
previous minute, the close of the same day if @start is after the close
on a trading day, or the close of the market day before @start.
"""
if self.is_trading_day(start):
market_open, market_close = self.get_open_and_close(start)
# If start after the market close, return market close.
if start > market_close:
return market_close
# If start is during trading hours, then get previous minute.
if start > market_open:
return start - datetime.timedelta(minutes=1)
# If start is not a trading day, or is before the market open
# then return the close of the *previous* trading day.
return self.previous_open_and_close(start)[1]
def get_open_and_close(self, day):
index = self.open_and_closes.index.get_loc(day.date())
todays_minutes = self.open_and_closes.iloc[index]
return todays_minutes[0], todays_minutes[1]
def market_minutes_for_day(self, stamp):
market_open, market_close = self.get_open_and_close(stamp)
return pd.date_range(market_open, market_close, freq='T')
def open_close_window(self, start, count, offset=0, step=1):
"""
Return a DataFrame containing `count` market opens and closes,
beginning with `start` + `offset` days and continuing `step` minutes at
a time.
"""
# TODO: Correctly handle end of data.
start_idx = self.get_index(start) + offset
stop_idx = start_idx + (count * step)
index = np.arange(start_idx, stop_idx, step)
return self.open_and_closes.iloc[index]
def market_minute_window(self, start, count, step=1):
"""
Return a DatetimeIndex containing `count` market minutes, starting with
`start` and continuing `step` minutes at a time.
"""
if not self.is_market_hours(start):
raise ValueError("market_minute_window starting at "
"non-market time {minute}".format(minute=start))
all_minutes = []
current_day_minutes = self.market_minutes_for_day(start)
first_minute_idx = current_day_minutes.searchsorted(start)
minutes_in_range = current_day_minutes[first_minute_idx::step]
# Build up list of lists of days' market minutes until we have count
# minutes stored altogether.
while True:
if len(minutes_in_range) >= count:
# Truncate off extra minutes
minutes_in_range = minutes_in_range[:count]
all_minutes.append(minutes_in_range)
count -= len(minutes_in_range)
if count <= 0:
break
if step > 0:
start, _ = self.next_open_and_close(start)
current_day_minutes = self.market_minutes_for_day(start)
else:
_, start = self.previous_open_and_close(start)
current_day_minutes = self.market_minutes_for_day(start)
minutes_in_range = current_day_minutes[::step]
# Concatenate all the accumulated minutes.
return pd.DatetimeIndex(
np.concatenate(all_minutes), copy=False, tz='UTC',
)
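# Illustrative sketch (the start timestamp is an assumed market minute in UTC):
# returns 90 consecutive market minutes forward, or backward with step=-1,
# spilling into neighbouring sessions as needed.
#
#     env.market_minute_window(pd.Timestamp('2006-01-03 14:31', tz='UTC'), 90)
#     env.market_minute_window(pd.Timestamp('2006-01-03 14:31', tz='UTC'), 90, step=-1)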
def trading_day_distance(self, first_date, second_date):
first_date = self.normalize_date(first_date)
second_date = self.normalize_date(second_date)
# TODO: May be able to replace the following with searchsorted.
# Find leftmost item greater than or equal to day
i = bisect.bisect_left(self.trading_days, first_date)
if i == len(self.trading_days): # nothing found
return None
j = bisect.bisect_left(self.trading_days, second_date)
if j == len(self.trading_days):
return None
return j - i
def get_index(self, dt):
"""
Return the index of the given @dt, or the index of the preceding
trading day if the given dt is not in the trading calendar.
"""
ndt = self.normalize_date(dt)
if ndt in self.trading_days:
return self.trading_days.searchsorted(ndt)
else:
return self.trading_days.searchsorted(ndt) - 1
class SimulationParameters(object):
def __init__(self, period_start, period_end,
capital_base=10e3,
emission_rate='daily',
data_frequency='daily',
env=None,
arena='backtest'):
self.period_start = period_start
self.period_end = period_end
self.capital_base = capital_base
self.emission_rate = emission_rate
self.data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self.arena = arena
if env is not None:
self.update_internal_from_env(env=env)
def update_internal_from_env(self, env):
assert self.period_start <= self.period_end, \
"Period start falls after period end."
assert self.period_start <= env.last_trading_day, \
"Period start falls after the last known trading day."
assert self.period_end >= env.first_trading_day, \
"Period end falls before the first known trading day."
self.first_open = self._calculate_first_open(env)
self.last_close = self._calculate_last_close(env)
start_index = env.get_index(self.first_open)
end_index = env.get_index(self.last_close)
# take an inclusive slice of the environment's
# trading_days.
self.trading_days = env.trading_days[start_index:end_index + 1]
def _calculate_first_open(self, env):
"""
Finds the first trading day on or after self.period_start.
"""
first_open = self.period_start
one_day = datetime.timedelta(days=1)
while not env.is_trading_day(first_open):
first_open = first_open + one_day
mkt_open, _ = env.get_open_and_close(first_open)
return mkt_open
def _calculate_last_close(self, env):
"""
Finds the last trading day on or before self.period_end
"""
last_close = self.period_end
one_day = datetime.timedelta(days=1)
while not env.is_trading_day(last_close):
last_close = last_close - one_day
_, mkt_close = env.get_open_and_close(last_close)
return mkt_close
@property
def days_in_period(self):
"""return the number of trading days within the period [start, end)"""
return len(self.trading_days)
def __repr__(self):
return """
{class_name}(
period_start={period_start},
period_end={period_end},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close})\
""".format(class_name=self.__class__.__name__,
period_start=self.period_start,
period_end=self.period_end,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close)
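# A construction sketch (dates and capital are assumptions for illustration);
# passing `env` runs update_internal_from_env, which snaps first_open and
# last_close onto real trading sessions:
#
#     sim_params = SimulationParameters(
#         period_start=pd.Timestamp('2006-01-03', tz='UTC'),
#         period_end=pd.Timestamp('2006-03-31', tz='UTC'),
#         capital_base=100000,
#         env=env,
#     )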
def noop_load(*args, **kwargs):
"""
A method that can be substituted in as the load method in a
TradingEnvironment to prevent it from loading benchmarks.
Accepts any arguments, but returns only a tuple of Nones regardless
of input.
"""
return None, None
| apache-2.0 |
gtfierro/backchannel | readtopo.py | 1 | 3711 | import yaml
import networkx as nx
import matplotlib.pyplot as plt
from collections import defaultdict
import sys
class Topo:
def __init__(self, yaml_string):
self.raw = yaml.load(yaml_string)
self.G = nx.Graph()
self.hops = defaultdict(list)
self.hops_edges = defaultdict(list)
if 'root' not in self.raw.iterkeys():
print 'Graph has no root!'
sys.exit(1)
self.root = str(self.raw.pop('root'))
self.G.add_node(self.root)
for node in self.raw.iterkeys():
self.G.add_node(str(node))
for node, edges in self.raw.iteritems():
for edge in edges:
self.G.add_edge(str(node), str(edge))
edges = list(nx.traversal.bfs_edges(self.G, self.root))
for edge in edges:
if edge[0] == self.root: # edge[1] is single-hop
self.hops[1].append(edge[1])
self.hops_edges[1].append(edge)
continue
for i in range(1, len(edges)+1): # worst case scenario
if edge[0] in self.hops[i]:
self.hops[i+1].append(edge[1])
self.hops_edges[i+1].append(edge)
continue
print self.hops
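# The YAML this constructor expects is a mapping with a 'root' key plus one
# adjacency list per node, roughly like the sketch below (node ids are
# illustrative, inferred from the parsing above):
#
#     root: a1
#     a1: [b2, c3]
#     b2: [d4]
#     c3: []
#     d4: []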
def draw(self):
# node attrs
node_size=1600
# 1-hop, 2-hop etc
root_color = 'red'
node_tiers = ['blue','green','yellow']
node_color='blue'
node_alpha=0.3
node_text_size=12
# edge attrs
edge_color='black'
edge_alpha=0.3
edge_tickness=1
edge_text_pos=0.3
f = plt.figure()
graph_pos = nx.shell_layout(self.G)
# draw graph
nx.draw_networkx_nodes(self.G, graph_pos, nodelist=[self.root], alpha=node_alpha, node_color=root_color)
for hop, nodes in self.hops.iteritems():
if len(nodes) == 0: continue
print hop
nx.draw_networkx_nodes(self.G, graph_pos, nodelist=nodes,
alpha=node_alpha, node_color=node_tiers[hop-1])
nx.draw_networkx_edges(self.G,graph_pos, edgelist=self.hops_edges[hop],
width=edge_tickness, alpha=edge_alpha, edge_color=edge_color)
nx.draw_networkx_labels(self.G, graph_pos,font_size=node_text_size)
#print "Drawing..."
#f.savefig("graph.png")
#plt.show()
def generate_ignore_block(self, node, ignored):
def _ignore_neighbor(neighbor, OID="::212:6d02:0:"):
return 'storm.os.ignoreNeighbor("{0}{1}")'.format(OID, neighbor)
code = ''
if len(ignored) > 0:
code += 'if (storm.os.nodeid() == {0}) then\n\t'.format(int(node, 16))
code += '\n\t'.join(map(_ignore_neighbor, ignored))
code += '\nend'
return code
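# For a hypothetical node "a1" ignoring {"b2"}, the generated Lua block looks
# roughly like this (161 == int("a1", 16); body lines are tab-indented):
#
#     if (storm.os.nodeid() == 161) then
#         storm.os.ignoreNeighbor("::212:6d02:0:b2")
#     end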
def to_code(self):
edges = list(nx.traversal.bfs_edges(self.G, self.root))
node_set = set(self.G.nodes())
ignoreblocks = []
for node in self.G.nodes():
allowed_neighbors = set(self.G[node].keys())
allowed_neighbors.add(node) # add yourself
ignore_these = node_set.difference(allowed_neighbors)
ignoreblocks.append(self.generate_ignore_block(node, ignore_these))
framework = """sh = require "stormsh"
sh.start()
{0}
cord.enter_loop()
"""
code = framework.format('\n'.join(ignoreblocks))
with open('./main.lua', 'w') as f:
f.write(code)
if __name__ == '__main__':
filename = sys.argv[1]
print 'Loading topology from {0}'.format(filename)
topo = Topo(open(filename).read())
topo.draw()
topo.to_code()
| apache-2.0 |
jgliss/pydoas | docs/conf.py | 1 | 10922 | # -*- coding: utf-8 -*-
#
# PyDOAS documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 12 14:39:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import matplotlib
# This workaround follows https://github.com/spinus/sphinxcontrib-images/issues/41.
# It was added after the build error "Could not import extension
# sphinxcontrib.images (exception: cannot import name make_admonition)" occurred,
# apparently a compatibility problem between an updated Sphinx (1.6) and the
# sphinxcontrib.images extension.
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.util import compat
compat.make_admonition = BaseAdmonition
matplotlib.use('agg')
with open(os.path.join("..", "VERSION")) as f:
__version__ = f.readline()
f.close()
sys.path.insert(0, os.path.abspath('../'))
MOCK_MODULES = [
'numpy',
'pandas'
'matplotlib'
]
#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.todo',
# 'sphinx.ext.pngmath',
# 'sphinx.ext.ifconfig',
# 'sphinx.ext.viewcode',
#]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pydoas'
copyright = '2016, Jonas Gliss'
author = 'Jonas Gliss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
print("LIB VERSION %s" %__version__)
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydoasdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pydoas.tex', 'pydoas Documentation',
'Jonas Gliss', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pydoas', 'pydoas Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pydoas', 'pydoas Documentation',
author, 'pydoas', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
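# The two hooks below keep autodoc from skipping `__init__`: `skip` returns
# False (i.e. "do not skip") for members named __init__, and `setup` registers
# that handler on Sphinx's autodoc-skip-member event.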
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
autodoc_member_order = 'bysource'
images_config = {
'default_image_width' : '300px',
'default_group' : 'default'
}
| bsd-3-clause |
DGrady/pandas | pandas/tests/io/parser/usecols.py | 11 | 18059 | # -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas._libs.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
usecols = [0, 'b', 2]
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
assert len(result.columns) == 2
assert (result['b'] == exp['b']).all()
assert (result['c'] == exp['c']).all()
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
pytest.raises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'],
index_col=['b', 'c'])
tm.assert_frame_equal(expected, df)
def test_usecols_implicit_index_col(self):
# see gh-2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep=r'\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_integer_like_header(self):
data = """2,0,1
1000,2000,3000
4000,5000,6000
"""
usecols = [0, 1] # column selection by index
expected = DataFrame(data=[[1000, 2000],
[4000, 5000]],
columns=['2', '0'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['0', '1'] # column selection by name
expected = DataFrame(data=[[2000, 3000],
[5000, 6000]],
columns=['0', '1'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates(self):
# See gh-9755
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
# See gh-13604
s = """2008-02-07 09:40,1032.43
2008-02-07 09:50,1042.54
2008-02-07 10:00,1051.65
"""
parse_dates = [0]
names = ['date', 'values']
usecols = names[:]
index = Index([Timestamp('2008-02-07 09:40'),
Timestamp('2008-02-07 09:50'),
Timestamp('2008-02-07 10:00')],
name='date')
cols = {'values': [1032.43, 1042.54, 1051.65]}
expected = DataFrame(cols, index=index)
df = self.read_csv(StringIO(s), parse_dates=parse_dates, index_col=0,
usecols=usecols, header=None, names=names)
tm.assert_frame_equal(df, expected)
# See gh-14792
s = """a,b,c,d,e,f,g,h,i,j
2016/09/21,1,1,2,3,4,5,6,7,8"""
parse_dates = [0]
usecols = list('abcdefghij')
cols = {'a': Timestamp('2016-09-21'),
'b': [1], 'c': [1], 'd': [2],
'e': [3], 'f': [4], 'g': [5],
'h': [6], 'i': [7], 'j': [8]}
expected = DataFrame(cols, columns=usecols)
df = self.read_csv(StringIO(s), usecols=usecols,
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
s = """a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"""
parse_dates = [[0, 1]]
usecols = list('abcdefghij')
cols = {'a_b': '2016/09/21 1',
'c': [1], 'd': [2], 'e': [3], 'f': [4],
'g': [5], 'h': [6], 'i': [7], 'j': [8]}
expected = DataFrame(cols, columns=['a_b'] + list('cdefghij'))
df = self.read_csv(StringIO(s), usecols=usecols,
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_full_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('abcde')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_usecol_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('acd')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_unicode_strings(self):
# see gh-13219
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'AAA': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'BBB': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'AAA', u'BBB'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_single_byte_unicode_strings(self):
# see gh-13219
s = '''A,B,C,D
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'A': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'B': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'A', u'B'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_mixed_encoding_strings(self):
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(s), usecols=[u'AAA', b'BBB'])
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(s), usecols=[b'AAA', u'BBB'])
def test_usecols_with_multibyte_characters(self):
s = '''あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'あああ': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'いい': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=['あああ', 'いい'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_multibyte_unicode_characters(self):
pytest.skip('TODO: see gh-13253')
s = '''あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'あああ': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'いい': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'あああ', u'いい'])
tm.assert_frame_equal(df, expected)
def test_empty_usecols(self):
# should not raise
data = 'a,b,c\n1,2,3\n4,5,6'
expected = DataFrame()
result = self.read_csv(StringIO(data), usecols=set([]))
tm.assert_frame_equal(result, expected)
def test_np_array_usecols(self):
# See gh-12546
data = 'a,b,c\n1,2,3'
usecols = np.array(['a', 'b'])
expected = DataFrame([[1, 2]], columns=usecols)
result = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
def test_callable_usecols(self):
# See gh-14154
s = '''AaA,bBb,CCC,ddd
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'AaA': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'bBb': {0: 8, 1: 2, 2: 7},
'ddd': {0: 'a', 1: 'b', 2: 'a'}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=lambda x:
x.upper() in ['AAA', 'BBB', 'DDD'])
tm.assert_frame_equal(df, expected)
# Check that a callable returning only False returns
# an empty DataFrame
expected = DataFrame()
df = self.read_csv(StringIO(s), usecols=lambda x: False)
tm.assert_frame_equal(df, expected)
def test_incomplete_first_row(self):
# see gh-6710
data = '1,2\n1,2,3'
names = ['a', 'b', 'c']
expected = DataFrame({'a': [1, 1],
'c': [np.nan, 3]})
usecols = ['a', 'c']
df = self.read_csv(StringIO(data), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = lambda x: x in ['a', 'c']
df = self.read_csv(StringIO(data), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_uneven_length_cols(self):
# see gh-8985
usecols = [0, 1, 2]
data = '19,29,39\n' * 2 + '10,20,30,40'
expected = DataFrame([[19, 29, 39],
[19, 29, 39],
[10, 20, 30]])
df = self.read_csv(StringIO(data), header=None, usecols=usecols)
tm.assert_frame_equal(df, expected)
# see gh-9549
usecols = ['A', 'B', 'C']
data = ('A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n'
'1,2,3,,,1,\n1,2,3\n5,6,7')
expected = DataFrame({'A': [1, 3, 1, 1, 1, 5],
'B': [2, 4, 2, 2, 2, 6],
'C': [3, 5, 4, 3, 3, 7]})
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_raise_on_usecols_names_mismatch(self):
# GH 14671
data = 'a,b,c,d\n1,2,3,4\n5,6,7,8'
if self.engine == 'c':
msg = 'Usecols do not match names'
else:
msg = 'is not in list'
usecols = ['a', 'b', 'c', 'd']
df = self.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame({'a': [1, 5], 'b': [2, 6], 'c': [3, 7],
'd': [4, 8]})
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b', 'c', 'f']
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
usecols = ['a', 'b', 'f']
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(data), header=0, names=names)
expected = DataFrame({'A': [1, 5], 'B': [2, 6], 'C': [3, 7],
'D': [4, 8]})
tm.assert_frame_equal(df, expected)
# TODO: https://github.com/pandas-dev/pandas/issues/16469
# usecols = ['A','C']
# df = self.read_csv(StringIO(data), header=0, names=names,
# usecols=usecols)
# expected = DataFrame({'A': [1,5], 'C': [3,7]})
# tm.assert_frame_equal(df, expected)
#
# usecols = [0,2]
# df = self.read_csv(StringIO(data), header=0, names=names,
# usecols=usecols)
# expected = DataFrame({'A': [1,5], 'C': [3,7]})
# tm.assert_frame_equal(df, expected)
usecols = ['A', 'B', 'C', 'f']
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), header=0, names=names,
usecols=usecols)
usecols = ['A', 'B', 'f']
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), names=names, usecols=usecols)
| bsd-3-clause |
sys-bio/tellurium | tellurium/tests/sedml/test_phrasedml.py | 2 | 30460 | """
Testing phrasedml.
test_sedml_phrasedml.py : phrasedml based tests.
test_sedml_kisao.py : SED-ML kisao support
test_sedml_omex.py : SED-ML tests based on Combine Archives
test_sedml_sedml.py : sed-ml tests
"""
from __future__ import absolute_import, print_function, division
import os
import shutil
import tempfile
import unittest
import pytest
import matplotlib
import tellurium as te
try:
import tesedml as libsedml
except ImportError:
import libsedml
import phrasedml
from tellurium.sedml.utils import run_case
from tellurium import temiriam
from tellurium.utils import omex
from tellurium.sedml.tesedml import executeSEDML, executeCombineArchive
class PhrasedmlTestCase(unittest.TestCase):
""" Testing execution and archives based on phrasedml input. """
def setUp(self):
# switch the backend of matplotlib, so plots can be tested
self.backend = matplotlib.rcParams['backend']
matplotlib.pyplot.switch_backend("Agg")
# create a test instance
self.antimony = '''
model myModel
S1 -> S2; k1*S1;
S1 = 10; S2 = 0;
k1 = 1;
end
'''
self.phrasedml = '''
model1 = model "myModel"
sim1 = simulate uniform(0, 5, 100)
task1 = run sim1 on model1
plot "Figure 1" time vs S1, S2
'''
# self.tep = tephrasedml.experiment(self.antimony, self.phrasedml)
self.a1 = """
model m1()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
self.a2 = """
model m2()
v0: X1 -> X2; p1*X1;
X1 = 5.0; X2 = 20.0;
p1 = 0.2;
end
"""
def tearDown(self):
matplotlib.pyplot.switch_backend(self.backend)
matplotlib.pyplot.close('all')
def test_execute(self):
"""Test execute."""
inline_omex = '\n'.join([self.antimony, self.phrasedml])
te.executeInlineOmex(inline_omex)
def test_exportAsCombine(self):
""" Test exportAsCombine. """
inline_omex = '\n'.join([self.antimony, self.phrasedml])
tmpdir = tempfile.mkdtemp()
te.exportInlineOmex(inline_omex, os.path.join(tmpdir, 'archive.omex'))
shutil.rmtree(tmpdir)
def test_1Model1PhrasedML(self):
""" Minimal example which should work. """
antimony_str = """
model test
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
phrasedml_str = """
model0 = model "test"
sim0 = simulate uniform(0, 10, 100)
task0 = run sim0 on model0
plot task0.time vs task0.S1
"""
inline_omex = '\n'.join([antimony_str, phrasedml_str])
te.executeInlineOmex(inline_omex)
def test_1Model2PhrasedML(self):
""" Test multiple models and multiple phrasedml files. """
p1 = """
model1 = model "m1"
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model1
plot task1.time vs task1.S1, task1.S2
"""
p2 = """
model1 = model "m1"
model2 = model model1 with S1=S2+20
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model2
plot task1.time vs task1.S1, task1.S2
"""
inline_omex = '\n'.join([self.a1, p1])
te.executeInlineOmex(inline_omex)
inline_omex = '\n'.join([self.a1, p2])
te.executeInlineOmex(inline_omex)
inline_omex = '\n'.join([self.a1, p1, p2])
te.executeInlineOmex(inline_omex)
def test_2Model1PhrasedML(self):
""" Test multiple models and multiple phrasedml files. """
p1 = """
model1 = model "m1"
model2 = model "m2"
model3 = model model1 with S1=S2+20
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model1
task2 = run sim1 on model2
plot "Timecourse test1" task1.time vs task1.S1, task1.S2
plot "Timecourse test2" task2.time vs task2.X1, task2.X2
"""
inline_omex = '\n'.join([self.a1, self.a2, p1])
te.executeInlineOmex(inline_omex)
def test_2Model2PhrasedML(self):
""" Test multiple models and multiple phrasedml files. """
p1 = """
model1 = model "m1"
model2 = model "m2"
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model1
task2 = run sim1 on model2
plot task1.time vs task1.S1, task1.S2, task2.time vs task2.X1, task2.X2
"""
p2 = """
model1 = model "m1"
model2 = model "m2"
sim1 = simulate uniform(0, 20, 20)
task1 = run sim1 on model1
task2 = run sim1 on model2
plot task1.time vs task1.S1, task1.S2, task2.time vs task2.X1, task2.X2
"""
inline_omex = '\n'.join([self.a1, self.a2, p1, p2])
te.executeInlineOmex(inline_omex)
############################################
# Real world tests
############################################
def run_example(self, a_str, p_str):
# execute
tmpdir = tempfile.mkdtemp()
try:
run_case(
call_file=os.path.realpath(__file__),
antimony_str=a_str,
phrasedml_str=p_str,
working_dir=tmpdir
)
finally:
shutil.rmtree(tmpdir)
def test_case_01(self):
a_str = """
model case_01
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
p_str = """
model0 = model "case_01"
sim0 = simulate uniform(0, 10, 100)
task0 = run sim0 on model0
plot "UniformTimecourse" task0.time vs task0.S1
report task0.time vs task0.S1
"""
self.run_example(a_str, p_str)
def test_case_02(self):
a_str = """
model case_02
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
p_str = """
model0 = model "case_02"
model1 = model model0 with S1=5.0
sim0 = simulate uniform(0, 6, 100)
task0 = run sim0 on model1
task1 = repeat task0 for k1 in uniform(0.0, 5.0, 5), reset = true
plot "Repeated task with reset" task1.time vs task1.S1, task1.S2
report task1.time vs task1.S1, task1.S2
plot "Repeated task varying k1" task1.k1 vs task1.S1
report task1.k1 vs task1.S1
"""
self.run_example(a_str, p_str)
def test_case_03(self):
a_str = '''
model case_03()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_03"
mod2 = model mod1 with S2=S1+4
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
task2 = run sim1 on mod2
plot "ComputeChanges" task1.time vs task1.S1, task1.S2, task2.S1, task2.S2
report task1.time vs task1.S1, task1.S2, task2.S1, task2.S2
'''
self.run_example(a_str, p_str)
def test_case_04(self):
a_str = '''
model case_04()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_04"
mod2 = model mod1 with S2=S1+4
mod3 = model mod2 with S1=20.0
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
task2 = run sim1 on mod2
task3 = run sim1 on mod3
plot "Example plot" task1.time vs task1.S1, task1.S2, task2.S1, task2.S2, task3.S1, task3.S2
report task1.time vs task1.S1, task1.S2, task2.S1, task2.S2, task3.S1, task3.S2
'''
self.run_example(a_str, p_str)
def test_case_05(self):
a_str = '''
model case_05()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_05"
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
plot "Example plot" task1.time vs task1.S1, task1.S2, task1.S1/task1.S2
report task1.time vs task1.S1, task1.S2, task1.S1/task1.S2
plot "Normalized plot" task1.S1/max(task1.S1) vs task1.S2/max(task1.S2)
report task1.S1/max(task1.S1) vs task1.S2/max(task1.S2)
'''
self.run_example(a_str, p_str)
def test_case_06(self):
a_str = '''
model case_06()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_06"
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
repeat1 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=True
repeat2 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=False
plot "Example plot" repeat1.time vs repeat1.S1, repeat1.S2
report repeat1.time vs repeat1.S1, repeat1.S2
plot "Example plot" repeat2.time vs repeat2.S1, repeat2.S2
report repeat2.time vs repeat2.S1, repeat2.S2
'''
self.run_example(a_str, p_str)
def test_case_07(self):
a_str = '''
model case_07()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_07"
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
repeat1 = repeat task1 for S1 in [1, 3, 5], reset=True
report task1.time, task1.S1, task1.S2, task1.S1/task1.S2
report repeat1.time, repeat1.S1, repeat1.S2, repeat1.S1/repeat1.S2
'''
self.run_example(a_str, p_str)
def test_case_08(self):
a_str = '''
model case_08()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_08"
mod2 = model "case_08"
sim1 = simulate uniform(0, 10, 20)
sim2 = simulate uniform(0, 3, 10)
task1 = run sim1 on mod1
task2 = run sim2 on mod1
repeat1 = repeat [task1, task2] for S2 in uniform(0, 10, 9), mod1.S1 = S2+3, reset=False
plot "Repeated Multiple Subtasks" repeat1.mod1.time vs repeat1.mod1.S1, repeat1.mod1.S2
# plot "Repeated Multiple Subtasks" repeat1.mod2.time vs repeat1.mod2.S1, repeat1.mod2.S2
'''
self.run_example(a_str, p_str)
def test_case_09(self):
a_str = '''
// Created by libAntimony v2.9
model *case_09()
// Compartments and Species:
compartment compartment_;
species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_;
species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_;
species MAPK_P in compartment_, MAPK_PP in compartment_;
// Reactions:
J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK));
J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P);
J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK);
J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P);
J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP);
J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P);
J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK);
J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P);
J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP);
J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P);
// Species initializations:
MKKK = 90;
MKKK_P = 10;
MKK = 280;
MKK_P = 10;
MKK_PP = 10;
MAPK = 280;
MAPK_P = 10;
MAPK_PP = 10;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
J0_V1 = 2.5;
J0_Ki = 9;
J0_n = 1;
J0_K1 = 10;
J1_V2 = 0.25;
J1_KK2 = 8;
J2_k3 = 0.025;
J2_KK3 = 15;
J3_k4 = 0.025;
J3_KK4 = 15;
J4_V5 = 0.75;
J4_KK5 = 15;
J5_V6 = 0.75;
J5_KK6 = 15;
J6_k7 = 0.025;
J6_KK7 = 15;
J7_k8 = 0.025;
J7_KK8 = 15;
J8_V9 = 0.5;
J8_KK9 = 15;
J9_V10 = 0.5;
J9_KK10 = 15;
// Other declarations:
const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3;
const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8;
const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10;
end
'''
p_str = '''
mod1 = model "case_09"
# sim1 = simulate uniform_stochastic(0, 4000, 1000)
sim1 = simulate uniform(0, 4000, 1000)
task1 = run sim1 on mod1
repeat1 = repeat task1 for local.x in uniform(0, 10, 10), reset=true
plot "MAPK oscillations" repeat1.MAPK vs repeat1.time vs repeat1.MAPK_P, repeat1.MAPK vs repeat1.time vs repeat1.MAPK_PP, repeat1.MAPK vs repeat1.time vs repeat1.MKK
report repeat1.MAPK vs repeat1.time vs repeat1.MAPK_P, repeat1.MAPK vs repeat1.time vs repeat1.MAPK_PP, repeat1.MAPK vs repeat1.time vs repeat1.MKK
'''
self.run_example(a_str, p_str)
def test_case_10(self):
a_str = '''
model case_10()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_10"
mod2 = model "case_10"
sim1 = simulate uniform(0, 10, 100)
sim2 = simulate uniform(0, 3, 10)
task1 = run sim1 on mod1
task2 = run sim2 on mod2
repeat1 = repeat [task1, task2] for local.X in uniform(0, 10, 9), mod1.S1 = X, mod2.S1 = X+3
plot repeat1.mod1.time vs repeat1.mod1.S1, repeat1.mod1.S2, repeat1.mod2.time vs repeat1.mod2.S1, repeat1.mod2.S2
'''
self.run_example(a_str, p_str)
def test_case_11(self):
a_str = '''
model case_11()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_11"
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
rtask1 = repeat task1 for k1 in uniform(0, 1, 2)
rtask2 = repeat rtask1 for k2 in uniform(0, 1, 3)
rtask3 = repeat rtask2 for S1 in [5, 10], reset=true
plot "RepeatedTask of RepeatedTask" rtask3.time vs rtask3.S1, rtask3.S2
plot rtask3.k1 vs rtask3.k2 vs rtask3.S1
'''
self.run_example(a_str, p_str)
def test_case_12(self):
a_str = '''
model case_12()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.2; k2=0.01
end
'''
p_str = '''
mod1 = model "case_12"
sim1 = simulate uniform(0, 2, 10, 49)
sim2 = simulate uniform(0, 15, 49)
task1 = run sim1 on mod1
task2 = run sim2 on mod1
repeat1 = repeat task1 for S1 in uniform(0, 10, 4), S2 = S1+20, reset=true
repeat2 = repeat task2 for S1 in uniform(0, 10, 4), S2 = S1+20, reset=true
plot "Offset simulation" repeat2.time vs repeat2.S1, repeat2.S2, repeat1.time vs repeat1.S1, repeat1.S2
report repeat2.time vs repeat2.S1, repeat2.S2, repeat1.time vs repeat1.S1, repeat1.S2
'''
self.run_example(a_str, p_str)
def test_lorenz(self):
a_str = '''
model lorenz
x' = sigma*(y - x);
y' = x*(rho - z) - y;
z' = x*y - beta*z;
x = 0.96259; y = 2.07272; z = 18.65888;
sigma = 10; rho = 28; beta = 2.67;
end
'''
p_str = '''
model1 = model "lorenz"
sim1 = simulate uniform(0,15,2000)
task1 = run sim1 on model1
plot task1.z vs task1.x
'''
self.run_example(a_str, p_str)
def test_oneStep(self):
a_str = '''
// Created by libAntimony v2.9
model *oneStep()
// Compartments and Species:
compartment compartment_;
species S1 in compartment_, S2 in compartment_, $X0 in compartment_, $X1 in compartment_;
species $X2 in compartment_;
// Reactions:
J0: $X0 => S1; J0_v0;
J1: S1 => $X1; J1_k3*S1;
J2: S1 => S2; (J2_k1*S1 - J2_k_1*S2)*(1 + J2_c*S2^J2_q);
J3: S2 => $X2; J3_k2*S2;
// Species initializations:
S1 = 0;
S2 = 1;
X0 = 1;
X1 = 0;
X2 = 0;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
J0_v0 = 8;
J1_k3 = 0;
J2_k1 = 1;
J2_k_1 = 0;
J2_c = 1;
J2_q = 3;
J3_k2 = 5;
// Other declarations:
const compartment_, J0_v0, J1_k3, J2_k1, J2_k_1, J2_c, J2_q, J3_k2;
end
'''
p_str = '''
model1 = model "oneStep"
stepper = simulate onestep(0.1)
task0 = run stepper on model1
task1 = repeat task0 for local.x in uniform(0, 10, 100), J0_v0 = piecewise(8, x<4, 0.1, 4<=x<6, 8)
plot "One Step Simulation" task1.time vs task1.S1, task1.S2, task1.J0_v0
report task1.time vs task1.S1, task1.S2, task1.J0_v0
'''
self.run_example(a_str, p_str)
def test_parameterScan1D(self):
a_str = '''
// Created by libAntimony v2.9
model *parameterScan1D()
// Compartments and Species:
compartment compartment_;
species S1 in compartment_, S2 in compartment_, $X0 in compartment_, $X1 in compartment_;
species $X2 in compartment_;
// Reactions:
J0: $X0 => S1; J0_v0;
J1: S1 => $X1; J1_k3*S1;
J2: S1 => S2; (J2_k1*S1 - J2_k_1*S2)*(1 + J2_c*S2^J2_q);
J3: S2 => $X2; J3_k2*S2;
// Species initializations:
S1 = 0;
S2 = 1;
X0 = 1;
X1 = 0;
X2 = 0;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
J0_v0 = 8;
J1_k3 = 0;
J2_k1 = 1;
J2_k_1 = 0;
J2_c = 1;
J2_q = 3;
J3_k2 = 5;
// Other declarations:
const compartment_, J0_v0, J1_k3, J2_k1, J2_k_1, J2_c, J2_q, J3_k2;
end
'''
p_str = '''
model1 = model "parameterScan1D"
timecourse1 = simulate uniform(0, 20, 1000)
task0 = run timecourse1 on model1
task1 = repeat task0 for J0_v0 in [8, 4, 0.4], reset=true
plot task1.time vs task1.S1, task1.S2
'''
self.run_example(a_str, p_str)
def test_parameterScan2D(self):
a_str = '''
// Created by libAntimony v2.9
model *parameterScan2D()
// Compartments and Species:
compartment compartment_;
species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_;
species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_;
species MAPK_P in compartment_, MAPK_PP in compartment_;
// Reactions:
J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK));
J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P);
J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK);
J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P);
J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP);
J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P);
J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK);
J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P);
J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP);
J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P);
// Species initializations:
MKKK = 90;
MKKK_P = 10;
MKK = 280;
MKK_P = 10;
MKK_PP = 10;
MAPK = 280;
MAPK_P = 10;
MAPK_PP = 10;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
J0_V1 = 2.5;
J0_Ki = 9;
J0_n = 1;
J0_K1 = 10;
J1_V2 = 0.25;
J1_KK2 = 8;
J2_k3 = 0.025;
J2_KK3 = 15;
J3_k4 = 0.025;
J3_KK4 = 15;
J4_V5 = 0.75;
J4_KK5 = 15;
J5_V6 = 0.75;
J5_KK6 = 15;
J6_k7 = 0.025;
J6_KK7 = 15;
J7_k8 = 0.025;
J7_KK8 = 15;
J8_V9 = 0.5;
J8_KK9 = 15;
J9_V10 = 0.5;
J9_KK10 = 15;
// Other declarations:
const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3;
const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8;
const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10;
end
'''
p_str = '''
model_3 = model "parameterScan2D"
sim_repeat = simulate uniform(0,3000,100)
task_1 = run sim_repeat on model_3
repeatedtask_1 = repeat task_1 for J1_KK2 in [1, 5, 10, 50, 60, 70, 80, 90, 100], reset=true
repeatedtask_2 = repeat repeatedtask_1 for J4_KK5 in uniform(1, 40, 10), reset=true
plot repeatedtask_2.J4_KK5 vs repeatedtask_2.J1_KK2
plot repeatedtask_2.time vs repeatedtask_2.MKK, repeatedtask_2.MKK_P
'''
self.run_example(a_str, p_str)
def test_repeatedStochastic(self):
a_str = '''
// Created by libAntimony v2.9
model *repeatedStochastic()
// Compartments and Species:
compartment compartment_;
species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_;
species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_;
species MAPK_P in compartment_, MAPK_PP in compartment_;
// Reactions:
J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK));
J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P);
J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK);
J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P);
J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP);
J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P);
J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK);
J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P);
J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP);
J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P);
// Species initializations:
MKKK = 90;
MKKK_P = 10;
MKK = 280;
MKK_P = 10;
MKK_PP = 10;
MAPK = 280;
MAPK_P = 10;
MAPK_PP = 10;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
J0_V1 = 2.5;
J0_Ki = 9;
J0_n = 1;
J0_K1 = 10;
J1_V2 = 0.25;
J1_KK2 = 8;
J2_k3 = 0.025;
J2_KK3 = 15;
J3_k4 = 0.025;
J3_KK4 = 15;
J4_V5 = 0.75;
J4_KK5 = 15;
J5_V6 = 0.75;
J5_KK6 = 15;
J6_k7 = 0.025;
J6_KK7 = 15;
J7_k8 = 0.025;
J7_KK8 = 15;
J8_V9 = 0.5;
J8_KK9 = 15;
J9_V10 = 0.5;
J9_KK10 = 15;
// Other declarations:
const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3;
const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8;
const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10;
end
'''
p_str = '''
model1 = model "repeatedStochastic"
timecourse1 = simulate uniform_stochastic(0, 4000, 1000)
timecourse1.algorithm.seed = 1003
timecourse2 = simulate uniform_stochastic(0, 4000, 1000)
task1 = run timecourse1 on model1
task2 = run timecourse2 on model1
repeat1 = repeat task1 for local.x in uniform(0, 10, 10), reset=true
repeat2 = repeat task2 for local.x in uniform(0, 10, 10), reset=true
plot "Repeats with SEED" repeat1.time vs repeat1.MAPK, repeat1.MAPK_P, repeat1.MAPK_PP, repeat1.MKK, repeat1.MKK_P, repeat1.MKKK, repeat1.MKKK_P
plot "Repeats without SEED" repeat2.time vs repeat2.MAPK, repeat2.MAPK_P, repeat2.MAPK_PP, repeat2.MKK, repeat2.MKK_P, repeat2.MKKK, repeat2.MKKK_P
'''
self.run_example(a_str, p_str)
def test_repressilator(self):
# Get SBML from URN and set for phrasedml
urn = "urn:miriam:biomodels.db:BIOMD0000000012"
sbml_str = temiriam.getSBMLFromBiomodelsURN(urn=urn)
return_code = phrasedml.setReferencedSBML(urn, sbml_str)
assert return_code # valid SBML
# <SBML species>
# PX - LacI protein
# PY - TetR protein
# PZ - cI protein
# X - LacI mRNA
# Y - TetR mRNA
# Z - cI mRNA
# <SBML parameters>
# ps_a - tps_active: Transcription from free promotor in transcripts per second and promotor
# ps_0 - tps_repr: Transcription from fully repressed promotor in transcripts per second and promotor
phrasedml_str = """
model1 = model "{}"
model2 = model model1 with ps_0=1.3E-5, ps_a=0.013
sim1 = simulate uniform(0, 1000, 1000)
task1 = run sim1 on model1
task2 = run sim1 on model2
# A simple timecourse simulation
plot "Timecourse of repressilator" task1.time vs task1.PX, task1.PZ, task1.PY
# Applying preprocessing
plot "Timecourse after pre-processing" task2.time vs task2.PX, task2.PZ, task2.PY
# Applying postprocessing
plot "Timecourse after post-processing" task1.PX/max(task1.PX) vs task1.PZ/max(task1.PZ), \
task1.PY/max(task1.PY) vs task1.PX/max(task1.PX), \
task1.PZ/max(task1.PZ) vs task1.PY/max(task1.PY)
""".format(urn)
# convert to sedml
print(phrasedml_str)
sedml_str = phrasedml.convertString(phrasedml_str)
if sedml_str is None:
print(phrasedml.getLastError())
raise IOError("sedml could not be generated")
# run SEDML directly
try:
tmp_dir = tempfile.mkdtemp()
executeSEDML(sedml_str, workingDir=tmp_dir)
finally:
shutil.rmtree(tmp_dir)
# create combine archive and execute
try:
tmp_dir = tempfile.mkdtemp()
sedml_location = "repressilator_sedml.xml"
sedml_path = os.path.join(tmp_dir, sedml_location)
omex_path = os.path.join(tmp_dir, "repressilator.omex")
with open(sedml_path, "w") as f:
f.write(sedml_str)
entries = [
omex.Entry(location=sedml_location, formatKey="sedml", master=True)
]
omex.combineArchiveFromEntries(omexPath=omex_path, entries=entries, workingDir=tmp_dir)
executeCombineArchive(omex_path, workingDir=tmp_dir)
finally:
shutil.rmtree(tmp_dir)
def test_simpletimecourse(self):
a_str = '''
// Created by libAntimony v2.9
model MAPKcascade()
// Compartments and Species:
compartment compartment_;
species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_;
species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_;
species MAPK_P in compartment_, MAPK_PP in compartment_;
// Reactions:
J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK));
J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P);
J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK);
J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P);
J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP);
J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P);
J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK);
J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P);
J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP);
J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P);
// Species initializations:
MKKK = 90;
MKKK_P = 10;
MKK = 280;
MKK_P = 10;
MKK_PP = 10;
MAPK = 280;
MAPK_P = 10;
MAPK_PP = 10;
// Compartment initializations:
compartment_ = 1;
// Variable initializations:
J0_V1 = 2.5;
J0_Ki = 9;
J0_n = 1;
J0_K1 = 10;
J1_V2 = 0.25;
J1_KK2 = 8;
J2_k3 = 0.025;
J2_KK3 = 15;
J3_k4 = 0.025;
J3_KK4 = 15;
J4_V5 = 0.75;
J4_KK5 = 15;
J5_V6 = 0.75;
J5_KK6 = 15;
J6_k7 = 0.025;
J6_KK7 = 15;
J7_k8 = 0.025;
J7_KK8 = 15;
J8_V9 = 0.5;
J8_KK9 = 15;
J9_V10 = 0.5;
J9_KK10 = 15;
// Other declarations:
const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3;
const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8;
const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10;
end
'''
p_str = '''
model1 = model "MAPKcascade"
sim1 = simulate uniform(0,4000,1000)
task1 = run sim1 on model1
plot task1.time vs task1.MAPK, task1.MAPK_P, task1.MAPK_PP
'''
self.run_example(a_str, p_str)
| apache-2.0 |
cl4rke/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
wdwvt1/scikit-bio | skbio/stats/ordination/tests/test_ordination.py | 3 | 35844 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
from six import binary_type, text_type
import warnings
import unittest
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
from IPython.core.display import Image, SVG
from nose.tools import assert_is_instance, assert_true
from scipy.spatial.distance import pdist
from skbio import DistanceMatrix
from skbio.stats.ordination import (
CA, RDA, CCA, PCoA, OrdinationResults, corr, mean_and_std,
assert_ordination_results_equal)
from skbio.util import get_data_path
def normalize_signs(arr1, arr2):
"""Change column signs so that "column" and "-column" compare equal.
    This is needed because results of eigenproblems can have signs
flipped, but they're still right.
Notes
=====
This function tries hard to make sure that, if you find "column"
and "-column" almost equal, calling a function like np.allclose to
compare them after calling `normalize_signs` succeeds.
To do so, it distinguishes two cases for every column:
- It can be all almost equal to 0 (this includes a column of
zeros).
- Otherwise, it has a value that isn't close to 0.
In the first case, no sign needs to be flipped. I.e., for
|epsilon| small, np.allclose(-epsilon, 0) is true if and only if
np.allclose(epsilon, 0) is.
In the second case, the function finds the number in the column
whose absolute value is largest. Then, it compares its sign with
the number found in the same index, but in the other array, and
flips the sign of the column as needed.
"""
    # Let's convert everything to floating point numbers (it's
# reasonable to assume that eigenvectors will already be floating
# point numbers). This is necessary because np.array(1) /
# np.array(0) != np.array(1.) / np.array(0.)
arr1 = np.asarray(arr1, dtype=np.float64)
arr2 = np.asarray(arr2, dtype=np.float64)
if arr1.shape != arr2.shape:
raise ValueError(
"Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
arr2.shape)
)
# To avoid issues around zero, we'll compare signs of the values
# with highest absolute value
max_idx = np.abs(arr1).argmax(axis=0)
max_arr1 = arr1[max_idx, range(arr1.shape[1])]
max_arr2 = arr2[max_idx, range(arr2.shape[1])]
sign_arr1 = np.sign(max_arr1)
sign_arr2 = np.sign(max_arr2)
# Store current warnings, and ignore division by zero (like 1. /
# 0.) and invalid operations (like 0. / 0.)
wrn = np.seterr(invalid='ignore', divide='ignore')
differences = sign_arr1 / sign_arr2
# The values in `differences` can be:
# 1 -> equal signs
# -1 -> diff signs
# Or nan (0/0), inf (nonzero/0), 0 (0/nonzero)
np.seterr(**wrn)
# Now let's deal with cases where `differences != \pm 1`
special_cases = (~np.isfinite(differences)) | (differences == 0)
# In any of these cases, the sign of the column doesn't matter, so
# let's just keep it
differences[special_cases] = 1
return arr1 * differences, arr2
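def _normalize_signs_example():
    # Illustrative usage sketch; the matrices below are made-up eigenvector
    # tables. Columns that differ only by an overall sign compare equal after
    # `normalize_signs`, so a plain np.allclose succeeds on the returned pair.
    a = np.array([[1.0, -2.0],
                  [3.0, 1.0]])
    b = np.array([[1.0, 2.0],
                  [3.0, -1.0]])
    na, nb = normalize_signs(a, b)
    assert np.allclose(na, nb)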
def chi_square_distance(data_table, between_rows=True):
"""Computes the chi-square distance between two rows or columns of input.
It is a measure that has no upper limit, and it excludes double-zeros.
Parameters
----------
data_table : 2D array_like
An array_like object of shape (n, p). The input must be a
frequency table (so that the sum of all cells equals 1, and
all values are non-negative).
between_rows : bool (defaults to True)
Indicates whether distance is computed between rows (default)
or columns.
Returns
-------
Y : ndarray
Returns a condensed distance matrix. For each i and j (where
i<j<n), the chi square distance between u=X[i] and v=X[j] is
computed and stored in `Y[(n choose 2) - (n - i choose 2) + (j
- i - 1)]`.
See Also
--------
scipy.spatial.distance.squareform
References
----------
This coefficient appears in Legendre and Legendre (1998) as
formula 7.54 (as D_{16}). Another source is
http://www.springerreference.com/docs/html/chapterdbid/60817.html
"""
data_table = np.asarray(data_table, dtype=np.float64)
if not np.allclose(data_table.sum(), 1):
raise ValueError("Input is not a frequency table: if it is an"
" abundance table you could scale it as"
" `data_table / data_table.sum()`.")
if np.any(data_table < 0):
raise ValueError("A frequency table can't have negative values.")
# The distances are always computed between the rows of F
F = data_table if between_rows else data_table.T
row_sums = F.sum(axis=1, keepdims=True)
column_sums = F.sum(axis=0)
scaled_F = F / (row_sums * np.sqrt(column_sums))
return pdist(scaled_F, 'euclidean')
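def _chi_square_distance_example():
    # Illustrative usage sketch reusing the tiny Legendre & Legendre table from
    # `test_results2` below. The raw counts must be scaled to frequencies
    # before calling the helper.
    counts = np.array([[0, 1, 1],
                       [1, 0, 0],
                       [0, 4, 4]], dtype=np.float64)
    dist = chi_square_distance(counts / counts.sum())
    # condensed order: D(0,1), D(0,2), D(1,2); rows 0 and 2 are proportional,
    # so their chi-square distance is zero
    npt.assert_almost_equal(dist, [3.4785054261852175, 0, 3.4785054261852175])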
class TestNormalizeSigns(object):
def test_shapes_and_nonarray_input(self):
with npt.assert_raises(ValueError):
normalize_signs([[1, 2], [3, 5]], [[1, 2]])
def test_works_when_different(self):
"""Taking abs value of everything would lead to false
positives."""
a = np.array([[1, -1],
[2, 2]])
b = np.array([[-1, -1],
[2, 2]])
with npt.assert_raises(AssertionError):
npt.assert_equal(*normalize_signs(a, b))
def test_easy_different(self):
a = np.array([[1, 2],
[3, -1]])
b = np.array([[-1, 2],
[-3, -1]])
npt.assert_equal(*normalize_signs(a, b))
def test_easy_already_equal(self):
a = np.array([[1, -2],
[3, 1]])
b = a.copy()
npt.assert_equal(*normalize_signs(a, b))
def test_zeros(self):
a = np.array([[0, 3],
[0, -1]])
b = np.array([[0, -3],
[0, 1]])
npt.assert_equal(*normalize_signs(a, b))
def test_hard(self):
a = np.array([[0, 1],
[1, 2]])
b = np.array([[0, 1],
[-1, 2]])
npt.assert_equal(*normalize_signs(a, b))
def test_harder(self):
"""We don't want a value that might be negative due to
        floating-point inaccuracies alone to throw off a later call to
        allclose on the result."""
a = np.array([[-1e-15, 1],
[5, 2]])
b = np.array([[1e-15, 1],
[5, 2]])
# Clearly a and b would refer to the same "column
        # eigenvectors" but a sloppy implementation of
# normalize_signs could change the sign of column 0 and make a
# comparison fail
npt.assert_almost_equal(*normalize_signs(a, b))
def test_column_zeros(self):
a = np.array([[0, 1],
[0, 2]])
b = np.array([[0, -1],
[0, -2]])
npt.assert_equal(*normalize_signs(a, b))
def test_column_almost_zero(self):
a = np.array([[1e-15, 3],
[-2e-14, -6]])
b = np.array([[0, 3],
[-1e-15, -6]])
npt.assert_almost_equal(*normalize_signs(a, b))
class TestChiSquareDistance(object):
def test_errors(self):
a = np.array([[-0.5, 0],
[1, 0.5]])
with npt.assert_raises(ValueError):
chi_square_distance(a)
b = np.array([[0.5, 0],
[0.5, 0.1]])
with npt.assert_raises(ValueError):
chi_square_distance(b)
def test_results(self):
"""Some random numbers."""
a = np.array([[0.02808988764, 0.056179775281, 0.084269662921,
0.140449438202],
[0.01404494382, 0.196629213483, 0.109550561798,
0.033707865169],
[0.02808988764, 0.112359550562, 0.056179775281,
0.140449438202]])
dist = chi_square_distance(a)
expected = [0.91413919964333856,
0.33651110106124049,
0.75656884966269089]
npt.assert_almost_equal(dist, expected)
def test_results2(self):
"""A tiny example from Legendre & Legendre 1998, p. 285."""
a = np.array([[0, 1, 1],
[1, 0, 0],
[0, 4, 4]])
dist = chi_square_distance(a / a.sum())
# Note L&L used a terrible calculator because they got a wrong
# number (says it's 3.477) :(
expected = [3.4785054261852175, 0, 3.4785054261852175]
npt.assert_almost_equal(dist, expected)
class TestUtils(object):
def setup(self):
self.x = np.array([[1, 2, 3], [4, 5, 6]])
self.y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
def test_mean_and_std_no_mean_no_std(self):
with npt.assert_raises(ValueError):
mean_and_std(self.x, with_mean=False, with_std=False)
def test_corr_shape_mismatch(self):
with npt.assert_raises(ValueError):
corr(self.x, self.y)
def test_assert_ordination_results_equal(self):
minimal1 = OrdinationResults([1, 2])
# a minimal set of results should be equal to itself
assert_ordination_results_equal(minimal1, minimal1)
# type mismatch
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, 'foo')
# numeric values should be checked that they're almost equal
almost_minimal1 = OrdinationResults([1.0000001, 1.9999999])
assert_ordination_results_equal(minimal1, almost_minimal1)
# species_ids missing in one, present in the other
almost_minimal1.species_ids = ['abc', 'def']
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
almost_minimal1.species_ids = None
# site_ids missing in one, present in the other
almost_minimal1.site_ids = ['abc', 'def']
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
almost_minimal1.site_ids = None
# test each of the optional numeric attributes
for attr in ('species', 'site', 'biplot', 'site_constraints',
'proportion_explained'):
# missing optional numeric attribute in one, present in the other
setattr(almost_minimal1, attr, [[1, 2], [3, 4]])
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
setattr(almost_minimal1, attr, None)
# optional numeric attributes present in both, but not almost equal
setattr(minimal1, attr, [[1, 2], [3, 4]])
setattr(almost_minimal1, attr, [[1, 2], [3.00002, 4]])
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
setattr(minimal1, attr, None)
setattr(almost_minimal1, attr, None)
# optional numeric attributes present in both, and almost equal
setattr(minimal1, attr, [[1, 2], [3, 4]])
setattr(almost_minimal1, attr, [[1, 2], [3.00000002, 4]])
assert_ordination_results_equal(minimal1, almost_minimal1)
setattr(minimal1, attr, None)
setattr(almost_minimal1, attr, None)
class TestCAResults(object):
def setup(self):
"""Data from table 9.11 in Legendre & Legendre 1998."""
self.X = np.loadtxt(get_data_path('L&L_CA_data'))
self.ordination = CA(self.X, ['Site1', 'Site2', 'Site3'],
['Species1', 'Species2', 'Species3'])
def test_scaling2(self):
scores = self.ordination.scores(scaling=2)
# p. 460 L&L 1998
F_hat = np.array([[0.40887, -0.06955],
[-0.11539, 0.29977],
[-0.30997, -0.18739]])
npt.assert_almost_equal(*normalize_signs(F_hat, scores.species),
decimal=5)
V_hat = np.array([[-0.84896, -0.88276],
[-0.22046, 1.34482],
[1.66697, -0.47032]])
npt.assert_almost_equal(*normalize_signs(V_hat, scores.site),
decimal=5)
def test_scaling1(self):
scores = self.ordination.scores(scaling=1)
# p. 458
V = np.array([[1.31871, -0.34374],
[-0.37215, 1.48150],
[-0.99972, -0.92612]])
npt.assert_almost_equal(*normalize_signs(V, scores.species), decimal=5)
F = np.array([[-0.26322, -0.17862],
[-0.06835, 0.27211],
[0.51685, -0.09517]])
npt.assert_almost_equal(*normalize_signs(F, scores.site), decimal=5)
def test_maintain_chi_square_distance_scaling1(self):
"""In scaling 1, chi^2 distance among rows (sites) is equal to
euclidean distance between them in transformed space."""
frequencies = self.X / self.X.sum()
chi2_distances = chi_square_distance(frequencies)
transformed_sites = self.ordination.scores(1).site
euclidean_distances = pdist(transformed_sites, 'euclidean')
npt.assert_almost_equal(chi2_distances, euclidean_distances)
def test_maintain_chi_square_distance_scaling2(self):
"""In scaling 2, chi^2 distance among columns (species) is
equal to euclidean distance between them in transformed space."""
frequencies = self.X / self.X.sum()
chi2_distances = chi_square_distance(frequencies, between_rows=False)
transformed_species = self.ordination.scores(2).species
euclidean_distances = pdist(transformed_species, 'euclidean')
npt.assert_almost_equal(chi2_distances, euclidean_distances)
class TestCAErrors(object):
def test_negative(self):
X = np.array([[1, 2], [-0.1, -2]])
with npt.assert_raises(ValueError):
CA(X, None, None)
class TestRDAErrors(object):
def test_shape(self):
for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
Y = np.random.randn(n, p)
X = np.random.randn(n_, m)
yield npt.assert_raises, ValueError, RDA, Y, X, None, None
class TestRDAResults(object):
# STATUS: L&L only shows results with scaling 1, and they agree
    # with vegan's (modulo multiplying by a constant). I can also
# compute scaling 2, agreeing with vegan, but there are no written
# results in L&L.
def setup(self):
"""Data from table 11.3 in Legendre & Legendre 1998."""
Y = np.loadtxt(get_data_path('example2_Y'))
X = np.loadtxt(get_data_path('example2_X'))
self.ordination = RDA(Y, X,
['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5'])
def test_scaling1(self):
scores = self.ordination.scores(1)
# Load data as computed with vegan 2.0-8
vegan_species = np.loadtxt(get_data_path(
'example2_species_scaling1_from_vegan'))
npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
vegan_site = np.loadtxt(get_data_path(
'example2_site_scaling1_from_vegan'))
npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
def test_scaling2(self):
scores = self.ordination.scores(2)
# Load data as computed with vegan 2.0-8
vegan_species = np.loadtxt(get_data_path(
'example2_species_scaling2_from_vegan'))
npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
vegan_site = np.loadtxt(get_data_path(
'example2_site_scaling2_from_vegan'))
npt.assert_almost_equal(scores.site, vegan_site, decimal=6)
class TestCCAErrors(object):
def setup(self):
"""Data from table 11.3 in Legendre & Legendre 1998."""
self.Y = np.loadtxt(get_data_path('example3_Y'))
self.X = np.loadtxt(get_data_path('example3_X'))
def test_shape(self):
X, Y = self.X, self.Y
with npt.assert_raises(ValueError):
CCA(Y, X[:-1], None, None)
def test_Y_values(self):
X, Y = self.X, self.Y
Y[0, 0] = -1
with npt.assert_raises(ValueError):
CCA(Y, X, None, None)
Y[0] = 0
with npt.assert_raises(ValueError):
CCA(Y, X, None, None)
class TestCCAResults(object):
def setup(self):
"""Data from table 11.3 in Legendre & Legendre 1998
(p. 590). Loaded results as computed with vegan 2.0-8 and
compared with table 11.5 if also there."""
Y = np.loadtxt(get_data_path('example3_Y'))
X = np.loadtxt(get_data_path('example3_X'))
self.ordination = CCA(Y, X[:, :-1],
['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5', 'Species6', 'Species7',
'Species8'])
def test_scaling1_species(self):
scores = self.ordination.scores(1)
vegan_species = np.loadtxt(get_data_path(
'example3_species_scaling1_from_vegan'))
npt.assert_almost_equal(scores.species, vegan_species, decimal=6)
def test_scaling1_site(self):
scores = self.ordination.scores(1)
vegan_site = np.loadtxt(get_data_path(
'example3_site_scaling1_from_vegan'))
npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
def test_scaling2_species(self):
scores = self.ordination.scores(2)
vegan_species = np.loadtxt(get_data_path(
'example3_species_scaling2_from_vegan'))
npt.assert_almost_equal(scores.species, vegan_species, decimal=5)
def test_scaling2_site(self):
scores = self.ordination.scores(2)
vegan_site = np.loadtxt(get_data_path(
'example3_site_scaling2_from_vegan'))
npt.assert_almost_equal(scores.site, vegan_site, decimal=4)
class TestPCoAResults(object):
def setup(self):
"""Sample data set from page 111 of W.J Krzanowski. Principles
of multivariate analysis, 2000, Oxford University Press."""
matrix = np.loadtxt(get_data_path('PCoA_sample_data'))
dist_matrix = DistanceMatrix(matrix, map(str, range(matrix.shape[0])))
self.dist_matrix = dist_matrix
def test_negative_eigenvalue_warning(self):
"""This data has some small negative eigenvalues."""
npt.assert_warns(RuntimeWarning, PCoA, self.dist_matrix)
def test_values(self):
"""Adapted from cogent's `test_principal_coordinate_analysis`:
"I took the example in the book (see intro info), and did the
principal coordinates analysis, plotted the data and it looked
right"."""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
ordination = PCoA(self.dist_matrix)
scores = ordination.scores()
exp_eigvals = np.array([0.73599103, 0.26260032, 0.14926222, 0.06990457,
0.02956972, 0.01931184, 0., 0., 0., 0., 0., 0.,
0., 0.])
exp_site = np.loadtxt(get_data_path('exp_PCoAzeros_site'))
exp_prop_expl = np.array([0.58105792, 0.20732046, 0.1178411,
0.05518899, 0.02334502, 0.01524651, 0., 0.,
0., 0., 0., 0., 0., 0.])
exp_site_ids = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13']
# Note the absolute value because column can have signs swapped
npt.assert_almost_equal(scores.eigvals, exp_eigvals)
npt.assert_almost_equal(np.abs(scores.site), exp_site)
npt.assert_almost_equal(scores.proportion_explained, exp_prop_expl)
npt.assert_equal(scores.site_ids, exp_site_ids)
class TestPCoAResultsExtensive(object):
def setup(self):
matrix = np.loadtxt(get_data_path('PCoA_sample_data_2'))
self.ids = [str(i) for i in range(matrix.shape[0])]
dist_matrix = DistanceMatrix(matrix, self.ids)
self.ordination = PCoA(dist_matrix)
def test_values(self):
results = self.ordination.scores()
npt.assert_equal(len(results.eigvals), len(results.site[0]))
expected = np.array([[-0.028597, 0.22903853, 0.07055272,
0.26163576, 0.28398669, 0.0],
[0.37494056, 0.22334055, -0.20892914,
0.05057395, -0.18710366, 0.0],
[-0.33517593, -0.23855979, -0.3099887,
0.11521787, -0.05021553, 0.0],
[0.25412394, -0.4123464, 0.23343642,
0.06403168, -0.00482608, 0.0],
[-0.28256844, 0.18606911, 0.28875631,
-0.06455635, -0.21141632, 0.0],
[0.01727687, 0.012458, -0.07382761,
-0.42690292, 0.1695749, 0.0]])
npt.assert_almost_equal(*normalize_signs(expected, results.site))
expected = np.array([0.3984635, 0.36405689, 0.28804535, 0.27479983,
0.19165361, 0.0])
npt.assert_almost_equal(results.eigvals, expected)
expected = np.array([0.2626621381, 0.2399817314, 0.1898758748,
0.1811445992, 0.1263356565, 0.0])
npt.assert_almost_equal(results.proportion_explained, expected)
npt.assert_equal(results.site_ids, self.ids)
class TestPCoAEigenResults(object):
def setup(self):
dist_matrix = DistanceMatrix.read(get_data_path('PCoA_sample_data_3'))
self.ordination = PCoA(dist_matrix)
self.ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
'PC.355', 'PC.607', 'PC.634']
def test_values(self):
results = self.ordination.scores()
npt.assert_almost_equal(len(results.eigvals), len(results.site[0]))
expected = np.loadtxt(get_data_path('exp_PCoAEigenResults_site'))
npt.assert_almost_equal(*normalize_signs(expected, results.site))
expected = np.array([0.51236726, 0.30071909, 0.26791207, 0.20898868,
0.19169895, 0.16054235, 0.15017696, 0.12245775,
0.0])
npt.assert_almost_equal(results.eigvals, expected)
expected = np.array([0.2675738328, 0.157044696, 0.1399118638,
0.1091402725, 0.1001110485, 0.0838401162,
0.0784269939, 0.0639511764, 0.0])
npt.assert_almost_equal(results.proportion_explained, expected)
npt.assert_equal(results.site_ids, self.ids)
class TestPCoAPrivateMethods(object):
def setup(self):
self.matrix = np.arange(1, 7).reshape(2, 3)
self.matrix2 = np.arange(1, 10).reshape(3, 3)
def test_E_matrix(self):
E = PCoA._E_matrix(self.matrix)
expected_E = np.array([[-0.5, -2., -4.5],
[-8., -12.5, -18.]])
npt.assert_almost_equal(E, expected_E)
def test_F_matrix(self):
F = PCoA._F_matrix(self.matrix2)
expected_F = np.zeros((3, 3))
# Note that `test_make_F_matrix` in cogent is wrong
npt.assert_almost_equal(F, expected_F)
class TestPCoAErrors(object):
def test_input(self):
with npt.assert_raises(TypeError):
PCoA([[1, 2], [3, 4]])
class TestOrdinationResults(unittest.TestCase):
def setUp(self):
# Define in-memory CA results to serialize and deserialize.
eigvals = np.array([0.0961330159181, 0.0409418140138])
species = np.array([[0.408869425742, 0.0695518116298],
[-0.1153860437, -0.299767683538],
[-0.309967102571, 0.187391917117]])
site = np.array([[-0.848956053187, 0.882764759014],
[-0.220458650578, -1.34482000302],
[1.66697179591, 0.470324389808]])
biplot = None
site_constraints = None
prop_explained = None
species_ids = ['Species1', 'Species2', 'Species3']
site_ids = ['Site1', 'Site2', 'Site3']
self.ordination_results = OrdinationResults(
eigvals=eigvals, species=species, site=site, biplot=biplot,
site_constraints=site_constraints,
proportion_explained=prop_explained, species_ids=species_ids,
site_ids=site_ids)
# DataFrame for testing plot method. Has a categorical column with a
# mix of numbers and strings. Has a numeric column with a mix of ints,
# floats, and strings that can be converted to floats. Has a numeric
# column with missing data (np.nan).
self.df = pd.DataFrame([['foo', '42', 10],
[22, 0, 8],
[22, -4.2, np.nan],
['foo', '42.19', 11]],
index=['A', 'B', 'C', 'D'],
columns=['categorical', 'numeric', 'nancolumn'])
# Minimal ordination results for easier testing of plotting method.
# Paired with df above.
eigvals = np.array([0.50, 0.25, 0.25])
site = np.array([[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6]])
self.min_ord_results = OrdinationResults(eigvals=eigvals, site=site,
site_ids=['A', 'B', 'C', 'D'])
def test_str(self):
exp = ("Ordination results:\n"
"\tEigvals: 2\n"
"\tProportion explained: N/A\n"
"\tSpecies: 3x2\n"
"\tSite: 3x2\n"
"\tBiplot: N/A\n"
"\tSite constraints: N/A\n"
"\tSpecies IDs: 'Species1', 'Species2', 'Species3'\n"
"\tSite IDs: 'Site1', 'Site2', 'Site3'")
obs = str(self.ordination_results)
self.assertEqual(obs, exp)
# all optional attributes missing
exp = ("Ordination results:\n"
"\tEigvals: 1\n"
"\tProportion explained: N/A\n"
"\tSpecies: N/A\n"
"\tSite: N/A\n"
"\tBiplot: N/A\n"
"\tSite constraints: N/A\n"
"\tSpecies IDs: N/A\n"
"\tSite IDs: N/A")
obs = str(OrdinationResults(np.array([4.2])))
self.assertEqual(obs, exp)
def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
exp_legend_exists, exp_xlabel, exp_ylabel,
exp_zlabel):
# check type
assert_is_instance(fig, mpl.figure.Figure)
# check number of subplots
axes = fig.get_axes()
npt.assert_equal(len(axes), exp_num_subplots)
# check title
ax = axes[0]
npt.assert_equal(ax.get_title(), exp_title)
# shouldn't have tick labels
for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
ax.get_zticklabels()):
npt.assert_equal(tick_label.get_text(), '')
# check if legend is present
legend = ax.get_legend()
if exp_legend_exists:
assert_true(legend is not None)
else:
assert_true(legend is None)
# check axis labels
npt.assert_equal(ax.get_xlabel(), exp_xlabel)
npt.assert_equal(ax.get_ylabel(), exp_ylabel)
npt.assert_equal(ax.get_zlabel(), exp_zlabel)
def test_plot_no_metadata(self):
fig = self.min_ord_results.plot()
self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')
def test_plot_with_numeric_metadata_and_plot_options(self):
fig = self.min_ord_results.plot(
self.df, 'numeric', axes=(1, 0, 2),
axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
self.check_basic_figure_sanity(
fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')
def test_plot_with_categorical_metadata_and_plot_options(self):
fig = self.min_ord_results.plot(
self.df, 'categorical', axes=[2, 0, 1], title='a title',
cmap='Accent')
self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
def test_plot_with_invalid_axis_labels(self):
with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
self.min_ord_results.plot(axes=[2, 0, 1],
axis_labels=('a', 'b', 'c', 'd'))
def test_validate_plot_axes_valid_input(self):
# shouldn't raise an error on valid input. nothing is returned, so
# nothing to check here
self.min_ord_results._validate_plot_axes(self.min_ord_results.site.T,
(1, 2, 0))
def test_validate_plot_axes_invalid_input(self):
# not enough dimensions
with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
self.min_ord_results._validate_plot_axes(
np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
coord_matrix = self.min_ord_results.site.T
# wrong number of axes
with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
self.min_ord_results._validate_plot_axes(coord_matrix, [])
with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
self.min_ord_results._validate_plot_axes(coord_matrix,
(0, 1, 2, 3))
# duplicate axes
with six.assertRaisesRegex(self, ValueError, 'must be unique'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
# out of range axes
with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
def test_get_plot_point_colors_invalid_input(self):
# column provided without df
with npt.assert_raises(ValueError):
self.min_ord_results._get_plot_point_colors(None, 'numeric',
['B', 'C'], 'jet')
# df provided without column
with npt.assert_raises(ValueError):
self.min_ord_results._get_plot_point_colors(self.df, None,
['B', 'C'], 'jet')
# column not in df
with six.assertRaisesRegex(self, ValueError, 'missingcol'):
self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
['B', 'C'], 'jet')
# id not in df
with six.assertRaisesRegex(self, ValueError, 'numeric'):
self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
# missing data in df
with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
['B', 'C', 'A'], 'jet')
def test_get_plot_point_colors_no_df_or_column(self):
obs = self.min_ord_results._get_plot_point_colors(None, None,
['B', 'C'], 'jet')
npt.assert_equal(obs, (None, None))
def test_get_plot_point_colors_numeric_column(self):
# subset of the ids in df
exp = [0.0, -4.2, 42.0]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'C', 'A'], 'jet')
npt.assert_almost_equal(obs[0], exp)
assert_true(obs[1] is None)
# all ids in df
exp = [0.0, 42.0, 42.19, -4.2]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
npt.assert_almost_equal(obs[0], exp)
assert_true(obs[1] is None)
def test_get_plot_point_colors_categorical_column(self):
# subset of the ids in df
exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
exp_color_dict = {
'foo': [0.5, 0., 0., 1.],
22: [0., 0., 0.5, 1.]
}
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'categorical', ['B', 'C', 'A'], 'jet')
npt.assert_almost_equal(obs[0], exp_colors)
npt.assert_equal(obs[1], exp_color_dict)
# all ids in df
exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
[0., 0., 0.5, 1.]]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
npt.assert_almost_equal(obs[0], exp_colors)
# should get same color dict as before
npt.assert_equal(obs[1], exp_color_dict)
def test_plot_categorical_legend(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# we shouldn't have a legend yet
assert_true(ax.get_legend() is None)
self.min_ord_results._plot_categorical_legend(
ax, {'foo': 'red', 'bar': 'green'})
# make sure we have a legend now
legend = ax.get_legend()
assert_true(legend is not None)
# do some light sanity checking to make sure our input labels and
# colors are present. we're not using nose.tools.assert_items_equal
# because it isn't available in Python 3.
labels = [t.get_text() for t in legend.get_texts()]
npt.assert_equal(sorted(labels), ['bar', 'foo'])
colors = [l.get_color() for l in legend.get_lines()]
npt.assert_equal(sorted(colors), ['green', 'red'])
def test_repr_png(self):
obs = self.min_ord_results._repr_png_()
assert_is_instance(obs, binary_type)
assert_true(len(obs) > 0)
def test_repr_svg(self):
obs = self.min_ord_results._repr_svg_()
# print_figure(format='svg') can return text or bytes depending on the
# version of IPython
assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
assert_true(len(obs) > 0)
def test_png(self):
assert_is_instance(self.min_ord_results.png, Image)
def test_svg(self):
assert_is_instance(self.min_ord_results.svg, SVG)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
psi-rking/psi4 | psi4/driver/qcdb/mpl.py | 7 | 54234 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with matplotlib plotting routines. These are not hooked up to
any particular qcdb data structures but can be called with basic
arguments.
"""
import os
#import matplotlib
#matplotlib.use('Agg')
def expand_saveas(saveas, def_filename, def_path=os.path.abspath(os.curdir), def_prefix='', relpath=False):
"""Analyzes string *saveas* to see if it contains information on
path to save file, name to save file, both or neither (*saveas*
ends in '/' to indicate directory only) (able to expand '.'). A full
absolute filename is returned, lacking only file extension. Based on
analysis of missing parts of *saveas*, path information from *def_path*
and/or filename information from *def_prefix* + *def_filename* is
inserted. *def_prefix* is intended to be something like ``mplthread_``
to identify the type of figure.
"""
defname = def_prefix + def_filename.replace(' ', '_')
if saveas is None:
pth = def_path
fil = defname
else:
pth, fil = os.path.split(saveas)
pth = pth if pth != '' else def_path
fil = fil if fil != '' else defname
abspathfile = os.path.join(os.path.abspath(pth), fil)
if relpath:
return os.path.relpath(abspathfile, os.getcwd())
else:
return abspathfile
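def _expand_saveas_example():
    """Illustrative usage sketch; the paths below are hypothetical and only
    show how missing pieces of *saveas* are filled from the defaults."""
    # no saveas at all: default path, prefix, and filename are combined
    full = expand_saveas(None, 'my fig', def_path='/tmp', def_prefix='bar_')
    assert full == os.path.join(os.path.abspath('/tmp'), 'bar_my_fig')
    # trailing '/' means "directory only", so the default filename is kept
    full = expand_saveas('plots/', 'my fig', def_path='/tmp', def_prefix='bar_')
    assert full == os.path.join(os.path.abspath('plots'), 'bar_my_fig')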
def segment_color(argcolor, saptcolor):
"""Find appropriate color expression between overall color directive
*argcolor* and particular color availibility *rxncolor*.
"""
import matplotlib
# validate any sapt color
if saptcolor is not None:
if saptcolor < 0.0 or saptcolor > 1.0:
saptcolor = None
if argcolor is None:
        # no color argument, so take the per-reaction sapt color if present
        if saptcolor is None:
            clr = 'grey'
        else:
            clr = matplotlib.cm.jet(saptcolor)
elif argcolor == 'sapt':
# sapt color from rxn if available
if saptcolor is not None:
clr = matplotlib.cm.jet(saptcolor)
else:
clr = 'grey'
elif argcolor == 'rgb':
# HB/MX/DD sapt color from rxn if available
if saptcolor is not None:
if saptcolor < 0.333:
clr = 'blue'
elif saptcolor < 0.667:
clr = 'green'
else:
clr = 'red'
else:
clr = 'grey'
else:
# color argument is name of mpl color
clr = argcolor
return clr
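def _segment_color_example():
    """Illustrative usage sketch showing how the overall *argcolor* directive
    interacts with a per-reaction sapt color on [0, 1]."""
    assert segment_color('rgb', 0.2) == 'blue'      # low sapt fraction
    assert segment_color('rgb', 0.5) == 'green'     # middle sapt fraction
    assert segment_color('rgb', 0.9) == 'red'       # high sapt fraction
    assert segment_color('black', 0.9) == 'black'   # explicit mpl color wins
    # argcolor='sapt' maps the fraction through matplotlib's jet colormap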
def bars(data, title='', saveas=None, relpath=False, graphicsformat=['pdf'], view=True):
"""Generates a 'gray-bars' diagram between model chemistries with error
statistics in list *data*, which is supplied as part of the dictionary
for each participating bar/modelchem, along with *mc* keys in argument
*data*. The plot is labeled with *title* and each bar with *mc* key and
plotted at a fixed scale to facilitate comparison across projects.
"""
import hashlib
import matplotlib.pyplot as plt
# initialize plot, fix dimensions for consistent Illustrator import
fig, ax = plt.subplots(figsize=(12, 7))
plt.ylim([0, 4.86])
plt.xlim([0, 6])
plt.xticks([])
# label plot and tiers
ax.text(0.4, 4.6, title,
verticalalignment='bottom', horizontalalignment='left',
family='Times New Roman', weight='bold', fontsize=12)
widths = [0.15, 0.02, 0.02, 0.02] # TT, HB, MX, DD
xval = 0.1 # starting posn along x-axis
# plot bar sets
for bar in data:
if bar is not None:
lefts = [xval, xval + 0.025, xval + 0.065, xval + 0.105]
rect = ax.bar(lefts, bar['data'], widths, linewidth=0)
rect[0].set_color('grey')
rect[1].set_color('red')
rect[2].set_color('green')
rect[3].set_color('blue')
ax.text(xval + .08, 4.3, bar['mc'],
verticalalignment='center', horizontalalignment='right', rotation='vertical',
family='Times New Roman', fontsize=8)
xval += 0.20
# save and show
pltuid = title + '_' + hashlib.sha1((title + repr([bar['mc'] for bar in data if bar is not None])).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='bar_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close()
return files_saved
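def _bars_example():
    """Illustrative usage sketch; the error statistics and model-chemistry
    labels below are made up. Each entry supplies an *mc* label and four bar
    heights (TT, HB, MX, DD)."""
    data = [{'mc': 'MP2/adz', 'data': [0.6, 0.4, 0.7, 0.8]},
            {'mc': 'B3LYP/adz', 'data': [1.2, 0.9, 1.3, 1.5]}]
    return bars(data, title='MAE by model chemistry', view=False)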
def flat(data, color=None, title='', xlimit=4.0, xlines=[0.0, 0.3, 1.0], mae=None, mape=None, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Generates a slat diagram between model chemistries with errors in
single-item list *data*, which is supplied as part of the dictionary
for each participating reaction, along with *dbse* and *rxn* keys in
argument *data*. Limits of plot are *xlimit* from the zero-line. If
*color* is None, slats are black, if 'sapt', colors are taken from
sapt_colors module. Summary statistic *mae* is plotted on the
overbound side and relative statistic *mape* on the underbound side.
Saves a file with name *title* and plots to screen if *view*.
"""
import matplotlib.pyplot as plt
Nweft = 1
positions = range(-1, -1 * Nweft - 1, -1)
# initialize plot
fig, ax = plt.subplots(figsize=(12, 0.33))
plt.xlim([-xlimit, xlimit])
plt.ylim([-1 * Nweft - 1, 0])
plt.yticks([])
plt.xticks([])
# fig.patch.set_visible(False)
# ax.patch.set_visible(False)
ax.axis('off')
for xl in xlines:
plt.axvline(xl, color='grey', linewidth=4)
if xl != 0.0:
plt.axvline(-1 * xl, color='grey', linewidth=4)
# plot reaction errors and threads
for rxn in data:
xvals = rxn['data']
clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
ax.plot(xvals, positions, '|', color=clr, markersize=13.0, mew=4)
# plot trimmings
if mae is not None:
plt.axvline(-1 * mae, color='black', linewidth=12)
if mape is not None: # equivalent to MAE for a 10 kcal/mol interaction energy
ax.plot(0.025 * mape, positions, 'o', color='black', markersize=15.0)
# save and show
pltuid = title # simple (not really unique) filename for LaTeX integration
pltfile = expand_saveas(saveas, pltuid, def_prefix='flat_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight',
frameon=False, pad_inches=0.0)
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close() # give this a try
return files_saved
#def mpl_distslat_multiplot_files(pltfile, dbid, dbname, xmin, xmax, mcdats, labels, titles):
# """Saves a plot with basename *pltfile* with a slat representation
# of the modelchems errors in *mcdat*. Plot is in PNG, PDF, & EPS
# and suitable for download, no mouseover properties. Both labeled
# and labelless (for pub) figures are constructed.
#
# """
# import matplotlib as mpl
# from matplotlib.axes import Subplot
# import sapt_colors
# from matplotlib.figure import Figure
#
# nplots = len(mcdats)
# fht = nplots * 0.8
# fig, axt = plt.subplots(figsize=(12.0, fht))
# plt.subplots_adjust(left=0.01, right=0.99, hspace=0.3)
#
# axt.set_xticks([])
# axt.set_yticks([])
# plt.axis('off')
#
# for item in range(nplots):
# mcdat = mcdats[item]
# label = labels[item]
# title = titles[item]
#
# erdat = np.array(mcdat)
# yvals = np.ones(len(mcdat))
# y = np.array([sapt_colors.sapt_colors[dbname][i] for i in label])
#
# ax = Subplot(fig, nplots, 1, item + 1)
# fig.add_subplot(ax)
# sc = ax.scatter(erdat, yvals, c=y, s=3000, marker="|", cmap=mpl.cm.jet, vmin=0, vmax=1)
#
# ax.set_yticks([])
# ax.set_xticks([])
# ax.set_frame_on(False)
# ax.set_xlim([xmin, xmax])
#
# # Write files with only slats
# plt.savefig('scratch/' + pltfile + '_plain' + '.png', transparent=True, format='PNG')
# plt.savefig('scratch/' + pltfile + '_plain' + '.pdf', transparent=True, format='PDF')
# plt.savefig('scratch/' + pltfile + '_plain' + '.eps', transparent=True, format='EPS')
#
# # Rewrite files with guides and labels
# for item in range(nplots):
# ax_again = fig.add_subplot(nplots, 1, item + 1)
# ax_again.set_title(titles[item], fontsize=8)
# ax_again.text(xmin + 0.3, 1.0, stats(np.array(mcdats[item])), fontsize=7, family='monospace', verticalalignment='center')
# ax_again.plot([0, 0], [0.9, 1.1], color='#cccc00', lw=2)
# ax_again.set_frame_on(False)
# ax_again.set_yticks([])
# ax_again.set_xticks([-12.0, -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0, 8.0, 12.0])
# ax_again.tick_params(axis='both', which='major', labelbottom='off', bottom='off')
# ax_again.set_xticks([-12.0, -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0, 8.0, 12.0])
# ax_again.tick_params(axis='both', which='major', labelbottom='on', bottom='off')
#
# plt.savefig('scratch/' + pltfile + '_trimd' + '.png', transparent=True, format='PNG')
# plt.savefig('scratch/' + pltfile + '_trimd' + '.pdf', transparent=True, format='PDF')
# plt.savefig('scratch/' + pltfile + '_trimd' + '.eps', transparent=True, format='EPS')
def valerr(data, color=None, title='', xtitle='', view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""
"""
import hashlib
from itertools import cycle
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(4, 6))
ax1 = fig.add_subplot(211)
plt.axhline(0.0, axes=ax1, color='black')
ax1.set_ylabel('Reaction Energy')
plt.title(title)
ax2 = plt.subplot(212, sharex=ax1)
plt.axhline(0.0, axes=ax2, color='#cccc00')
ax2.set_ylabel('Energy Error')
ax2.set_xlabel(xtitle)
xmin = 500.0
xmax = -500.0
vmin = 1.0
vmax = -1.0
emin = 1.0
emax = -1.0
linecycler = cycle(['-', '--', '-.', ':'])
# plot reaction errors and threads
for trace, tracedata in data.items():
vaxis = []
vmcdata = []
verror = []
for rxn in tracedata:
clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
xmin = min(xmin, rxn['axis'])
xmax = max(xmax, rxn['axis'])
ax1.plot(rxn['axis'], rxn['mcdata'], '^', color=clr, markersize=6.0, mew=0, zorder=10)
vmcdata.append(rxn['mcdata'])
vaxis.append(rxn['axis'])
vmin = min(0, vmin, rxn['mcdata'])
vmax = max(0, vmax, rxn['mcdata'])
if rxn['bmdata'] is not None:
ax1.plot(rxn['axis'], rxn['bmdata'], 'o', color='black', markersize=6.0, zorder=1)
vmin = min(0, vmin, rxn['bmdata'])
vmax = max(0, vmax, rxn['bmdata'])
if rxn['error'][0] is not None:
ax2.plot(rxn['axis'], rxn['error'][0], 's', color=clr, mew=0, zorder=8)
emin = min(0, emin, rxn['error'][0])
emax = max(0, emax, rxn['error'][0])
verror.append(rxn['error'][0])
ls = next(linecycler)
ax1.plot(vaxis, vmcdata, ls, color='grey', label=trace, zorder=3)
ax2.plot(vaxis, verror, ls, color='grey', label=trace, zorder=4)
xbuf = max(0.05, abs(0.02 * xmax))
vbuf = max(0.1, abs(0.02 * vmax))
ebuf = max(0.01, abs(0.02 * emax))
plt.xlim([xmin - xbuf, xmax + xbuf])
ax1.set_ylim([vmin - vbuf, vmax + vbuf])
plt.legend(fontsize='x-small', frameon=False)
ax2.set_ylim([emin - ebuf, emax + ebuf])
# save and show
pltuid = title + '_' + hashlib.sha1(title.encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='valerr_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close() # give this a try
return files_saved
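def _valerr_example():
    """Illustrative usage sketch; the curve values and labels below are made
    up. *data* maps a trace label onto per-point dicts carrying 'axis' (x
    value), 'mcdata' (model value), 'bmdata' (benchmark value), 'error' (a
    one-element list), and optionally a 'color' fraction on [0, 1]."""
    curve = [{'axis': 3.4, 'mcdata': -1.8, 'bmdata': -2.0, 'error': [0.2], 'color': 0.5},
             {'axis': 3.6, 'mcdata': -2.2, 'bmdata': -2.3, 'error': [0.1], 'color': 0.5},
             {'axis': 4.0, 'mcdata': -1.5, 'bmdata': -1.6, 'error': [0.1], 'color': 0.5}]
    return valerr({'MP2/adz': curve}, color='sapt', title='example_valerr',
                  xtitle='R', view=False)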
def disthist(data, title='', xtitle='', xmin=None, xmax=None,
me=None, stde=None, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Saves a plot with name *saveas* with a histogram representation
of the reaction errors in *data*. Also plots a gaussian distribution
with mean *me* and standard deviation *stde*. Plot has x-range
*xmin* to *xmax*, x-axis label *xtitle* and overall title *title*.
"""
import hashlib
import numpy as np
import matplotlib.pyplot as plt
def gaussianpdf(u, v, x):
"""*u* is mean, *v* is variance, *x* is value, returns probability"""
return 1.0 / np.sqrt(2.0 * np.pi * v) * np.exp(-pow(x - u, 2) / 2.0 / v)
me = me if me is not None else np.mean(data)
stde = stde if stde is not None else np.std(data, ddof=1)
evenerr = max(abs(me - 4.0 * stde), abs(me + 4.0 * stde))
xmin = xmin if xmin is not None else -1 * evenerr
xmax = xmax if xmax is not None else evenerr
dx = (xmax - xmin) / 40.
nx = int(round((xmax - xmin) / dx)) + 1
pdfx = []
pdfy = []
for i in range(nx):
ix = xmin + i * dx
pdfx.append(ix)
pdfy.append(gaussianpdf(me, pow(stde, 2), ix))
fig, ax1 = plt.subplots(figsize=(16, 6))
plt.axvline(0.0, color='#cccc00')
ax1.set_xlim(xmin, xmax)
ax1.hist(data, bins=30, range=(xmin, xmax), color='#2d4065', alpha=0.7)
ax1.set_xlabel(xtitle)
ax1.set_ylabel('Count')
ax2 = ax1.twinx()
ax2.fill(pdfx, pdfy, color='k', alpha=0.2)
ax2.set_ylabel('Probability Density')
plt.title(title)
# save and show
pltuid = title + '_' + hashlib.sha1((title + str(me) + str(stde) + str(xmin) + str(xmax)).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='disthist_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close()
return files_saved
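def _disthist_example():
    """Illustrative usage sketch; the reaction errors below are made up. A
    histogram of the errors is drawn with a normal distribution overlaid,
    letting the mean and standard deviation default to the sample values."""
    errs = [0.2, -0.4, 0.1, 0.6, -0.3, 0.05, -0.15, 0.3]
    return disthist(errs, title='example_disthist',
                    xtitle='error', view=False)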
#def thread(data, labels, color=None, title='', xlimit=4.0, mae=None, mape=None):
# """Generates a tiered slat diagram between model chemistries with
# errors (or simply values) in list *data*, which is supplied as part of the
# dictionary for each participating reaction, along with *dbse* and *rxn* keys
# in argument *data*. The plot is labeled with *title* and each tier with
# an element of *labels* and plotted at *xlimit* from the zero-line. If
# *color* is None, slats are black, if 'sapt', colors are taken from *color*
# key in *data* [0, 1]. Summary statistics *mae* are plotted on the
# overbound side and relative statistics *mape* on the underbound side.
#
# """
# from random import random
# import matplotlib.pyplot as plt
#
# # initialize tiers/wefts
# Nweft = len(labels)
# lenS = 0.2
# gapT = 0.04
# positions = range(-1, -1 * Nweft - 1, -1)
# posnS = []
# for weft in range(Nweft):
# posnS.extend([positions[weft] + lenS, positions[weft] - lenS, None])
# posnT = []
# for weft in range(Nweft - 1):
# posnT.extend([positions[weft] - lenS - gapT, positions[weft + 1] + lenS + gapT, None])
#
# # initialize plot
# fht = Nweft * 0.8
# fig, ax = plt.subplots(figsize=(12, fht))
# plt.subplots_adjust(left=0.01, right=0.99, hspace=0.3)
# plt.xlim([-xlimit, xlimit])
# plt.ylim([-1 * Nweft - 1, 0])
# plt.yticks([])
#
# # label plot and tiers
# ax.text(-0.9 * xlimit, -0.25, title,
# verticalalignment='bottom', horizontalalignment='left',
# family='Times New Roman', weight='bold', fontsize=12)
# for weft in labels:
# ax.text(-0.9 * xlimit, -(1.2 + labels.index(weft)), weft,
# verticalalignment='bottom', horizontalalignment='left',
# family='Times New Roman', weight='bold', fontsize=18)
#
# # plot reaction errors and threads
# for rxn in data:
#
# # preparation
# xvals = rxn['data']
# clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
# slat = []
# for weft in range(Nweft):
# slat.extend([xvals[weft], xvals[weft], None])
# thread = []
# for weft in range(Nweft - 1):
# thread.extend([xvals[weft], xvals[weft + 1], None])
#
# # plotting
# ax.plot(slat, posnS, color=clr, linewidth=1.0, solid_capstyle='round')
# ax.plot(thread, posnT, color=clr, linewidth=0.5, solid_capstyle='round',
# alpha=0.3)
#
# # labeling
# try:
# toplblposn = next(item for item in xvals if item is not None)
# botlblposn = next(item for item in reversed(xvals) if item is not None)
# except StopIteration:
# pass
# else:
# ax.text(toplblposn, -0.75 + 0.6 * random(), rxn['sys'],
# verticalalignment='bottom', horizontalalignment='center',
# family='Times New Roman', fontsize=8)
# ax.text(botlblposn, -1 * Nweft - 0.75 + 0.6 * random(), rxn['sys'],
# verticalalignment='bottom', horizontalalignment='center',
# family='Times New Roman', fontsize=8)
#
# # plot trimmings
# if mae is not None:
# ax.plot([-x for x in mae], positions, 's', color='black')
# if mape is not None: # equivalent to MAE for a 10 kcal/mol IE
# ax.plot([0.025 * x for x in mape], positions, 'o', color='black')
#
# plt.axvline(0, color='black')
# plt.show()
def threads(data, labels, color=None, title='', xlimit=4.0, mae=None, mape=None,
mousetext=None, mouselink=None, mouseimag=None, mousetitle=None, mousediv=None,
labeled=True, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Generates a tiered slat diagram between model chemistries with
errors (or simply values) in list *data*, which is supplied as part of the
dictionary for each participating reaction, along with *dbse* and *rxn* keys
in argument *data*. The plot is labeled with *title* and each tier with
an element of *labels* and plotted at *xlimit* from the zero-line. If
*color* is None, slats are black, if 'sapt', colors are taken from *color*
key in *data* [0, 1]. Summary statistics *mae* are plotted on the
overbound side and relative statistics *mape* on the underbound side.
HTML code for mouseover if mousetext or mouselink or mouseimag specified
based on recipe of Andrew Dalke from
http://www.dalkescientific.com/writings/diary/archive/2005/04/24/interactive_html.html
"""
import random
import hashlib
import matplotlib.pyplot as plt
import numpy as np # only needed for missing data with mouseiness
# initialize tiers/wefts
Nweft = len(labels)
lenS = 0.2
gapT = 0.04
positions = range(-1, -1 * Nweft - 1, -1)
posnS = []
for weft in range(Nweft):
posnS.extend([positions[weft] + lenS, positions[weft] - lenS, None])
posnT = []
for weft in range(Nweft - 1):
posnT.extend([positions[weft] - lenS - gapT, positions[weft + 1] + lenS + gapT, None])
posnM = []
# initialize plot
fht = Nweft * 0.8
#fig, ax = plt.subplots(figsize=(12, fht))
fig, ax = plt.subplots(figsize=(11, fht))
plt.subplots_adjust(left=0.01, right=0.99, hspace=0.3)
plt.xlim([-xlimit, xlimit])
plt.ylim([-1 * Nweft - 1, 0])
plt.yticks([])
ax.set_frame_on(False)
if labeled:
ax.set_xticks([-0.5 * xlimit, -0.25 * xlimit, 0.0, 0.25 * xlimit, 0.5 * xlimit])
else:
ax.set_xticks([])
for tick in ax.xaxis.get_major_ticks():
tick.tick1line.set_markersize(0)
tick.tick2line.set_markersize(0)
# label plot and tiers
if labeled:
ax.text(-0.9 * xlimit, -0.25, title,
verticalalignment='bottom', horizontalalignment='left',
family='Times New Roman', weight='bold', fontsize=12)
for weft in labels:
ax.text(-0.9 * xlimit, -(1.2 + labels.index(weft)), weft,
verticalalignment='bottom', horizontalalignment='left',
family='Times New Roman', weight='bold', fontsize=18)
# plot reaction errors and threads
for rxn in data:
# preparation
xvals = rxn['data']
clr = segment_color(color, rxn['color'] if 'color' in rxn else None)
slat = []
for weft in range(Nweft):
slat.extend([xvals[weft], xvals[weft], None])
thread = []
for weft in range(Nweft - 1):
thread.extend([xvals[weft], xvals[weft + 1], None])
# plotting
if Nweft == 1:
ax.plot(slat, posnS, '|', color=clr, markersize=20.0, mew=1.5, solid_capstyle='round')
else:
ax.plot(slat, posnS, color=clr, linewidth=1.0, solid_capstyle='round')
ax.plot(thread, posnT, color=clr, linewidth=0.5, solid_capstyle='round', alpha=0.3)
# converting into screen coordinates for image map
# block not working for py3 or up-to-date mpl. better ways for html image map nowadays
#npxvals = [np.nan if val is None else val for val in xvals]
#xyscreen = ax.transData.transform(zip(npxvals, positions))
#xscreen, yscreen = zip(*xyscreen)
#posnM.extend(zip([rxn['db']] * Nweft, [rxn['sys']] * Nweft,
# npxvals, [rxn['show']] * Nweft, xscreen, yscreen))
# labeling
if not(mousetext or mouselink or mouseimag):
if labeled and len(data) < 200:
try:
toplblposn = next(item for item in xvals if item is not None)
botlblposn = next(item for item in reversed(xvals) if item is not None)
except StopIteration:
pass
else:
ax.text(toplblposn, -0.75 + 0.6 * random.random(), rxn['sys'],
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', fontsize=8)
ax.text(botlblposn, -1 * Nweft - 0.75 + 0.6 * random.random(), rxn['sys'],
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', fontsize=8)
# plot trimmings
if mae is not None:
ax.plot([-x for x in mae], positions, 's', color='black')
if labeled:
if mape is not None: # equivalent to MAE for a 10 kcal/mol IE
ax.plot([0.025 * x for x in mape], positions, 'o', color='black')
plt.axvline(0, color='#cccc00')
# save and show
pltuid = title + '_' + ('lbld' if labeled else 'bare') + '_' + hashlib.sha1((title + repr(labels) + repr(xlimit)).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='thread_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
files_saved[ext.lower()] = savefile
if view:
plt.show()
if not (mousetext or mouselink or mouseimag):
plt.close()
return files_saved, None
else:
dpi = 80
img_width = fig.get_figwidth() * dpi
img_height = fig.get_figheight() * dpi
htmlcode = """<SCRIPT>\n"""
htmlcode += """function mouseshow(db, rxn, val, show) {\n"""
if mousetext or mouselink:
htmlcode += """ var cid = document.getElementById("cid");\n"""
if mousetext:
htmlcode += """ cid.innerHTML = %s;\n""" % (mousetext)
if mouselink:
htmlcode += """ cid.href = %s;\n""" % (mouselink)
if mouseimag:
htmlcode += """ var cmpd_img = document.getElementById("cmpd_img");\n"""
htmlcode += """ cmpd_img.src = %s;\n""" % (mouseimag)
htmlcode += """}\n"""
htmlcode += """</SCRIPT>\n"""
if mousediv:
htmlcode += """%s\n""" % (mousediv[0])
if mousetitle:
htmlcode += """%s <BR>""" % (mousetitle)
htmlcode += """<h4>Mouseover</h4><a id="cid"></a><br>\n"""
if mouseimag:
htmlcode += """<div class="text-center">"""
htmlcode += """<IMG ID="cmpd_img" WIDTH="%d" HEIGHT="%d">\n""" % (200, 160)
htmlcode += """</div>"""
if mousediv:
htmlcode += """%s\n""" % (mousediv[1])
#htmlcode += """<IMG SRC="%s" ismap usemap="#points" WIDTH="%d" HEIGHT="%d">\n""" % \
# (pltfile + '.png', img_width, img_height)
htmlcode += """<IMG SRC="%s" ismap usemap="#points" WIDTH="%d">\n""" % \
(pltfile + '.png', img_width)
htmlcode += """<MAP name="points">\n"""
# generating html image map code
# points sorted to avoid overlapping map areas that can overwhelm html for SSI
# y=0 on top for html and on bottom for mpl, so flip the numbers
posnM.sort(key=lambda tup: tup[2])
posnM.sort(key=lambda tup: tup[3])
last = (0, 0)
for dbse, rxn, val, show, x, y in posnM:
if val is None or val is np.nan:
continue
now = (int(x), int(y))
if now == last:
htmlcode += """<!-- map overlap! %s-%s %+.2f skipped -->\n""" % (dbse, rxn, val)
else:
htmlcode += """<AREA shape="rect" coords="%d,%d,%d,%d" onmouseover="javascript:mouseshow('%s', '%s', '%+.2f', '%s');">\n""" % \
(x - 2, img_height - y - 20,
x + 2, img_height - y + 20,
dbse, rxn, val, show)
last = now
htmlcode += """</MAP>\n"""
plt.close()
return files_saved, htmlcode
def ternary(sapt, title='', labeled=True, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Takes array of arrays *sapt* in form [elst, indc, disp] and builds formatted
two-triangle ternary diagrams. Either fully-readable or dotsonly depending
on *labeled*. Saves in formats *graphicsformat*.
"""
import hashlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.path import Path
import matplotlib.patches as patches
# initialize plot
fig, ax = plt.subplots(figsize=(6, 3.6))
plt.xlim([-0.75, 1.25])
plt.ylim([-0.18, 1.02])
plt.xticks([])
plt.yticks([])
ax.set_aspect('equal')
if labeled:
# form and color ternary triangles
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
pathPos = Path([(0., 0.), (1., 0.), (0.5, 0.866), (0., 0.)], codes)
pathNeg = Path([(0., 0.), (-0.5, 0.866), (0.5, 0.866), (0., 0.)], codes)
ax.add_patch(patches.PathPatch(pathPos, facecolor='white', lw=2))
ax.add_patch(patches.PathPatch(pathNeg, facecolor='#fff5ee', lw=2))
# form and color HB/MX/DD dividing lines
ax.plot([0.667, 0.5], [0., 0.866], color='#eeb4b4', lw=0.5)
ax.plot([-0.333, 0.5], [0.577, 0.866], color='#eeb4b4', lw=0.5)
ax.plot([0.333, 0.5], [0., 0.866], color='#7ec0ee', lw=0.5)
ax.plot([-0.167, 0.5], [0.289, 0.866], color='#7ec0ee', lw=0.5)
# label corners
ax.text(1.0, -0.15, u'Elst (\u2212)',
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', weight='bold', fontsize=18)
ax.text(0.5, 0.9, u'Ind (\u2212)',
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', weight='bold', fontsize=18)
ax.text(0.0, -0.15, u'Disp (\u2212)',
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', weight='bold', fontsize=18)
ax.text(-0.5, 0.9, u'Elst (+)',
verticalalignment='bottom', horizontalalignment='center',
family='Times New Roman', weight='bold', fontsize=18)
xvals = []
yvals = []
cvals = []
for sys in sapt:
[elst, indc, disp] = sys
# calc ternary posn and color
Ftop = abs(indc) / (abs(elst) + abs(indc) + abs(disp))
Fright = abs(elst) / (abs(elst) + abs(indc) + abs(disp))
xdot = 0.5 * Ftop + Fright
ydot = 0.866 * Ftop
cdot = 0.5 + (xdot - 0.5) / (1. - Ftop)
if elst > 0.:
xdot = 0.5 * (Ftop - Fright)
ydot = 0.866 * (Ftop + Fright)
#print elst, indc, disp, '', xdot, ydot, cdot
xvals.append(xdot)
yvals.append(ydot)
cvals.append(cdot)
sc = ax.scatter(xvals, yvals, c=cvals, s=15, marker="o", \
cmap=mpl.cm.jet, edgecolor='none', vmin=0, vmax=1, zorder=10)
# remove figure outline
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
# save and show
pltuid = title + '_' + ('lbld' if labeled else 'bare') + '_' + hashlib.sha1((title + repr(sapt)).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='tern_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight',
frameon=False, dpi=450, edgecolor='none', pad_inches=0.0)
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close()
return files_saved
#def thread_mouseover_web(pltfile, dbid, dbname, xmin, xmax, mcdats, labels, titles):
# """Saves a plot with name *pltfile* with a slat representation of
# the modelchems errors in *mcdat*. Mouseover shows geometry and error
# from *labels* based on recipe of Andrew Dalke from
# http://www.dalkescientific.com/writings/diary/archive/2005/04/24/interactive_html.html
#
# """
# from matplotlib.backends.backend_agg import FigureCanvasAgg
# import matplotlib
# import sapt_colors
#
# cmpd_width = 200
# cmpd_height = 160
#
# nplots = len(mcdats)
# fht = nplots * 0.8
# fht = nplots * 0.8 * 1.4
# fig = matplotlib.figure.Figure(figsize=(12.0, fht))
# fig.subplots_adjust(left=0.01, right=0.99, hspace=0.3, top=0.8, bottom=0.2)
# img_width = fig.get_figwidth() * 80
# img_height = fig.get_figheight() * 80
#
# htmlcode = """
#<SCRIPT>
#function mouseandshow(name, id, db, dbname) {
# var cid = document.getElementById("cid");
# cid.innerHTML = name;
# cid.href = "fragmentviewer.py?name=" + id + "&dataset=" + db;
# var cmpd_img = document.getElementById("cmpd_img");
# cmpd_img.src = dbname + "/dimers/" + id + ".png";
#}
#</SCRIPT>
#
#Distribution of Fragment Errors in Interaction Energy (kcal/mol)<BR>
#Mouseover:<BR><a id="cid"></a><br>
#<IMG SRC="scratch/%s" ismap usemap="#points" WIDTH="%d" HEIGHT="%d">
#<IMG ID="cmpd_img" WIDTH="%d" HEIGHT="%d">
#<MAP name="points">
#""" % (pltfile, img_width, img_height, cmpd_width, cmpd_height)
#
# for item in range(nplots):
# print '<br><br><br><br><br><br>'
# mcdat = mcdats[item]
# label = labels[item]
# tttle = titles[item]
#
# erdat = np.array(mcdat)
# # No masked_array because interferes with html map
# #erdat = np.ma.masked_array(mcdat, mask=mask)
# yvals = np.ones(len(mcdat))
# y = np.array([sapt_colors.sapt_colors[dbname][i] for i in label])
#
# ax = fig.add_subplot(nplots, 1, item + 1)
# sc = ax.scatter(erdat, yvals, c=y, s=3000, marker="|", cmap=matplotlib.cm.jet, vmin=0, vmax=1)
# ax.set_title(tttle, fontsize=8)
# ax.set_yticks([])
# lp = ax.plot([0, 0], [0.9, 1.1], color='#cccc00', lw=2)
# ax.set_ylim([0.95, 1.05])
# ax.text(xmin + 0.3, 1.0, stats(erdat), fontsize=7, family='monospace', verticalalignment='center')
# if item + 1 == nplots:
# ax.set_xticks([-12.0, -8.0, -4.0, -2.0, -1.0, 0.0, 1.0, 2.0, 4.0, 8.0, 12.0])
# for tick in ax.xaxis.get_major_ticks():
# tick.tick1line.set_markersize(0)
# tick.tick2line.set_markersize(0)
# else:
# ax.set_xticks([])
# ax.set_frame_on(False)
# ax.set_xlim([xmin, xmax])
#
# # Convert the data set points into screen space coordinates
# #xyscreencoords = ax.transData.transform(zip(erdat, yvals))
# xyscreencoords = ax.transData.transform(zip(erdat, yvals))
# xcoords, ycoords = zip(*xyscreencoords)
#
# # HTML image coordinates have y=0 on the top. Matplotlib
# # has y=0 on the bottom. We'll need to flip the numbers
# for cid, x, y, er in zip(label, xcoords, ycoords, erdat):
# htmlcode += """<AREA shape="rect" coords="%d,%d,%d,%d" onmouseover="javascript:mouseandshow('%s %+.2f', '%s', %s, '%s');">\n""" % \
# (x - 2, img_height - y - 20, x + 2, img_height - y + 20, cid, er, cid, dbid, dbname)
#
# htmlcode += "</MAP>\n"
# canvas = FigureCanvasAgg(fig)
# canvas.print_figure('scratch/' + title, dpi=80, transparent=True)
#
# #plt.savefig('mplflat_' + title + '.pdf', bbox_inches='tight', transparent=True, format='PDF')
# #plt.savefig(os.environ['HOME'] + os.sep + 'mplflat_' + title + '.pdf', bbox_inches='tight', transparent=True, format='PDF')
#
# return htmlcode
def composition_tile(db, aa1, aa2):
"""Takes dictionary *db* of label, error pairs and amino acids *aa1*
and *aa2* and returns a square array of all errors for that amino
acid pair, buffered by zeros.
"""
import re
import numpy as np
    bfdbpattern = re.compile(r"\d\d\d([A-Z][A-Z][A-Z])-\d\d\d([A-Z][A-Z][A-Z])-\d")
tiles = []
for key, val in db.items():
bfdbname = bfdbpattern.match(key)
if (bfdbname.group(1) == aa1 and bfdbname.group(2) == aa2) or \
(bfdbname.group(2) == aa1 and bfdbname.group(1) == aa2):
tiles.append(val)
if not tiles:
# fill in background when no data. only sensible for neutral center colormaps
tiles = [0]
dim = int(np.ceil(np.sqrt(len(tiles))))
pad = dim * dim - len(tiles)
tiles += [0] * pad
return np.reshape(np.array(tiles), (dim, dim))
def iowa(mcdat, mclbl, title='', xtitle='', xlimit=2.0, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Saves a plot with (extensionless) name *pltfile* with an Iowa
representation of the modelchems errors in *mcdat* for BBI/SSI-style
*labels*.
"""
import numpy as np
import hashlib
import matplotlib
import matplotlib.pyplot as plt
aa = ['ARG', 'HIE', 'LYS', 'ASP', 'GLU', 'SER', 'THR', 'ASN', 'GLN', 'CYS', 'MET', 'GLY', 'ALA', 'VAL', 'ILE', 'LEU', 'PRO', 'PHE', 'TYR', 'TRP']
#aa = ['ILE', 'LEU', 'ASP', 'GLU', 'PHE']
err = dict(zip(mclbl, mcdat))
# handle for frame, overall axis
fig, axt = plt.subplots(figsize=(6, 6))
#axt.set_xticks([]) # for quick nolabel, whiteback
#axt.set_yticks([]) # for quick nolabel, whiteback
axt.set_xticks(np.arange(len(aa)) + 0.3, minor=False)
axt.set_yticks(np.arange(len(aa)) + 0.3, minor=False)
axt.invert_yaxis()
axt.xaxis.tick_top() # comment for quick nolabel, whiteback
axt.set_xticklabels(aa, minor=False, rotation=60, size='small') # comment for quick nolabel, whiteback
axt.set_yticklabels(aa, minor=False, size='small') # comment for quick nolabel, whiteback
axt.xaxis.set_tick_params(width=0, length=0)
axt.yaxis.set_tick_params(width=0, length=0)
#axt.set_title('%s' % (title), fontsize=16, verticalalignment='bottom')
#axt.text(10.0, -1.5, title, horizontalalignment='center', fontsize=16)
# nill spacing between 20x20 heatmaps
plt.subplots_adjust(hspace=0.001, wspace=0.001)
index = 1
for aa1 in aa:
for aa2 in aa:
cb = composition_tile(err, aa1, aa2)
ax = matplotlib.axes.Subplot(fig, len(aa), len(aa), index)
fig.add_subplot(ax)
heatmap = ax.pcolor(cb, vmin=-xlimit, vmax=xlimit, cmap=plt.cm.PRGn)
ax.set_xticks([])
ax.set_yticks([])
index += 1
#plt.title(title)
axt.axvline(x=4.8, linewidth=5, color='k')
axt.axvline(x=8.75, linewidth=5, color='k')
axt.axvline(x=11.6, linewidth=5, color='k')
axt.axhline(y=4.8, linewidth=5, color='k')
axt.axhline(y=8.75, linewidth=5, color='k')
axt.axhline(y=11.6, linewidth=5, color='k')
axt.set_zorder(100)
# save and show
pltuid = title + '_' + hashlib.sha1((title + str(xlimit)).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='iowa_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight')
#plt.savefig(savefile, transparent=False, format=ext, bbox_inches='tight') # for quick nolabel, whiteback
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close()
return files_saved
def liliowa(mcdat, title='', xlimit=2.0, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Saves a plot with a heatmap representation of *mcdat*.
"""
import numpy as np
import hashlib
import matplotlib
import matplotlib.pyplot as plt
# handle for frame, overall axis
fig, axt = plt.subplots(figsize=(1, 1))
axt.set_xticks([])
axt.set_yticks([])
axt.invert_yaxis()
axt.xaxis.set_tick_params(width=0, length=0)
axt.yaxis.set_tick_params(width=0, length=0)
axt.set_aspect('equal')
# remove figure outline
axt.spines['top'].set_visible(False)
axt.spines['right'].set_visible(False)
axt.spines['bottom'].set_visible(False)
axt.spines['left'].set_visible(False)
tiles = mcdat
dim = int(np.ceil(np.sqrt(len(tiles))))
pad = dim * dim - len(tiles)
tiles += [0] * pad
cb = np.reshape(np.array(tiles), (dim, dim))
heatmap = axt.pcolor(cb, vmin=-xlimit, vmax=xlimit, cmap=plt.cm.PRGn)
# save and show
pltuid = title + '_' + hashlib.sha1((title + str(xlimit)).encode()).hexdigest()
pltfile = expand_saveas(saveas, pltuid, def_prefix='liliowa_', relpath=relpath)
files_saved = {}
for ext in graphicsformat:
savefile = pltfile + '.' + ext.lower()
plt.savefig(savefile, transparent=True, format=ext, bbox_inches='tight',
frameon=False, pad_inches=0.0)
files_saved[ext.lower()] = savefile
if view:
plt.show()
plt.close()
return files_saved
if __name__ == "__main__":
merge_dats = [
{'show':'a', 'db':'HSG', 'sys':'1', 'data':[0.3508, 0.1234, 0.0364, 0.0731, 0.0388]},
{'show':'b', 'db':'HSG', 'sys':'3', 'data':[0.2036, -0.0736, -0.1650, -0.1380, -0.1806]},
#{'show':'', 'db':'S22', 'sys':'14', 'data':[np.nan, -3.2144, np.nan, np.nan, np.nan]},
{'show':'c', 'db':'S22', 'sys':'14', 'data':[None, -3.2144, None, None, None]},
{'show':'d', 'db':'S22', 'sys':'15', 'data':[-1.5090, -2.5263, -2.9452, -2.8633, -3.1059]},
{'show':'e', 'db':'S22', 'sys':'22', 'data':[0.3046, -0.2632, -0.5070, -0.4925, -0.6359]}]
threads(merge_dats, labels=['d', 't', 'dt', 'q', 'tq'], color='sapt',
title='MP2-CPa[]z', mae=[0.25, 0.5, 0.5, 0.3, 1.0], mape=[20.1, 25, 15, 5.5, 3.6])
more_dats = [
{'mc':'MP2-CP-adz', 'data':[1.0, 0.8, 1.4, 1.6]},
{'mc':'MP2-CP-adtz', 'data':[0.6, 0.2, 0.4, 0.6]},
None,
{'mc':'MP2-CP-adzagain', 'data':[1.0, 0.8, 1.4, 1.6]}]
bars(more_dats, title='asdf')
single_dats = [
{'dbse':'HSG', 'sys':'1', 'data':[0.3508]},
{'dbse':'HSG', 'sys':'3', 'data':[0.2036]},
{'dbse':'S22', 'sys':'14', 'data':[None]},
{'dbse':'S22', 'sys':'15', 'data':[-1.5090]},
{'dbse':'S22', 'sys':'22', 'data':[0.3046]}]
#flat(single_dats, color='sapt', title='fg_MP2_adz', mae=0.25, mape=20.1)
flat([{'sys': '1', 'color': 0.6933450559423702, 'data': [0.45730000000000004]}, {'sys': '2', 'color': 0.7627027688599753, 'data': [0.6231999999999998]}, {'sys': '3', 'color': 0.7579958735528617, 'data': [2.7624999999999993]}, {'sys': '4', 'color': 0.7560883254421639, 'data': [2.108600000000001]}, {'sys': '5', 'color': 0.7515161912065955, 'data': [2.2304999999999993]}, {'sys': '6', 'color': 0.7235223893438876, 'data': [1.3782000000000014]}, {'sys': '7', 'color': 0.7120099024225569, 'data': [1.9519000000000002]}, {'sys': '8', 'color': 0.13721565059144678, 'data': [0.13670000000000004]}, {'sys': '9', 'color': 0.3087395095814767, 'data': [0.2966]}, {'sys': '10', 'color': 0.25493207637105103, 'data': [-0.020199999999999996]}, {'sys': '11', 'color': 0.24093814608979347, 'data': [-1.5949999999999998]}, {'sys': '12', 'color': 0.3304746631959777, 'data': [-1.7422000000000004]}, {'sys': '13', 'color': 0.4156050644764822, 'data': [0.0011999999999989797]}, {'sys': '14', 'color': 0.2667207259626991, 'data': [-2.6083999999999996]}, {'sys': '15', 'color': 0.3767053567641695, 'data': [-1.5090000000000003]}, {'sys': '16', 'color': 0.5572641509433963, 'data': [0.10749999999999993]}, {'sys': '17', 'color': 0.4788598239641578, 'data': [0.29669999999999996]}, {'sys': '18', 'color': 0.3799031371351281, 'data': [0.10209999999999964]}, {'sys': '19', 'color': 0.5053227185999078, 'data': [0.16610000000000014]}, {'sys': '20', 'color': 0.2967660584483015, 'data': [-0.37739999999999974]}, {'sys': '21', 'color': 0.38836460733750316, 'data': [-0.4712000000000005]}, {'sys': '22', 'color': 0.5585849893078809, 'data': [0.30460000000000065]}, {'sys': 'BzBz_PD36-1.8', 'color': 0.1383351040559965, 'data': [-1.1921]}, {'sys': 'BzBz_PD34-2.0', 'color': 0.23086034843049832, 'data': [-1.367]}, {'sys': 'BzBz_T-5.2', 'color': 0.254318060864096, 'data': [-0.32230000000000025]}, {'sys': 'BzBz_T-5.1', 'color': 0.26598486566733337, 'data': [-0.3428]}, {'sys': 'BzBz_T-5.0', 'color': 0.28011258347610224, 'data': [-0.36060000000000025]}, {'sys': 'PyPy_S2-3.9', 'color': 0.14520332101084785, 'data': [-0.9853000000000001]}, {'sys': 'PyPy_S2-3.8', 'color': 0.1690757103699542, 'data': [-1.0932]}, {'sys': 'PyPy_S2-3.5', 'color': 0.25615734567417053, 'data': [-1.4617]}, {'sys': 'PyPy_S2-3.7', 'color': 0.19566550224566906, 'data': [-1.2103999999999995]}, {'sys': 'PyPy_S2-3.6', 'color': 0.22476748600170826, 'data': [-1.3333]}, {'sys': 'BzBz_PD32-2.0', 'color': 0.31605681987208084, 'data': [-1.6637]}, {'sys': 'BzBz_T-4.8', 'color': 0.31533827331543723, 'data': [-0.38759999999999994]}, {'sys': 'BzBz_T-4.9', 'color': 0.2966146678069063, 'data': [-0.3759999999999999]}, {'sys': 'BzH2S-3.6', 'color': 0.38284814928043304, 'data': [-0.1886000000000001]}, {'sys': 'BzBz_PD32-1.7', 'color': 0.3128835191478639, 'data': [-1.8703999999999998]}, {'sys': 'BzMe-3.8', 'color': 0.24117892478245323, 'data': [-0.034399999999999986]}, {'sys': 'BzMe-3.9', 'color': 0.22230903086047088, 'data': [-0.046499999999999986]}, {'sys': 'BzH2S-3.7', 'color': 0.36724255203373696, 'data': [-0.21039999999999992]}, {'sys': 'BzMe-3.6', 'color': 0.284901522674611, 'data': [0.007099999999999884]}, {'sys': 'BzMe-3.7', 'color': 0.2621086166558813, 'data': [-0.01770000000000005]}, {'sys': 'BzBz_PD32-1.9', 'color': 0.314711251903219, 'data': [-1.7353999999999998]}, {'sys': 'BzBz_PD32-1.8', 'color': 0.3136181753200793, 'data': [-1.8039999999999998]}, {'sys': 'BzH2S-3.8', 'color': 0.3542001591399945, 'data': [-0.22230000000000016]}, {'sys': 'BzBz_PD36-1.9', 'color': 0.14128552184232473, 
'data': [-1.1517]}, {'sys': 'BzBz_S-3.7', 'color': 0.08862098445220466, 'data': [-1.3414]}, {'sys': 'BzH2S-4.0', 'color': 0.33637540012259076, 'data': [-0.2265999999999999]}, {'sys': 'BzBz_PD36-1.5', 'color': 0.13203548045236127, 'data': [-1.3035]}, {'sys': 'BzBz_S-3.8', 'color': 0.0335358832178858, 'data': [-1.2022]}, {'sys': 'BzBz_S-3.9', 'color': 0.021704594689389095, 'data': [-1.0747]}, {'sys': 'PyPy_T3-5.1', 'color': 0.3207725129126432, 'data': [-0.2958000000000003]}, {'sys': 'PyPy_T3-5.0', 'color': 0.3254925304351165, 'data': [-0.30710000000000015]}, {'sys': 'BzBz_PD36-1.7', 'color': 0.13577087141986593, 'data': [-1.2333000000000003]}, {'sys': 'PyPy_T3-4.8', 'color': 0.3443704059902452, 'data': [-0.32010000000000005]}, {'sys': 'PyPy_T3-4.9', 'color': 0.3333442013628509, 'data': [-0.3158999999999996]}, {'sys': 'PyPy_T3-4.7', 'color': 0.35854000505665756, 'data': [-0.31530000000000014]}, {'sys': 'BzBz_PD36-1.6', 'color': 0.13364651314909243, 'data': [-1.2705000000000002]}, {'sys': 'BzMe-4.0', 'color': 0.20560117919562013, 'data': [-0.05389999999999984]}, {'sys': 'MeMe-3.6', 'color': 0.16934865900383142, 'data': [0.18420000000000003]}, {'sys': 'MeMe-3.7', 'color': 0.1422332591197123, 'data': [0.14680000000000004]}, {'sys': 'MeMe-3.4', 'color': 0.23032794290360467, 'data': [0.29279999999999995]}, {'sys': 'MeMe-3.5', 'color': 0.19879551978386897, 'data': [0.23260000000000003]}, {'sys': 'MeMe-3.8', 'color': 0.11744404936205816, 'data': [0.11680000000000001]}, {'sys': 'BzBz_PD34-1.7', 'color': 0.22537382457222138, 'data': [-1.5286999999999997]}, {'sys': 'BzBz_PD34-1.6', 'color': 0.22434088042760192, 'data': [-1.5754000000000001]}, {'sys': 'BzBz_PD32-2.2', 'color': 0.3189891685300601, 'data': [-1.5093999999999999]}, {'sys': 'BzBz_S-4.1', 'color': 0.10884135031532088, 'data': [-0.8547000000000002]}, {'sys': 'BzBz_S-4.0', 'color': 0.06911476296747143, 'data': [-0.9590000000000001]}, {'sys': 'BzBz_PD34-1.8', 'color': 0.22685419834431494, 'data': [-1.476]}, {'sys': 'BzBz_PD34-1.9', 'color': 0.2287079261672095, 'data': [-1.4223999999999997]}, {'sys': 'BzH2S-3.9', 'color': 0.3439077006047999, 'data': [-0.22739999999999982]}, {'sys': 'FaNNFaNN-4.1', 'color': 0.7512716174974567, 'data': [1.7188999999999997]}, {'sys': 'FaNNFaNN-4.0', 'color': 0.7531388297328865, 'data': [1.9555000000000007]}, {'sys': 'FaNNFaNN-4.3', 'color': 0.7478064149182957, 'data': [1.2514000000000003]}, {'sys': 'FaNNFaNN-4.2', 'color': 0.7493794908838113, 'data': [1.4758000000000013]}, {'sys': 'FaOOFaON-4.0', 'color': 0.7589275618320565, 'data': [2.0586]}, {'sys': 'FaOOFaON-3.7', 'color': 0.7619465815742713, 'data': [3.3492999999999995]}, {'sys': 'FaOOFaON-3.9', 'color': 0.7593958895631474, 'data': [2.4471000000000007]}, {'sys': 'FaOOFaON-3.8', 'color': 0.7605108059280967, 'data': [2.8793999999999986]}, {'sys': 'FaONFaON-4.1', 'color': 0.7577459277014137, 'data': [1.8697999999999997]}, {'sys': 'FaOOFaON-3.6', 'color': 0.7633298028299997, 'data': [3.847599999999998]}, {'sys': 'FaNNFaNN-3.9', 'color': 0.7548200901251662, 'data': [2.2089]}, {'sys': 'FaONFaON-3.8', 'color': 0.7582294603551467, 'data': [2.967699999999999]}, {'sys': 'FaONFaON-3.9', 'color': 0.7575285282217349, 'data': [2.578900000000001]}, {'sys': 'FaONFaON-4.2', 'color': 0.7594549221042256, 'data': [1.5579999999999998]}, {'sys': 'FaOOFaNN-3.6', 'color': 0.7661655616885379, 'data': [3.701599999999999]}, {'sys': 'FaOOFaNN-3.7', 'color': 0.7671068376007428, 'data': [3.156500000000001]}, {'sys': 'FaOOFaNN-3.8', 'color': 0.766947626251711, 'data': [2.720700000000001]}, 
{'sys': 'FaONFaNN-3.9', 'color': 0.7569836601896789, 'data': [2.4281000000000006]}, {'sys': 'FaONFaNN-3.8', 'color': 0.758024548462959, 'data': [2.7561999999999998]}, {'sys': 'FaOOFaOO-3.6', 'color': 0.7623422640217077, 'data': [3.851800000000001]}, {'sys': 'FaOOFaOO-3.7', 'color': 0.7597430792159379, 'data': [3.2754999999999974]}, {'sys': 'FaOOFaOO-3.4', 'color': 0.7672554950739594, 'data': [5.193299999999999]}, {'sys': 'FaOOFaOO-3.5', 'color': 0.764908813123865, 'data': [4.491900000000001]}, {'sys': 'FaONFaNN-4.2', 'color': 0.7549212942233738, 'data': [1.534699999999999]}, {'sys': 'FaONFaNN-4.0', 'color': 0.7559404310956357, 'data': [2.1133000000000024]}, {'sys': 'FaONFaNN-4.1', 'color': 0.7551574698775625, 'data': [1.813900000000002]}, {'sys': 'FaONFaON-4.0', 'color': 0.7572064604483282, 'data': [2.2113999999999994]}, {'sys': 'FaOOFaOO-3.8', 'color': 0.7573810956831686, 'data': [2.7634000000000007]}, {'sys': '1', 'color': 0.2784121805328983, 'data': [0.3508]}, {'sys': '2', 'color': 0.22013842798900166, 'data': [-0.034600000000000186]}, {'sys': '3', 'color': 0.12832496088281312, 'data': [0.20360000000000023]}, {'sys': '4', 'color': 0.6993695033529733, 'data': [1.9092000000000002]}, {'sys': '5', 'color': 0.7371192790053749, 'data': [1.656600000000001]}, {'sys': '6', 'color': 0.5367033190796172, 'data': [0.27970000000000006]}, {'sys': '7', 'color': 0.3014220615964802, 'data': [0.32289999999999974]}, {'sys': '8', 'color': 0.01605867807629261, 'data': [0.12199999999999994]}, {'sys': '9', 'color': 0.6106300539083558, 'data': [0.3075999999999999]}, {'sys': '10', 'color': 0.6146680031333968, 'data': [0.6436000000000002]}, {'sys': '11', 'color': 0.6139747851721759, 'data': [0.4551999999999996]}, {'sys': '12', 'color': 0.32122739401126593, 'data': [0.44260000000000005]}, {'sys': '13', 'color': 0.24678148099136055, 'data': [-0.11789999999999967]}, {'sys': '14', 'color': 0.23700950710597016, 'data': [0.42689999999999995]}, {'sys': '15', 'color': 0.23103396678138563, 'data': [0.3266]}, {'sys': '16', 'color': 0.1922070769654413, 'data': [0.0696000000000001]}, {'sys': '17', 'color': 0.19082151944747366, 'data': [0.11159999999999992]}, {'sys': '18', 'color': 0.2886200282444196, 'data': [0.4114]}, {'sys': '19', 'color': 0.23560171133945224, 'data': [-0.1392]}, {'sys': '20', 'color': 0.3268270751294533, 'data': [0.5593]}, {'sys': '21', 'color': 0.7324460869158442, 'data': [0.6806000000000001]}],
color='sapt', title='MP2-CP-adz', mae=1.21356003247, mape=24.6665886087, xlimit=4.0)
lin_dats = [-0.5, -0.4, -0.3, 0, .5, .8, 5]
lin_labs = ['008ILE-012LEU-1', '012LEU-085ASP-1', '004GLU-063LEU-2',
'011ILE-014PHE-1', '027GLU-031LEU-1', '038PHE-041ILE-1', '199LEU-202GLU-1']
iowa(lin_dats, lin_labs, title='ttl', xlimit=0.5)
figs = [0.22, 0.41, 0.14, 0.08, 0.47,
0, 0.38, 0.22, 0.10, 0.20,
0, 0, 0.13, 0.07, 0.25,
0, 0, 0, 0.06, 0.22,
0, 0, 0, 0, 0.69]
liliowa(figs, saveas='SSI-default-MP2-CP-aqz', xlimit=1.0)
disthist(lin_dats)
valerrdata = [{'color': 0.14255710779686612, 'db': 'NBC1', 'sys': 'BzBz_S-3.6', 'error': [0.027999999999999803], 'mcdata': -1.231, 'bmdata': -1.259, 'axis': 3.6}, {'color': 0.08862098445220466, 'db': 'NBC1', 'sys': 'BzBz_S-3.7', 'error': [0.02300000000000013], 'mcdata': -1.535, 'bmdata': -1.558, 'axis': 3.7}, {'color': 0.246634626511043, 'db': 'NBC1', 'sys': 'BzBz_S-3.4', 'error': [0.04200000000000001], 'mcdata': 0.189, 'bmdata': 0.147, 'axis': 3.4}, {'color': 0.19526236766857613, 'db': 'NBC1', 'sys': 'BzBz_S-3.5', 'error': [0.03500000000000003], 'mcdata': -0.689, 'bmdata': -0.724, 'axis': 3.5}, {'color': 0.3443039102164425, 'db': 'NBC1', 'sys': 'BzBz_S-3.2', 'error': [0.05999999999999961], 'mcdata': 3.522, 'bmdata': 3.462, 'axis': 3.2}, {'color': 0.29638827303466814, 'db': 'NBC1', 'sys': 'BzBz_S-3.3', 'error': [0.050999999999999934], 'mcdata': 1.535, 'bmdata': 1.484, 'axis': 3.3}, {'color': 0.42859228971962615, 'db': 'NBC1', 'sys': 'BzBz_S-6.0', 'error': [0.0020000000000000018], 'mcdata': -0.099, 'bmdata': -0.101, 'axis': 6.0}, {'color': 0.30970751839224836, 'db': 'NBC1', 'sys': 'BzBz_S-5.0', 'error': [0.0040000000000000036], 'mcdata': -0.542, 'bmdata': -0.546, 'axis': 5.0}, {'color': 0.3750832778147902, 'db': 'NBC1', 'sys': 'BzBz_S-5.5', 'error': [0.0030000000000000027], 'mcdata': -0.248, 'bmdata': -0.251, 'axis': 5.5}, {'color': 0.0335358832178858, 'db': 'NBC1', 'sys': 'BzBz_S-3.8', 'error': [0.019000000000000128], 'mcdata': -1.674, 'bmdata': -1.693, 'axis': 3.8}, {'color': 0.021704594689389095, 'db': 'NBC1', 'sys': 'BzBz_S-3.9', 'error': [0.016000000000000014], 'mcdata': -1.701, 'bmdata': -1.717, 'axis': 3.9}, {'color': 0.22096255119953187, 'db': 'NBC1', 'sys': 'BzBz_S-4.5', 'error': [0.008000000000000007], 'mcdata': -1.058, 'bmdata': -1.066, 'axis': 4.5}, {'color': 0.10884135031532088, 'db': 'NBC1', 'sys': 'BzBz_S-4.1', 'error': [0.01200000000000001], 'mcdata': -1.565, 'bmdata': -1.577, 'axis': 4.1}, {'color': 0.06911476296747143, 'db': 'NBC1', 'sys': 'BzBz_S-4.0', 'error': [0.014000000000000012], 'mcdata': -1.655, 'bmdata': -1.669, 'axis': 4.0}, {'color': 0.14275218373289067, 'db': 'NBC1', 'sys': 'BzBz_S-4.2', 'error': [0.01100000000000012], 'mcdata': -1.448, 'bmdata': -1.459, 'axis': 4.2}, {'color': 0.4740372133275638, 'db': 'NBC1', 'sys': 'BzBz_S-6.5', 'error': [0.0010000000000000009], 'mcdata': -0.028, 'bmdata': -0.029, 'axis': 6.5}, {'color': 0.6672504378283713, 'db': 'NBC1', 'sys': 'BzBz_S-10.0', 'error': [0.0], 'mcdata': 0.018, 'bmdata': 0.018, 'axis': 10.0}]
valerr({'cat': valerrdata},
color='sapt', xtitle='Rang', title='aggh', graphicsformat=['png'])
| lgpl-3.0 |
feranick/SpectralMachine | Archive/20170609c/SpectraLearnPredict.py | 1 | 60594 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* SpectraLearnPredict
* Perform Machine Learning on Raman spectra.
* version: 20170609c
*
* Uses: Deep Neural Networks, TensorFlow, SVM, PCA, K-Means
*
* By: Nicola Ferralis <[email protected]>
*
***********************************************************
'''
print(__doc__)
import matplotlib
if matplotlib.get_backend() == 'TkAgg':
matplotlib.use('Agg')
import numpy as np
import sys, os.path, getopt, glob, csv
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
import random
#***************************************************************
''' Spectra normalization, preprocessing, model selection '''
#***************************************************************
class preprocDef:
Ynorm = True # Normalize spectra (True: recommended)
fullYnorm = False # Normalize considering full range (False: recommended)
StandardScalerFlag = True # Standardize features by removing the mean and scaling to unit variance (sklearn)
YnormTo = 1
YnormX = 1600
YnormXdelta = 30
enRestrictRegion = False
enLim1 = 450 # for now use indexes rather than actual Energy
enLim2 = 550 # for now use indexes rather than actual Energy
scrambleNoiseFlag = False # Adds random noise to spectra (False: recommended)
scrambleNoiseOffset = 0.1
if StandardScalerFlag:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
#**********************************************
''' Calculation by limited number of points '''
#**********************************************
cherryPickEnPoint = False # False recommended
enSel = [1050, 1150, 1220, 1270, 1330, 1410, 1480, 1590, 1620, 1650]
enSelDelta = [2, 2, 2, 2, 10, 2, 2, 15, 5, 2]
#enSel = [1220, 1270, 1590]
#enSelDelta = [2, 2, 30]
if(cherryPickEnPoint == True):
enRestrictRegion = False
print(' Calculation by limited number of points: ENABLED ')
print(' THIS IS AN EXPERIMENTAL FEATURE \n')
print(' Restricted range: DISABLED')
#**********************************************
''' Deep Neural Networks - sklearn'''
#**********************************************
class nnDef:
runNN = True
alwaysRetrain = False
subsetCrossValid = False
percentCrossValid = 0.10 # proportion of TEST data for cross validation
iterCrossValid = 2
numNeurons = 200 #default = 200
# Optimizers: lbfgs (default), adam, sgd
nnOptimizer = "lbfgs"
# activation functions: http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
# identity, logistic (sigmoid), tanh, relu
activation_function = "tanh"
MLPRegressor = False
# threshold in % of probabilities for listing prediction results
thresholdProbabilityPred = 0.001
plotNN = True
nnClassReport = False
#***********************************************************
''' Deep Neural Networks - tensorflow via DNNClassifier'''
#***********************************************************
class dnntfDef:
runDNNTF = True
alwaysRetrain = False
subsetCrossValid = False
percentCrossValid = 0.10 # proportion of TEST data for cross validation
iterCrossValid = 1
numNeurons = 200 # number of neurons per layer
numHidlayers = 1 # number of hidden layer
# Optimizers: Adagrad (recommended), Adam, Ftrl, Momentum, RMSProp, SGD
# https://www.tensorflow.org/api_guides/python/train
nnOptimizer = "Adagrad"
# activation functions: https://www.tensorflow.org/api_guides/python/nn
# relu, relu6, crelu, elu, softplus, softsign, dropout, bias_add
# sigmoid, tanh
activation_function = "tanh"
trainingSteps = 1000 #number of training steps
# threshold in % of probabilities for listing prediction results
thresholdProbabilityPred = 0.01
logCheckpoint = False
#*************************************************
# Setup variables and definitions- do not change.
#*************************************************
hidden_layers = [numNeurons]*numHidlayers
if runDNNTF == True:
import tensorflow as tf
if activation_function == "sigmoid" or activation_function == "tanh":
actFn = "tf."+activation_function
else:
actFn = "tf.nn."+activation_function
activationFn = eval(actFn)
#**********************************************
''' Support Vector Machines'''
#**********************************************
class svmDef:
runSVM = True
alwaysRetrain = False
subsetCrossValid = False
percentCrossValid = 0.10 # proportion of TEST data for cross validation
iterCrossValid = 2
# threshold in % of probabilities for listing prediction results
thresholdProbabilitySVMPred = 3
''' Training algorithm for SVM
Use either 'linear' or 'rbf'
('rbf' for large number of features) '''
Cfactor = 20
kernel = 'rbf'
showClasses = False
plotSVM = True
svmClassReport = False
#**********************************************
''' Principal component analysis (PCA) '''
#**********************************************
class pcaDef:
runPCA = False
customNumPCAComp = True
numPCAcomponents = 2
#**********************************************
''' K-means '''
#**********************************************
class kmDef:
runKM = False
customNumKMComp = False
numKMcomponents = 20
plotKM = False
plotKMmaps = True
#**********************************************
''' TensorFlow '''
#**********************************************
class tfDef:
runTF = False
alwaysRetrain = False
alwaysImprove = False # alwaysRetrain must be True for this to work
subsetCrossValid = True
percentCrossValid = 0.1 # proportion of TEST data for cross validation
iterCrossValid = 2
# threshold in % of probabilities for listing prediction results
thresholdProbabilityTFPred = 30
decayLearnRate = True
learnRate = 0.75
plotMapTF = True
plotClassDistribTF = False
enableTensorboard = False
#**********************************************
''' Plotting '''
#**********************************************
class plotDef:
showProbPlot = False
showPCAPlots = True
createTrainingDataPlot = False
showTrainingDataPlot = False
plotAllSpectra = True # Set to false for extremely large training sets
if plotAllSpectra == False:
stepSpectraPlot = 100 # steps in the number of spectra to be plotted
#**********************************************
''' Multiprocessing '''
#**********************************************
multiproc = False
#**********************************************
''' Main '''
#**********************************************
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "fatmbkph:", ["file", "accuracy", "traintf", "map", "batch", "kmaps", "pca", "help"])
except:
usage()
sys.exit(2)
if opts == []:
usage()
sys.exit(2)
print(" Using training file: ", sys.argv[2],"\n")
for o, a in opts:
if o in ("-f" , "--file"):
try:
LearnPredictFile(sys.argv[2], sys.argv[3])
except:
usage()
sys.exit(2)
if o in ("-a" , "--accuracy"):
print('\033[1m Running in cross validation mode for accuracy determination...\033[0m\n')
nnDef.alwaysRetrain = True
nnDef.subsetCrossValid = True
dnntfDef.alwaysRetrain = True
dnntfDef.subsetCrossValid = True
dnntfDef.logCheckpoint = True
svmDef.alwaysRetrain = True
svmDef.subsetCrossValid = True
tfDef.alwaysRetrain = True
tfDef.subsetCrossValid = True
try:
LearnPredictFile(sys.argv[2], sys.argv[3])
except:
usage()
sys.exit(2)
if o in ("-t" , "--traintf"):
if len(sys.argv) > 3:
numRuns = int(sys.argv[3])
else:
numRuns = 1
preprocDef.scrambleNoiseFlag = False
try:
TrainTF(sys.argv[2], int(numRuns))
except:
usage()
sys.exit(2)
if o in ("-m" , "--map"):
try:
LearnPredictMap(sys.argv[2], sys.argv[3])
except:
usage()
sys.exit(2)
if o in ("-b" , "--batch"):
try:
LearnPredictBatch(sys.argv[2])
except:
usage()
sys.exit(2)
if o in ("-p" , "--pca"):
if len(sys.argv) > 3:
numPCAcomp = int(sys.argv[3])
else:
numPCAcomp = pcaDef.numPCAcomponents
try:
runPCA(sys.argv[2], numPCAcomp)
except:
usage()
sys.exit(2)
if o in ("-k" , "--kmaps"):
if len(sys.argv) > 3:
numKMcomp = int(sys.argv[3])
else:
numKMcomp = kmDef.numKMcomponents
try:
KmMap(sys.argv[2], numKMcomp)
except:
usage()
sys.exit(2)
#**********************************************
''' Learn and Predict - File'''
#**********************************************
def LearnPredictFile(learnFile, sampleFile):
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Run PCA '''
if pcaDef.runPCA == True:
runPCAmain(A, Cl, En)
''' Open prediction file '''
R, Rx = readPredFile(sampleFile)
''' Preprocess prediction data '''
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
R, Rorig = preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, 0)
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
runNN(A, Cl, R, learnFileRoot)
''' Run Neural Network - TensorFlow'''
if dnntfDef.runDNNTF == True:
runDNNTF(A, Cl, R, learnFileRoot)
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
runSVM(A, Cl, En, R, learnFileRoot)
''' Tensorflow '''
if tfDef.runTF == True:
runTFbasic(A,Cl,R, learnFileRoot)
''' Plot Training Data '''
if plotDef.createTrainingDataPlot == True:
plotTrainData(A, En, R, plotDef.plotAllSpectra, learnFileRoot)
''' Run K-Means '''
if kmDef.runKM == True:
runKMmain(A, Cl, En, R, Aorig, Rorig)
#**********************************************
''' Process - Batch'''
#**********************************************
def LearnPredictBatch(learnFile):
summary_filename = 'summary' + str(datetime.now().strftime('_%Y-%m-%d_%H-%M-%S.csv'))
makeHeaderSummary(summary_filename, learnFile)
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
if multiproc == True:
import multiprocessing as mp
p = mp.Pool()
for f in glob.glob('*.txt'):
if (f != learnFile):
p.apply_async(processSingleBatch, args=(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile))
p.close()
p.join()
else:
for f in glob.glob('*.txt'):
if (f != learnFile):
processSingleBatch(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile)
def processSingleBatch(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile):
print(' Processing file: \033[1m' + f + '\033[0m\n')
R, Rx = readPredFile(f)
summaryFile = [f]
''' Preprocess prediction data '''
R, Rorig = preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, 0)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
nnPred, nnProb = runNN(A, Cl, R, learnFileRoot)
summaryFile.extend([nnPred, nnProb])
nnDef.alwaysRetrain = False
''' Run Neural Network - TensorFlow'''
if dnntfDef.runDNNTF == True:
dnntfPred, dnntfProb = runDNNTF(A, Cl, R, learnFileRoot)
        summaryFile.extend([dnntfPred, dnntfProb])
dnntfDef.alwaysRetrain = False
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
svmPred, svmProb = runSVM(A, Cl, En, R, learnFileRoot)
summaryFile.extend([svmPred, svmProb])
svmDef.alwaysRetrain = False
''' Tensorflow '''
if tfDef.runTF == True:
tfPred, tfProb, tfAccur = runTFbasic(A,Cl,R, learnFileRoot)
summaryFile.extend([tfPred, tfProb, tfAccur])
tfDef.tfalwaysRetrain = False
''' Run K-Means '''
if kmDef.runKM == True:
kmDef.plotKM = False
kmPred = runKMmain(A, Cl, En, R, Aorig, Rorig)
summaryFile.extend([kmPred])
with open(summary_filename, "a") as sum_file:
csv_out=csv.writer(sum_file)
csv_out.writerow(summaryFile)
#**********************************************
''' Learn and Predict - Maps'''
#**********************************************
def LearnPredictMap(learnFile, mapFile):
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Open prediction map '''
X, Y, R, Rx = readPredMap(mapFile)
type = 0
    i = 0
    svmPred = np.empty([X.shape[0]])
    nnPred = np.empty([X.shape[0]])
    dnntfPred = np.empty([X.shape[0]])
    tfPred = np.empty([X.shape[0]])
    kmPred = np.empty([X.shape[0]])
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, type)
print(' Processing map...' )
for r in R[:]:
r, rorig = preProcessNormPredData(r, Rx, A, En, Cl, YnormXind, type)
type = 1
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
nnPred[i], temp = runNN(A, Cl, r, learnFileRoot)
saveMap(mapFile, 'NN', 'HC', nnPred[i], X[i], Y[i], True)
nnDef.alwaysRetrain = False
''' Run Neural Network - TensorFlow'''
        if dnntfDef.runDNNTF == True:
            dnntfPred[i], temp = runDNNTF(A, Cl, r, learnFileRoot)
            saveMap(mapFile, 'DNN-TF', 'HC', dnntfPred[i], X[i], Y[i], True)
            dnntfDef.alwaysRetrain = False
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
svmPred[i], temp = runSVM(A, Cl, En, r, learnFileRoot)
saveMap(mapFile, 'svm', 'HC', svmPred[i], X[i], Y[i], True)
svmDef.alwaysRetrain = False
''' Tensorflow '''
if tfDef.runTF == True:
tfPred[i], temp, temp = runTFbasic(A,Cl,r, learnFileRoot)
saveMap(mapFile, 'TF', 'HC', tfPred[i], X[i], Y[i], True)
tfDef.alwaysRetrain = False
''' Run K-Means '''
if kmDef.runKM == True:
kmDef.plotKM = False
kmPred[i] = runKMmain(A, Cl, En, r, Aorig, rorig)
saveMap(mapFile, 'KM', 'HC', kmPred[i], X[i], Y[i], True)
i+=1
if nnDef.plotNN == True and nnDef.runNN == True:
plotMaps(X, Y, nnPred, 'Deep Neural networks - sklearn')
    if dnntfDef.runDNNTF == True:
plotMaps(X, Y, dnntfPred, 'Deep Neural networks - tensorFlow')
if svmDef.plotSVM == True and svmDef.runSVM == True:
plotMaps(X, Y, svmPred, 'SVM')
if tfDef.plotMapTF == True and tfDef.runTF == True:
plotMaps(X, Y, tfPred, 'TensorFlow')
if kmDef.plotKMmaps == True and kmDef.runKM == True:
plotMaps(X, Y, kmPred, 'K-Means Prediction')
#********************************************************************************
''' Run Neural Network - sklearn '''
#********************************************************************************
def runNN(A, Cl, R, Root):
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.externals import joblib
    if nnDef.MLPRegressor is False:
        nnTrainedData = Root + '.nnModelC.pkl'
else:
nnTrainedData = Root + '.nnModelR.pkl'
print('==========================================================================\n')
print(' Running Neural Network: multi-layer perceptron (MLP)')
print(' Number of neurons: Hidden layers:', nnDef.numNeurons)
print(' Optimizer:',nnDef.nnOptimizer,', Activation Fn:',nnDef.activation_function)
try:
if nnDef.alwaysRetrain == False:
with open(nnTrainedData):
print(' Opening NN training model...\n')
clf = joblib.load(nnTrainedData)
else:
raise ValueError('Force NN retraining.')
except:
#**********************************************
''' Retrain training data if not available'''
#**********************************************
if nnDef.MLPRegressor is False:
print(' Retraining NN model using MLP Classifier...')
clf = MLPClassifier(solver=nnDef.nnOptimizer, alpha=1e-5, activation = nnDef.activation_function,
hidden_layer_sizes=(nnDef.numNeurons,), random_state=1)
else:
print(' Retraining NN model using MLP Regressor...')
clf = MLPRegressor(solver=nnDef.nnOptimizer, alpha=1e-5, hidden_layer_sizes=(nnDef.numNeurons,), random_state=1)
Cl = np.array(Cl,dtype=float)
if nnDef.subsetCrossValid == True:
print(" Iterating training using: ",str(nnDef.percentCrossValid*100), "% as test subset, iterating",str(nnDef.iterCrossValid)," time(s) ...\n")
for i in range(nnDef.iterCrossValid):
As, Cls, As_cv, Cls_cv = formatSubset(A, Cl, nnDef.percentCrossValid)
clf.fit(As, Cls)
if nnDef.MLPRegressor is False:
print(' Mean accuracy: ',100*clf.score(As_cv,Cls_cv),'%')
else:
print(' Coefficient of determination R^2: ',clf.score(As_cv,Cls_cv))
else:
print(" Training on the full training dataset\n")
clf.fit(A, Cl)
joblib.dump(clf, nnTrainedData)
if nnDef.MLPRegressor is False:
prob = clf.predict_proba(R)[0].tolist()
rosterPred = np.where(clf.predict_proba(R)[0]>nnDef.thresholdProbabilityPred/100)[0]
print('\n ==============================')
print(' \033[1mNN\033[0m - Probability >',str(nnDef.thresholdProbabilityPred),'%')
print(' ==============================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.4f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
print(' ==============================')
predValue = clf.predict(R)[0]
predProb = round(100*max(prob),4)
print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - sklearn) = ' + str(predValue) +
' (probability = ' + str(predProb) + '%)\033[0m\n')
else:
Cl = np.array(Cl,dtype=float)
predValue = clf.predict(R)[0]
predProb = clf.score(A,Cl)
print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - sklearn) = ' + str('{:.3f}'.format(predValue)) +
' (R^2 = ' + str('{:.5f}'.format(predProb)) + ')\033[0m\n')
#**************************************
''' Neural Networks Classification Report '''
#**************************************
if nnDef.nnClassReport == True:
print(' Neural Networks Classification Report\n')
runClassReport(clf, A, Cl)
#*************************
''' Plot probabilities '''
#*************************
if plotDef.showProbPlot == True:
if nnDef.MLPRegressor is False:
plotProb(clf, R)
return predValue, predProb
#********************************************************************************
''' TensorFlow '''
''' Run SkFlow - DNN Classifier '''
''' https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier'''
#********************************************************************************
''' Run DNNClassifier model training and evaluation via TensorFlow-skflow '''
#********************************************************************************
def runDNNTF(A, Cl, R, Root):
print('==========================================================================\n')
print(' Running Deep Neural Networks: DNNClassifier - TensorFlow...')
print(' Hidden layers:', dnntfDef.hidden_layers)
print(' Optimizer:',dnntfDef.nnOptimizer,', Activation function:',dnntfDef.activation_function)
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from sklearn import preprocessing
if dnntfDef.logCheckpoint ==True:
tf.logging.set_verbosity(tf.logging.INFO)
if dnntfDef.alwaysRetrain == False:
model_directory = Root + "/DNN-TF_" + str(dnntfDef.numHidlayers)+"x"+str(dnntfDef.numNeurons)
print("\n Training model saved in: ", model_directory, "\n")
else:
model_directory = None
print("\n Training model not saved\n")
#**********************************************
''' Initialize Estimator and training data '''
#**********************************************
print(' Initializing TensorFlow...')
tf.reset_default_graph()
le = preprocessing.LabelEncoder()
Cl2 = le.fit_transform(Cl)
feature_columns = skflow.infer_real_valued_columns_from_input(A.astype(np.float32))
clf = skflow.DNNClassifier(feature_columns=feature_columns, hidden_units=dnntfDef.hidden_layers,
optimizer=dnntfDef.nnOptimizer, n_classes=np.unique(Cl).size,
activation_fn=dnntfDef.activationFn, model_dir=model_directory)
print("\n Number of training steps:",dnntfDef.trainingSteps)
#**********************************************
''' Train '''
#**********************************************
if dnntfDef.subsetCrossValid == True:
print(" Iterating training using: ",str(dnntfDef.percentCrossValid*100), "% as test subset, iterating",str(dnntfDef.iterCrossValid)," time(s) ...\n")
for i in range(dnntfDef.iterCrossValid):
As, Cl2s, As_cv, Cl2s_cv = formatSubset(A, Cl2, dnntfDef.percentCrossValid)
clf.fit(input_fn=lambda: input_fn(As, Cl2s), steps=dnntfDef.trainingSteps)
accuracy_score = clf.evaluate(input_fn=lambda: input_fn(As_cv, Cl2s_cv), steps=1)
print("\n Accuracy: {:.2f}%".format(100*accuracy_score["accuracy"]))
print(" Loss: {:.2f}".format(accuracy_score["loss"]))
print(" Global step: {:.2f}\n".format(accuracy_score["global_step"]))
else:
print(" Training on the full training dataset\n")
clf.fit(input_fn=lambda: input_fn(A, Cl2), steps=dnntfDef.trainingSteps)
#**********************************************
''' Predict '''
#**********************************************
def input_fn_predict():
x = tf.constant(R.astype(np.float32))
return x
pred_class = list(clf.predict_classes(input_fn=input_fn_predict))[0]
predValue = le.inverse_transform(pred_class)
prob = list(clf.predict_proba(input_fn=input_fn_predict))[0]
predProb = round(100*prob[pred_class],2)
rosterPred = np.where(prob>dnntfDef.thresholdProbabilityPred/100)[0]
print('\n ================================')
print(' \033[1mDNN-TF\033[0m - Probability >',str(dnntfDef.thresholdProbabilityPred),'%')
print(' ================================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.4f}'.format(100*prob[rosterPred][i])))
print(' ================================')
    print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - TensorFlow) = ' + predValue +
' (probability = ' + str(predProb) + '%)\033[0m\n')
return predValue, predProb
#**********************************************
''' Format input data for Estimator '''
#**********************************************
def input_fn(A, Cl2):
import tensorflow as tf
x = tf.constant(A.astype(np.float32))
y = tf.constant(Cl2)
return x,y
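# The Estimator consumes this through a closure, as done in runDNNTF above, e.g.:
#   clf.fit(input_fn=lambda: input_fn(A, Cl2), steps=dnntfDef.trainingSteps)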
#********************************************************************************
''' Run SVM '''
#********************************************************************************
def runSVM(A, Cl, En, R, Root):
from sklearn import svm
from sklearn.externals import joblib
svmTrainedData = Root + '.svmModel.pkl'
print('==========================================================================\n')
print(' Running Support Vector Machine (kernel: ' + svmDef.kernel + ')...')
try:
if svmDef.alwaysRetrain == False:
with open(svmTrainedData):
print(' Opening SVM training model...\n')
clf = joblib.load(svmTrainedData)
else:
raise ValueError('Force retraining SVM model')
except:
#**********************************************
''' Retrain training model if not available'''
#**********************************************
print(' Retraining SVM data...')
clf = svm.SVC(C = svmDef.Cfactor, decision_function_shape = 'ovr', probability=True)
if svmDef.subsetCrossValid == True:
print(" Iterating training using: ",str(nnDef.percentCrossValid*100), "% as test subset, iterating",str(nnDef.iterCrossValid)," time(s) ...\n")
for i in range(svmDef.iterCrossValid):
As, Cls, As_cv, Cls_cv = formatSubset(A, Cl, svmDef.percentCrossValid)
clf.fit(As, Cls)
print(' Mean accuracy: ',100*clf.score(As_cv,Cls_cv),'%')
else:
print(" Training on the full training dataset\n")
clf.fit(A,Cl)
Z = clf.decision_function(A)
print('\n Number of classes = ' + str(Z.shape[1]))
joblib.dump(clf, svmTrainedData)
if svmDef.showClasses == True:
print(' List of classes: ' + str(clf.classes_))
R_pred = clf.predict(R)
prob = clf.predict_proba(R)[0].tolist()
rosterPred = np.where(clf.predict_proba(R)[0]>svmDef.thresholdProbabilitySVMPred/100)[0]
print('\n ==============================')
print(' \033[1mSVM\033[0m - Probability >',str(svmDef.thresholdProbabilitySVMPred),'%')
print(' ==============================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.1f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
print(' ==============================')
print('\033[1m' + '\n Predicted value (SVM) = ' + str(R_pred[0]) + ' (probability = ' +
str(round(100*max(prob),1)) + '%)\033[0m\n')
#**************************************
''' SVM Classification Report '''
#**************************************
if svmDef.svmClassReport == True:
print(' SVM Classification Report \n')
runClassReport(clf, A, Cl)
#*************************
''' Plot probabilities '''
#*************************
if plotDef.showProbPlot == True:
plotProb(clf, R)
return R_pred[0], round(100*max(prob),1)
#********************************************************************************
''' Run PCA '''
''' Transform data:
pca.fit(data).transform(data)
Loading Vectors (eigenvectors):
pca.components_
Eigenvalues:
    pca.explained_variance_ratio_
'''
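''' Example (hypothetical shapes): for A of shape (n_spectra, n_energies),
    pca.fit(A).transform(A) returns scores of shape (n_spectra, numPCAcomp);
    pca.components_ holds the loadings and pca.explained_variance_ratio_ the
    variance fraction captured by each component.
    '''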
#********************************************************************************
def runPCA(learnFile, numPCAcomponents):
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib import cm
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
print('==========================================================================\n')
print(' Running PCA...\n')
print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
if pcaDef.customNumPCAComp == False:
numPCAcomp = np.unique(Cl).shape[0]
else:
numPCAcomp = numPCAcomponents
print(' Number of Principal components: ' + str(numPCAcomp) + '\n')
pca = PCA(n_components=numPCAcomp)
A_r = pca.fit(A).transform(A)
for i in range(0,pca.components_.shape[0]):
print(' Score PC ' + str(i) + ': ' + '{0:.0f}%'.format(pca.explained_variance_ratio_[i] * 100))
print('')
if plotDef.showPCAPlots == True:
print(' Plotting Loadings and score plots... \n')
#***************************
''' Plotting Loadings '''
#***************************
for i in range(0,pca.components_.shape[0]):
plt.plot(En, pca.components_[i,:], label='PC' + str(i) + ' ({0:.0f}%)'.format(pca.explained_variance_ratio_[i] * 100))
plt.plot((En[0], En[En.shape[0]-1]), (0.0, 0.0), 'k--')
plt.title('Loadings plot')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Principal component')
plt.legend()
plt.figure()
#***************************
''' Plotting Scores '''
#***************************
Cl_ind = np.zeros(len(Cl))
Cl_labels = np.zeros(0)
ind = np.zeros(np.unique(Cl).shape[0])
for i in range(len(Cl)):
if (np.in1d(Cl[i], Cl_labels, invert=True)):
Cl_labels = np.append(Cl_labels, Cl[i])
for i in range(len(Cl)):
Cl_ind[i] = np.where(Cl_labels == Cl[i])[0][0]
colors = [ cm.jet(x) for x in np.linspace(0, 1, ind.shape[0]) ]
for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
plt.scatter(A_r[Cl_ind==i,0], A_r[Cl_ind==i,1], color=color, alpha=.8, lw=2, label=target_name)
plt.title('Score plot')
plt.xlabel('PC 0 ({0:.0f}%)'.format(pca.explained_variance_ratio_[0] * 100))
plt.ylabel('PC 1 ({0:.0f}%)'.format(pca.explained_variance_ratio_[1] * 100))
plt.figure()
plt.title('Score box plot')
plt.xlabel('Principal Component')
plt.ylabel('Score')
for j in range(pca.components_.shape[0]):
for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
plt.scatter([j+1]*len(A_r[Cl_ind==i,j]), A_r[Cl_ind==i,j], color=color, alpha=.8, lw=2, label=target_name)
plt.boxplot(A_r)
plt.figure()
#******************************
''' Plotting Scores vs H:C '''
#******************************
for j in range(pca.components_.shape[0]):
for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
plt.scatter(np.asarray(Cl)[Cl_ind==i], A_r[Cl_ind==i,j], color=color, alpha=.8, lw=2, label=target_name)
plt.xlabel('H:C elemental ratio')
plt.ylabel('PC ' + str(j) + ' ({0:.0f}%)'.format(pca.explained_variance_ratio_[j] * 100))
plt.figure()
plt.show()
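# Hedged sketch (added for illustration; not called by this program): the PCA calls
# documented in the banner above, in minimal form. `data` is an assumed
# (n_samples, n_features) array, not a variable defined in this script.
def _pca_usage_sketch(data, n_components=2):
    from sklearn.decomposition import PCA
    pca = PCA(n_components=n_components)
    scores = pca.fit(data).transform(data)        # projected data (input to score plots)
    loadings = pca.components_                    # loading vectors (eigenvectors), one row per PC
    explained = pca.explained_variance_ratio_     # fraction of variance carried by each PC
    return scores, loadings, explained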
#********************
''' Run K-Means '''
#********************
def runKMmain(A, Cl, En, R, Aorig, Rorig):
from sklearn.cluster import KMeans
print('==========================================================================\n')
print(' Running K-Means...')
print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
if kmDef.customNumKMComp == False:
numKMcomp = np.unique(Cl).shape[0]
else:
numKMcomp = kmDef.numKMcomponents
kmeans = KMeans(n_clusters=numKMcomp, random_state=0).fit(A)
'''
for i in range(0, numKMcomp):
print('\n Class: ' + str(i) + '\n ',end="")
for j in range(0,kmeans.labels_.shape[0]):
if kmeans.labels_[j] == i:
print(' ' + str(Cl[j]), end="")
'''
print('\n ==============================')
print(' \033[1mKM\033[0m - Predicted class: \033[1m',str(kmeans.predict(R)[0]),'\033[0m')
print(' ==============================')
print(' Prediction')
for j in range(0,kmeans.labels_.shape[0]):
        if kmeans.labels_[j] == kmeans.predict(R)[0]:
print(' ' + str(Cl[j]))
print(' ==============================\n')
if kmDef.plotKM == True:
import matplotlib.pyplot as plt
for j in range(0,kmeans.labels_.shape[0]):
if kmeans.labels_[j] == kmeans.predict(R)[0]:
plt.plot(En, Aorig[j,:])
plt.plot(En, Rorig[0,:], linewidth = 2, label='Predict')
plt.title('K-Means')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Intensity')
plt.legend()
plt.show()
return kmeans.predict(R)[0]
#**********************************************
''' K-Means - Maps'''
#**********************************************
def KmMap(mapFile, numKMcomp):
''' Open prediction map '''
X, Y, R, Rx = readPredMap(mapFile)
type = 0
    i = 0
R, Rx, Rorig = preProcessNormMap(R, Rx, type)
from sklearn.cluster import KMeans
print(' Running K-Means...')
print(' Number of classes: ' + str(numKMcomp))
kmeans = KMeans(n_clusters=kmDef.numKMcomponents, random_state=0).fit(R)
kmPred = np.empty([R.shape[0]])
for i in range(0, R.shape[0]):
kmPred[i] = kmeans.predict(R[i,:].reshape(1,-1))[0]
saveMap(mapFile, 'KM', 'Class', int(kmPred[i]), X[i], Y[i], True)
if kmPred[i] in kmeans.labels_:
if os.path.isfile(saveMapName(mapFile, 'KM', 'Class_'+ str(int(kmPred[i]))+'-'+str(np.unique(kmeans.labels_).shape[0]), False)) == False:
saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, Rx)), ' ', ' ', False)
            saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, R[i,:])), X[i], Y[i], False)
if kmDef.plotKM == True:
plotMaps(X, Y, kmPred, 'K-Means')
#************************************
''' Read Learning file '''
#************************************
def readLearnFile(learnFile):
try:
with open(learnFile, 'r') as f:
M = np.loadtxt(f, unpack =False)
except:
        print('\033[1m' + ' Learning data file not found \n' + '\033[0m')
return
En = np.delete(np.array(M[0,:]),np.s_[0:1],0)
M = np.delete(M,np.s_[0:1],0)
Cl = ['{:.2f}'.format(x) for x in M[:,0]]
A = np.delete(M,np.s_[0:1],1)
Atemp = A[:,range(len(preprocDef.enSel))]
if preprocDef.cherryPickEnPoint == True and preprocDef.enRestrictRegion == False:
enPoints = list(preprocDef.enSel)
enRange = list(preprocDef.enSel)
for i in range(0, len(preprocDef.enSel)):
enRange[i] = np.where((En<float(preprocDef.enSel[i]+preprocDef.enSelDelta[i])) & (En>float(preprocDef.enSel[i]-preprocDef.enSelDelta[i])))[0].tolist()
for j in range(0, A.shape[0]):
Atemp[j,i] = A[j,A[j,enRange[i]].tolist().index(max(A[j, enRange[i]].tolist()))+enRange[i][0]]
enPoints[i] = int(np.average(enRange[i]))
A = Atemp
En = En[enPoints]
if type == 0:
            print( ' Cherry picking points in the spectra\n')
# Find index corresponding to energy value to be used for Y normalization
if preprocDef.fullYnorm == True:
YnormXind = np.where(En>0)[0].tolist()
else:
YnormXind_temp = np.where((En<float(preprocDef.YnormX+preprocDef.YnormXdelta)) & (En>float(preprocDef.YnormX-preprocDef.YnormXdelta)))[0].tolist()
if YnormXind_temp == []:
print( ' Renormalization region out of requested range. Normalizing over full range...\n')
YnormXind = np.where(En>0)[0].tolist()
else:
YnormXind = YnormXind_temp
print(' Number of datapoints = ' + str(A.shape[0]))
print(' Size of each datapoint = ' + str(A.shape[1]) + '\n')
return En, Cl, A, YnormXind
#**********************************************
''' Open prediction file '''
#**********************************************
def readPredFile(sampleFile):
try:
with open(sampleFile, 'r') as f:
print(' Opening sample data for prediction...')
Rtot = np.loadtxt(f, unpack =True)
except:
print('\033[1m' + '\n Sample data file not found \n ' + '\033[0m')
return
R=Rtot[1,:]
Rx=Rtot[0,:]
if preprocDef.cherryPickEnPoint == True and preprocDef.enRestrictRegion == False:
Rtemp = R[range(len(preprocDef.enSel))]
enPoints = list(preprocDef.enSel)
enRange = list(preprocDef.enSel)
for i in range(0, len(preprocDef.enSel)):
enRange[i] = np.where((Rx<float(preprocDef.enSel[i]+preprocDef.enSelDelta[i])) & (Rx>float(preprocDef.enSel[i]-preprocDef.enSelDelta[i])))[0].tolist()
Rtemp[i] = R[R[enRange[i]].tolist().index(max(R[enRange[i]].tolist()))+enRange[i][0]]
enPoints[i] = int(np.average(enRange[i]))
R = Rtemp
Rx = Rx[enPoints]
return R, Rx
#**********************************************************************************
''' Preprocess Learning data '''
#**********************************************************************************
def preProcessNormLearningData(A, En, Cl, YnormXind, type):
print(' Processing Training data file... ')
#**********************************************************************************
    ''' Add random noise to the training set if requested '''
#**********************************************************************************
if preprocDef.scrambleNoiseFlag == True:
print(' Adding random noise to training set \n')
scrambleNoise(A, preprocDef.scrambleNoiseOffset)
Aorig = np.copy(A)
#**********************************************
''' Normalize/preprocess if flags are set '''
#**********************************************
if preprocDef.Ynorm == True:
if type == 0:
if preprocDef.fullYnorm == False:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
else:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; to max intensity in spectra')
for i in range(0,A.shape[0]):
if(np.amin(A[i]) <= 0):
A[i,:] = A[i,:] - np.amin(A[i,:]) + 0.00001
A[i,:] = np.multiply(A[i,:], preprocDef.YnormTo/A[i,A[i][YnormXind].tolist().index(max(A[i][YnormXind].tolist()))+YnormXind[0]])
if preprocDef.StandardScalerFlag == True:
print(' Using StandardScaler from sklearn ')
A = preprocDef.scaler.fit_transform(A)
#**********************************************
''' Energy normalization range '''
#**********************************************
if preprocDef.enRestrictRegion == True:
A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
Aorig = Aorig[:,range(preprocDef.enLim1, preprocDef.enLim2)]
if type == 0:
print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
else:
if type == 0:
if(preprocDef.cherryPickEnPoint == True):
print( ' Using selected spectral points:')
print(En)
else:
print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
return A, Cl, En, Aorig
#**********************************************************************************
''' Preprocess Prediction data '''
#**********************************************************************************
def preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, type):
print(' Processing Prediction data file... ')
#**********************************************************************************
''' Reformat x-axis in case it does not match that of the training data '''
#**********************************************************************************
if(R.shape[0] != A.shape[1]):
if type == 0:
print('\033[1m' + ' WARNING: Different number of datapoints for the x-axis\n for training (' + str(A.shape[1]) + ') and sample (' + str(R.shape[0]) + ') data.\n Reformatting x-axis of sample data...\n' + '\033[0m')
R = np.interp(En, Rx, R)
R = R.reshape(1,-1)
Rorig = np.copy(R)
#**********************************************
''' Normalize/preprocess if flags are set '''
#**********************************************
if preprocDef.Ynorm == True:
if type == 0:
if preprocDef.fullYnorm == False:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
else:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; to max intensity in spectra')
if(np.amin(R) <= 0):
            print(' Spectra minimum at or below zero detected')
R[0,:] = R[0,:] - np.amin(R[0,:]) + 0.00001
R[0,:] = np.multiply(R[0,:], preprocDef.YnormTo/R[0,R[0][YnormXind].tolist().index(max(R[0][YnormXind].tolist()))+YnormXind[0]])
if preprocDef.StandardScalerFlag == True:
print(' Using StandardScaler from sklearn ')
R = preprocDef.scaler.transform(R)
#**********************************************
''' Energy normalization range '''
#**********************************************
if preprocDef.enRestrictRegion == True:
A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
R = R[:,range(preprocDef.enLim1, preprocDef.enLim2)]
if type == 0:
print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
else:
if type == 0:
if(preprocDef.cherryPickEnPoint == True):
print( ' Using selected spectral points:')
print(En)
else:
print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
return R, Rorig
#**********************************************************************************
''' Preprocess prediction data '''
#**********************************************************************************
def preProcessNormMap(A, En, type):
#**********************************************************************************
    ''' Find Y-normalization index, then normalize and restrict the map data '''
#**********************************************************************************
# Find index corresponding to energy value to be used for Y normalization
if preprocDef.fullYnorm == False:
YnormXind = np.where((En<float(preprocDef.YnormX+preprocDef.YnormXdelta)) & (En>float(preprocDef.YnormX-preprocDef.YnormXdelta)))[0].tolist()
else:
YnormXind = np.where(En>0)[0].tolist()
Aorig = np.copy(A)
#**********************************************
''' Normalize/preprocess if flags are set '''
#**********************************************
if preprocDef.Ynorm == True:
if type == 0:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
for i in range(0,A.shape[0]):
A[i,:] = np.multiply(A[i,:], preprocDef.YnormTo/np.amax(A[i]))
if preprocDef.StandardScalerFlag == True:
print(' Using StandardScaler from sklearn ')
A = preprocDef.scaler.fit_transform(A)
#**********************************************
''' Energy normalization range '''
#**********************************************
if preprocDef.enRestrictRegion == True:
A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
Aorig = Aorig[:,range(preprocDef.enLim1, preprocDef.enLim2)]
if type == 0:
print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
else:
if type == 0:
print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
return A, En, Aorig
####################################################################
''' Format subset of training data '''
####################################################################
def formatSubset(A, Cl, percent):
from sklearn.model_selection import train_test_split
A_train, A_cv, Cl_train, Cl_cv = \
train_test_split(A, Cl, test_size=percent, random_state=42)
return A_train, Cl_train, A_cv, Cl_cv
####################################################################
''' Open map files '''
####################################################################
def readPredMap(mapFile):
try:
with open(mapFile, 'r') as f:
En = np.array(f.readline().split(), dtype=np.dtype(float))
A = np.loadtxt(f, unpack =False)
except:
print('\033[1m' + ' Map data file not found \n' + '\033[0m')
return
X = A[:,0]
Y = A[:,1]
A = np.delete(A, np.s_[0:2], 1)
print(' Shape map: ' + str(A.shape))
return X, Y, A, En
####################################################################
''' Save map files '''
####################################################################
def saveMap(file, type, extension, s, x1, y1, comma):
inputFile = saveMapName(file, type, extension, comma)
with open(inputFile, "a") as coord_file:
if comma==True:
coord_file.write('{:},'.format(x1))
coord_file.write('{:},'.format(y1))
else:
coord_file.write('{:}\t'.format(x1))
coord_file.write('{:}\t'.format(y1))
coord_file.write('{:}\n'.format(s))
def saveMapName(file, type, extension, comma):
if comma==True:
extension2 = '_map.csv'
else:
extension2 = '_map.txt'
return os.path.splitext(file)[0] + '_' + type + '-' + extension + extension2
#************************************
''' Plot Probabilities'''
#************************************
def plotProb(clf, R):
prob = clf.predict_proba(R)[0].tolist()
print(' Probabilities of this sample within each class: \n')
for i in range(0,clf.classes_.shape[0]):
print(' ' + str(clf.classes_[i]) + ': ' + str(round(100*prob[i],2)) + '%')
import matplotlib.pyplot as plt
print('\n Stand by: Plotting probabilities for each class... \n')
plt.title('Probability density per class')
for i in range(0, clf.classes_.shape[0]):
plt.scatter(clf.classes_[i], round(100*prob[i],2), label='probability', c = 'red')
plt.grid(True)
plt.xlabel('Class')
plt.ylabel('Probability [%]')
plt.show()
#************************************
''' Plot Training data'''
#************************************
def plotTrainData(A, En, R, plotAllSpectra, learnFileRoot):
import matplotlib.pyplot as plt
if plotDef.plotAllSpectra == True:
step = 1
learnFileRoot = learnFileRoot + '_full-set'
else:
step = plotDef.stepSpectraPlot
learnFileRoot = learnFileRoot + '_partial-' + str(step)
print(' Plotting Training dataset in: ' + learnFileRoot + '.png\n')
if preprocDef.Ynorm ==True:
plt.title('Normalized Training Data')
else:
plt.title('Training Data')
for i in range(0,A.shape[0], step):
plt.plot(En, A[i,:], label='Training data')
plt.plot(En, R[0,:], linewidth = 4, label='Sample data')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Raman Intensity [arb. units]')
plt.savefig(learnFileRoot + '.png', dpi = 160, format = 'png') # Save plot
if plotDef.showTrainingDataPlot == True:
plt.show()
plt.close()
#************************************
''' Plot Processed Maps'''
#************************************
def plotMaps(X, Y, A, label):
print(' Plotting ' + label + ' Map...\n')
import scipy.interpolate
xi = np.linspace(min(X), max(X))
yi = np.linspace(min(Y), max(Y))
xi, yi = np.meshgrid(xi, yi)
rbf = scipy.interpolate.Rbf(Y, -X, A, function='linear')
zi = rbf(xi, yi)
import matplotlib.pyplot as plt
plt.imshow(zi, vmin=A.min(), vmax=A.max(), origin='lower',label='data',
extent=[X.min(), X.max(), Y.min(), Y.max()])
plt.title(label)
plt.xlabel('X [um]')
plt.ylabel('Y [um]')
plt.show()
####################################################################
''' Make header, if absent, for the summary file '''
####################################################################
def makeHeaderSummary(file, learnFile):
if os.path.isfile(file) == False:
summaryHeader1 = ['Training File:', learnFile]
summaryHeader2 = ['File','SVM-HC','SVM-Prob%', 'NN-HC', 'NN-Prob%', 'TF-HC', 'TF-Prob%', 'TF-Accuracy%']
with open(file, "a") as sum_file:
csv_out=csv.writer(sum_file)
csv_out.writerow(summaryHeader1)
csv_out.writerow(summaryHeader2)
#************************************
''' Lists the program usage '''
#************************************
def usage():
print('\n Usage:\n')
print(' Single files:')
print(' python3 SpectraLearnPredict.py -f <learningfile> <spectrafile> \n')
print(' Single files with cross-validation for accuracy determination: ')
print(' python3 SpectraLearnPredict.py -a <learningfile> <spectrafile> \n')
print(' Maps (formatted for Horiba LabSpec):')
print(' python3 SpectraLearnPredict.py -m <learningfile> <spectramap> \n')
print(' Batch txt files:')
print(' python3 SpectraLearnPredict.py -b <learningfile> \n')
print(' K-means on maps:')
print(' python3 SpectraLearnPredict.py -k <spectramap> <number_of_classes>\n')
print(' Principal component analysis on spectral collection files: ')
print(' python3 SpectraLearnPredict.py -p <spectrafile> <#comp>\n')
print(' Run tensorflow training only:')
print(' python3 SpectraLearnPredict.py -t <learningfile> <# iterations>\n')
print(' Requires python 3.x. Not compatible with python 2.x\n')
#************************************
''' Info on Classification Report '''
#************************************
def runClassReport(clf, A, Cl):
from sklearn.metrics import classification_report
y_pred = clf.predict(A)
print(classification_report(Cl, y_pred, target_names=clf.classes_))
    print(' Precision is the probability that, given a classification result for a sample,\n' +
          ' the sample actually belongs to that class. Recall is the probability that a\n' +
          ' sample of a given class is correctly classified. The f1-score is the harmonic\n' +
          ' mean of precision and recall, giving a single measure of the relevancy of the\n' +
          ' classifier results.\n')
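# Hedged illustration (added; not called by this program): the metrics described in
# the print above, computed from raw per-class counts. tp, fp and fn are assumed
# inputs, not values produced by this script.
def _prf_from_counts(tp, fp, fn):
    precision = tp / (tp + fp) if (tp + fp) else 0.0   # of predicted members of a class, fraction correct
    recall = tp / (tp + fn) if (tp + fn) else 0.0      # of actual members of a class, fraction recovered
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1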
#************************************
''' Introduce Noise in Data '''
#************************************
def scrambleNoise(A, offset):
from random import uniform
for i in range(A.shape[1]):
A[:,i] += offset*uniform(-1,1)
#********************************************************************************
''' Tensorflow '''
''' https://www.tensorflow.org/get_started/mnist/beginners'''
#********************************************************************************
''' Setup training-only via TensorFlow '''
#********************************************************************************
def TrainTF(learnFile, numRuns):
learnFileRoot = os.path.splitext(learnFile)[0]
summary_filename = learnFileRoot + '_summary-TF-training' + str(datetime.now().strftime('_%Y-%m-%d_%H-%M-%S.log'))
tfDef.alwaysRetrain = True
tfDef.alwaysImprove = True
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
En_temp = En
Cl_temp = Cl
A_temp = A
with open(summary_filename, "a") as sum_file:
sum_file.write(str(datetime.now().strftime('Training started: %Y-%m-%d %H:%M:%S\n')))
if preprocDef.scrambleNoiseFlag == True:
sum_file.write(' Using Noise scrambler (offset: ' + str(preprocDef.scrambleNoiseOffset) + ')\n\n')
sum_file.write('\nIteration\tAccuracy %\t Prediction\t Probability %\n')
index = random.randint(0,A.shape[0]-1)
R = A[index,:]
if preprocDef.scrambleNoiseFlag == False:
A_temp, Cl_temp, En_temp, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
''' Plot Training Data '''
if plotDef.createTrainingDataPlot == True:
plotTrainData(A, En, R.reshape(1,-1), plotDef.plotAllSpectra, learnFileRoot)
for i in range(numRuns):
print(' Running tensorflow training iteration: ' + str(i+1) + '\n')
''' Preprocess prediction data '''
if preprocDef.scrambleNoiseFlag == True:
A_temp, Cl_temp, En_temp, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
R_temp, Rorig = preProcessNormPredData(R, En, A_temp, En_temp, Cl_temp, YnormXind, 0)
print(' Using random spectra from training dataset as evaluation file ')
tfPred, tfProb, tfAccur = runTensorFlow(A_temp,Cl_temp,R_temp,learnFileRoot)
with open(summary_filename, "a") as sum_file:
sum_file.write(str(i+1) + '\t{:10.2f}\t'.format(tfAccur) + str(tfPred) + '\t{:10.2f}\n'.format(tfProb))
print(' Nominal class for prediction spectra:', str(index+1), '\n')
with open(summary_filename, "a") as sum_file:
sum_file.write(str(datetime.now().strftime('\nTraining ended: %Y-%m-%d %H:%M:%S\n')))
print(' Completed ' + str(numRuns) + ' Training iterations. \n')
#********************************************************************************
''' Format vectors of unique labels '''
#********************************************************************************
def formatClass(rootFile, Cl):
import sklearn.preprocessing as pp
print('==========================================================================\n')
print(' Running basic TensorFlow. Creating class data in binary form...')
Cl2 = pp.LabelBinarizer().fit_transform(Cl)
import matplotlib.pyplot as plt
plt.hist([float(x) for x in Cl], bins=np.unique([float(x) for x in Cl]), edgecolor="black")
plt.xlabel('Class')
    plt.ylabel('Occurrences')
    plt.title('Class distribution')
plt.savefig(rootFile + '_ClassDistrib.png', dpi = 160, format = 'png') # Save plot
if tfDef.plotClassDistribTF == True:
        print(' Plotting Class distribution \n')
plt.show()
return Cl2
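# Illustrative note (added): with three or more classes LabelBinarizer returns one
# one-hot column per class, e.g. ['a', 'b', 'c', 'a'] -> [[1,0,0], [0,1,0], [0,0,1], [1,0,0]];
# with exactly two classes it returns a single 0/1 column instead.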
#********************************************************************************
''' Run basic model training and evaluation via TensorFlow '''
#********************************************************************************
def runTFbasic(A, Cl, R, Root):
import tensorflow as tf
tfTrainedData = Root + '.tfmodel'
Cl2 = formatClass(Root, Cl)
print(' Initializing TensorFlow...')
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, A.shape[1]])
W = tf.Variable(tf.zeros([A.shape[1], np.unique(Cl).shape[0]]))
b = tf.Variable(tf.zeros(np.unique(Cl).shape[0]))
y_ = tf.placeholder(tf.float32, [None, np.unique(Cl).shape[0]])
# The raw formulation of cross-entropy can be numerically unstable
#y = tf.nn.softmax(tf.matmul(x, W) + b)
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=[1]))
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
y = tf.matmul(x,W) + b
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
if tfDef.decayLearnRate == True:
print(' Using decaying learning rate, start at:',tfDef.learnRate, '\n')
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = tfDef.learnRate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)
else:
        print(' Using fixed learning rate:', tfDef.learnRate, '\n')
train_step = tf.train.GradientDescentOptimizer(tfDef.learnRate).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
if tfDef.enableTensorboard == True:
writer = tf.summary.FileWriter(".", sess.graph)
print('\n Saving graph. Accessible via tensorboard. \n')
saver = tf.train.Saver()
accur = 0
try:
if tfDef.alwaysRetrain == False:
print(' Opening TF training model from:', tfTrainedData)
saver.restore(sess, './' + tfTrainedData)
print('\n Model restored.\n')
else:
raise ValueError(' Force TF model retraining.')
except:
init = tf.global_variables_initializer()
sess.run(init)
        if os.path.isfile(tfTrainedData + '.meta') and tfDef.alwaysImprove == True:
print('\n Improving TF model...')
saver.restore(sess, './' + tfTrainedData)
else:
            print('\n Rebuilding TF model...')
if tfDef.subsetCrossValid == True:
print(' Iterating training using subset (' + str(tfDef.percentCrossValid*100) + '%), ' + str(tfDef.iterCrossValid) + ' times ...')
for i in range(tfDef.iterCrossValid):
As, Cl2s, As_cv, Cl2s_cv = formatSubset(A, Cl2, tfDef.percentCrossValid)
summary = sess.run(train_step, feed_dict={x: As, y_: Cl2s})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accur = 100*accuracy.eval(feed_dict={x:As_cv, y_:Cl2s_cv})
else:
summary = sess.run(train_step, feed_dict={x: A, y_: Cl2})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accur = 100*accuracy.eval(feed_dict={x:A, y_:Cl2})
save_path = saver.save(sess, tfTrainedData)
print(' Model saved in file: %s\n' % save_path)
print('\033[1m Accuracy: ' + str('{:.3f}'.format(accur)) + '%\n\033[0m')
if tfDef.enableTensorboard == True:
writer.close()
res1 = sess.run(y, feed_dict={x: R})
res2 = sess.run(tf.argmax(y, 1), feed_dict={x: R})
sess.close()
rosterPred = np.where(res1[0]>tfDef.thresholdProbabilityTFPred)[0]
print(' ==============================')
print(' \033[1mTF\033[0m - Probability >',str(tfDef.thresholdProbabilityTFPred),'%')
print(' ==============================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.1f}'.format(res1[0][rosterPred][i])))
print(' ==============================\n')
print('\033[1m Predicted value (TF): ' + str(np.unique(Cl)[res2][0]) + ' (Probability: ' + str('{:.1f}'.format(res1[0][res2][0])) + '%)\n' + '\033[0m' )
return np.unique(Cl)[res2][0], res1[0][res2][0], accur
#************************************
''' Main initialization routine '''
#************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch06/02_tuning.py | 22 | 5484 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve the P/R AUC
#
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
phase = "02"
def create_ngram_model(params=None):
tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
analyzer="word", binary=False)
clf = MultinomialNB()
pipeline = Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__stop_words=[None, "english"],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print(clf)
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
        median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(vect__ngram_range=(1, 2),
vect__min_df=1,
vect__stop_words=None,
vect__smooth_idf=False,
vect__use_idf=False,
vect__sublinear_tf=True,
vect__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_ngram_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
| mit |
mariusvniekerk/ibis | ibis/impala/metadata.py | 1 | 8500 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import StringIO
import pandas as pd
def parse_metadata(descr_table):
parser = MetadataParser(descr_table)
return parser.parse()
def _noop(tup):
return None
def _item_converter(i):
def _get_item(converter=None):
def _converter(tup):
result = tup[i]
if converter is not None:
result = converter(result)
return result
return _converter
return _get_item
_get_type = _item_converter(1)
_get_comment = _item_converter(2)
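# Note (added): _item_converter(i) builds accessors over the rows returned by
# DESCRIBE FORMATTED: _get_type() extracts column 1 and _get_comment() extracts
# column 2, optionally passing the value through a cleaner such as _try_timestamp.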
def _try_timestamp(x):
try:
ts = pd.Timestamp(x)
return ts.to_pydatetime()
except (ValueError, TypeError):
return x
def _try_unix_timestamp(x):
try:
ts = pd.Timestamp.fromtimestamp(int(x))
return ts.to_pydatetime()
except (ValueError, TypeError):
return x
def _try_boolean(x):
try:
x = x.lower()
if x in ('true', 'yes'):
return True
elif x in ('false', 'no'):
return False
return x
except (ValueError, TypeError):
return x
def _try_int(x):
try:
return int(x)
except (ValueError, TypeError):
return x
class MetadataParser(object):
"""
A simple state-ish machine to parse the results of DESCRIBE FORMATTED
"""
def __init__(self, table):
self.table = table
self.tuples = list(self.table.itertuples(index=False))
def _reset(self):
self.pos = 0
self.schema = None
self.partitions = None
self.info = None
self.storage = None
def _next_tuple(self):
if self.pos == len(self.tuples):
raise StopIteration
result = self.tuples[self.pos]
self.pos += 1
return result
def parse(self):
self._reset()
self._parse()
return TableMetadata(self.schema, self.info, self.storage,
partitions=self.partitions)
def _parse(self):
self.schema = self._parse_schema()
next_section = self._next_tuple()
if 'partition' in next_section[0].lower():
self._parse_partitions()
else:
self._parse_info()
def _parse_partitions(self):
self.partitions = self._parse_schema()
next_section = self._next_tuple()
if 'table information' not in next_section[0].lower():
raise ValueError('Table information not present')
self._parse_info()
def _parse_schema(self):
tup = self._next_tuple()
if 'col_name' not in tup[0]:
raise ValueError('DESCRIBE FORMATTED did not return '
'the expected results: {0}'
.format(tup))
self._next_tuple()
# Use for both main schema and partition schema (if any)
schema = []
while True:
tup = self._next_tuple()
if tup[0].strip() == '':
break
schema.append((tup[0], tup[1]))
return schema
def _parse_info(self):
self.info = {}
while True:
tup = self._next_tuple()
orig_key = tup[0].strip(':')
key = _clean_param_name(tup[0])
if key == '' or key.startswith('#'):
# section is done
break
if key == 'table parameters':
self._parse_table_parameters()
elif key in self._info_cleaners:
result = self._info_cleaners[key](tup)
self.info[orig_key] = result
else:
self.info[orig_key] = tup[1]
if 'storage information' not in key:
raise ValueError('Storage information not present')
self._parse_storage_info()
_info_cleaners = {
'database': _get_type(),
'owner': _get_type(),
'createtime': _get_type(_try_timestamp),
'lastaccesstime': _get_type(_try_timestamp),
'protect mode': _get_type(),
'retention': _get_type(_try_int),
'location': _get_type(),
'table type': _get_type()
}
def _parse_table_parameters(self):
params = self._parse_nested_params(self._table_param_cleaners)
self.info['Table Parameters'] = params
_table_param_cleaners = {
'external': _try_boolean,
'column_stats_accurate': _try_boolean,
'numfiles': _try_int,
'totalsize': _try_int,
'stats_generated_via_stats_task': _try_boolean,
'numrows': _try_int,
'transient_lastddltime': _try_unix_timestamp,
}
def _parse_storage_info(self):
self.storage = {}
while True:
# end of the road
try:
tup = self._next_tuple()
except StopIteration:
break
orig_key = tup[0].strip(':')
key = _clean_param_name(tup[0])
if key == '' or key.startswith('#'):
# section is done
break
if key == 'storage desc params':
self._parse_storage_desc_params()
elif key in self._storage_cleaners:
result = self._storage_cleaners[key](tup)
self.storage[orig_key] = result
else:
self.storage[orig_key] = tup[1]
_storage_cleaners = {
'compressed': _get_type(_try_boolean),
'num buckets': _get_type(_try_int),
}
def _parse_storage_desc_params(self):
params = self._parse_nested_params(self._storage_param_cleaners)
self.storage['Desc Params'] = params
_storage_param_cleaners = {}
def _parse_nested_params(self, cleaners):
params = {}
while True:
try:
tup = self._next_tuple()
except StopIteration:
break
if pd.isnull(tup[1]):
break
key, value = tup[1:]
if key.lower() in cleaners:
cleaner = cleaners[key.lower()]
value = cleaner(value)
params[key] = value
return params
def _clean_param_name(x):
return x.strip().strip(':').lower()
def _get_meta(attr, key):
@property
def f(self):
data = getattr(self, attr)
if isinstance(key, list):
result = data
for k in key:
if k not in result:
raise KeyError(k)
result = result[k]
return result
else:
return data[key]
return f
class TableMetadata(object):
"""
Container for the parsed and wrangled results of DESCRIBE FORMATTED for
easier Ibis use (and testing).
"""
def __init__(self, schema, info, storage, partitions=None):
self.schema = schema
self.info = info
self.storage = storage
self.partitions = partitions
def __repr__(self):
import pprint
# Quick and dirty for now
buf = StringIO()
buf.write(str(type(self)))
buf.write('\n')
data = {
'schema': self.schema,
'info': self.info,
'storage info': self.storage
}
if self.partitions is not None:
data['partition schema'] = self.partitions
pprint.pprint(data, stream=buf)
return buf.getvalue()
@property
def is_partitioned(self):
return self.partitions is not None
create_time = _get_meta('info', 'CreateTime')
location = _get_meta('info', 'Location')
owner = _get_meta('info', 'Owner')
num_rows = _get_meta('info', ['Table Parameters', 'numRows'])
hive_format = _get_meta('storage', 'InputFormat')
tbl_properties = _get_meta('info', 'Table Parameters')
serde_properties = _get_meta('storage', 'Desc Params')
class TableInfo(object):
pass
class TableStorageInfo(object):
pass
| apache-2.0 |
DavidMcDonald1993/ghsom | effective_matplotlib.py | 1 | 2881 |
# coding: utf-8
# In[56]:
get_ipython().magic(u'matplotlib inline')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
df = pd.read_excel("https://github.com/chris1610/pbpython/blob/master/data/sample-salesv3.xlsx?raw=true")
df.head(n=10)
# In[57]:
top_10 = (df.groupby('name')['ext price', 'quantity'].agg({'ext price': 'sum', 'quantity': 'count'})
.sort_values(by='ext price', ascending=False))[:10].reset_index()
top_10.rename(columns={'name': 'Name', 'ext price': 'Sales', 'quantity': 'Purchases'}, inplace=True)
# In[58]:
top_10
# In[59]:
plt.style.available
# In[73]:
plt.style.use("ggplot")
# In[74]:
def currency(x, pos):
'The two args are the value and tick position'
if x >= 1000000:
return '${:1.1f}M'.format(x*1e-6)
return '${:1.0f}K'.format(x*1e-3)
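# For example (illustrative): currency(125000, 0) returns '$125K' and
# currency(1500000, 0) returns '$1.5M'.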
# In[75]:
fig, ax = plt.subplots(figsize=(5, 6))
top_10.plot(kind='barh', y="Sales", x="Name", ax=ax)
ax.set_xlim([-10000, 140000])
ax.set(title="2014 Revenue", xlabel="Total Revenue", ylabel="Customer")
formatter = FuncFormatter(currency)
ax.xaxis.set_major_formatter(formatter)
ax.legend().set_visible(False)
# plt.show()
# In[76]:
# Create the figure and the axes
fig, ax = plt.subplots()
# Plot the data and get the averaged
top_10.plot(kind='barh', y="Sales", x="Name", ax=ax)
avg = top_10['Sales'].mean()
# Set limits and labels
ax.set_xlim([-10000, 140000])
ax.set(title='2014 Revenue', xlabel='Total Revenue', ylabel='Customer')
# Add a line for the average
ax.axvline(x=avg, color='b', label='Average', linestyle='--', linewidth=1)
# Annotate the new customers
for cust in [3, 5, 8]:
ax.text(115000, cust, "New Customer")
# Format the currency
formatter = FuncFormatter(currency)
ax.xaxis.set_major_formatter(formatter)
# Hide the legend
ax.legend().set_visible(False)
# plt.show()
# In[77]:
# Get the figure and the axes
fig, (ax0, ax1) = plt.subplots(nrows=1,ncols=2, sharey=True, figsize=(7, 4))
top_10.plot(kind='barh', y="Sales", x="Name", ax=ax0)
ax0.set_xlim([0, 140000])
ax0.set_xticks(np.arange(0, 150000, 50000))
formatter = FuncFormatter(currency)
ax0.xaxis.set_major_formatter(formatter)
ax0.set(title='Revenue', xlabel='Total Revenue', ylabel='Customers')
# Plot the average as a vertical line
avg = top_10['Sales'].mean()
ax0.axvline(x=avg, color='b', label='Average', linestyle='--', linewidth=1)
# Repeat for the unit plot
top_10.plot(kind='barh', y="Purchases", x="Name", ax=ax1)
avg = top_10['Purchases'].mean()
ax1.set(title='Units', xlabel='Total Units')
ax1.axvline(x=avg, color='b', label='Average', linestyle='--', linewidth=1)
# Title the figure
fig.suptitle('2014 Sales Analysis', fontsize=14, fontweight='bold');
# Hide the legends
ax1.legend().set_visible(False)
ax0.legend().set_visible(False)
# In[45]:
fig.canvas.get_supported_filetypes()
| gpl-2.0 |
jpautom/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 127 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows a drastic reduction of the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
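# Hedged example (added; not used by the script): the bound quoted in the module
# docstring can be queried directly. For the 500-sample case discussed above and
# eps=0.5 it asks for roughly 300 dimensions.
def _jl_min_dim_example(n_samples=500, eps=0.5):
    # minimum n_components guaranteeing an eps-embedding of n_samples points
    return johnson_lindenstrauss_min_dim(n_samples=n_samples, eps=eps)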
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
gdetor/SI-RF-Structure | Statistics/protocols.py | 1 | 6518 | # Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# -----------------------------------------------------------------------------
# Structure of Receptive Fields in Area 3b of Primary Somatosensory Cortex in
# the Alert Monkey - James J. DiCarlo, Kenneth O. Johnson, and Steven S. Hsiao
# The Journal of Neuroscience, April 1, 1998, 18(7):2626-2645
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def g(x,sigma = 0.1):
return np.exp(-x**2/sigma**2)
def fromdistance(fn, shape, center=None, dtype=float):
def distance(*args):
d = 0
for i in range(len(shape)):
d += ((args[i]-center[i])/float(max(1,shape[i]-1)))**2
return np.sqrt(d)/np.sqrt(len(shape))
    if center is None:
center = np.array(list(shape))//2
return fn(np.fromfunction(distance,shape,dtype=dtype))
def Gaussian(shape,center,sigma=0.5):
def g(x):
return np.exp(-x**2/sigma**2)
return fromdistance(g,shape,center)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# Seed for reproductibility
# -------------------------
np.random.seed(12345)
# Standard units
# --------------
second = 1.0
millisecond = 1e-3 * second
ms = millisecond
minute = 60 * second
meter = 1.0
millimeter = 1e-3 * meter
mm = millimeter
micrometer = 1e-6 * meter
# Simulation parameters
# ---------------------
dots_number = 750
drum_length = 250*mm
drum_width = 30*mm
drum_shift = 200*micrometer
drum_velocity = 40*mm / second
simulation_time = 30*second
sampling_rate = 5*ms
dt = sampling_rate
skinpatch = 10*mm,10*mm # width x height
# Generate the drum pattern
# -------------------------
drum = np.zeros( (dots_number,2) )
drum[:,0] = np.random.uniform(0,drum_length,dots_number)
drum[:,1] = np.random.uniform(0,drum_width, dots_number)
drum_x,drum_y = drum[:,0], drum[:,1]
dots = []
n = 0
for t in np.arange(0.0,simulation_time,dt):
z = t * drum_velocity
x = z % (drum_length - skinpatch[0])
y = int(z / (drum_length - skinpatch[0])) * drum_shift
# Maybe this should be adjusted since a stimulus lying outside the skin
# patch may still have influence on the input (for example, if it lies
# very near the border)
xmin, xmax = x, x+skinpatch[0]
ymin, ymax = y, y+skinpatch[1]
# Get dots contained on the skin patch (and normalize coordinates)
d = drum[(drum_x > (xmin)) *
(drum_x < (xmax)) *
(drum_y > (ymin)) *
(drum_y < (ymax))]
d -= (x,y)
d /= skinpatch[0],skinpatch[1]
dots.extend((d*5).tolist())
dots = np.array(dots)
n = len(dots)
X ,Y = dots[:,0], dots[:,1]
plt.figure(figsize = (20,8))
ax = plt.subplot(132, aspect=1)
ax.scatter(X, Y, s=1, edgecolor='none', facecolor='k')
ax.set_xlim(0,5)
ax.set_xlabel("mm")
ax.set_ylim(0,5)
ax.set_ylabel("mm")
ax.set_title("Drum Protocol (30 seconds), %d stimuli" % n)
ax.text(0.25, 4.75, 'B', weight='bold', fontsize=32, color='k',
ha='left', va='top')
ax = plt.subplot(131, aspect=1)
X = np.random.uniform(0,5,50000)
Y = np.random.uniform(0,5,50000)
ax.scatter(X, Y, s=1, edgecolor='none', facecolor='k')
ax.set_xlim(0,5)
ax.set_xlabel("mm")
ax.set_ylim(0,5)
ax.set_ylabel("mm")
ax.set_title("Training Protocol, %d stimuli" % 50000)
ax.text(0.25, 4.75, 'A', weight='bold', fontsize=32, color='k',
ha='left', va='top')
ax = plt.subplot(133, aspect=1)
    XY = np.zeros((25000//2, 2))
d = 5.0/4
for i in range(len(XY)):
x,y = np.random.uniform(0,5,2)
while d < x < 5-d and d < y < 5-d:
x,y = np.random.uniform(0,5,2)
XY[i] = x,y
X,Y = XY[:,0], XY[:,1]
ax.scatter(X, Y, s=1, edgecolor='none', facecolor='k')
ax.text(0.25, 4.75, 'C', weight='bold', fontsize=32, color='k',
ha='left', va='top')
    X = d+np.random.uniform(0, 2.5, 25000//2)
    Y = d+np.random.uniform(0, 2.5, 25000//2)
ax.scatter(X, Y, s=1, edgecolor='none', facecolor='k')
ax.set_xlim(0,5)
ax.set_xlabel("mm")
ax.set_ylim(0,5)
ax.set_ylabel("mm")
ax.set_title("RoI Protocol, %d stimuli" % 25000)
plt.savefig("protocols.png", dpi=200)
plt.show()
| gpl-3.0 |
dataplumber/nexus | analysis/webservice/algorithms/doms/histogramplot.py | 1 | 3377 |
import BaseDomsHandler
import ResultsStorage
import string
from cStringIO import StringIO
import matplotlib.mlab as mlab
from multiprocessing import Process, Manager
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
PARAMETER_TO_FIELD = {
"sst": "sea_water_temperature",
"sss": "sea_water_salinity"
}
PARAMETER_TO_UNITS = {
"sst": "($^\circ$C)",
"sss": "(g/L)"
}
class DomsHistogramPlotQueryResults(BaseDomsHandler.DomsQueryResults):
def __init__(self, x, parameter, primary, secondary, args=None, bounds=None, count=None, details=None, computeOptions=None, executionId=None, plot=None):
BaseDomsHandler.DomsQueryResults.__init__(self, results=x, args=args, details=details, bounds=bounds, count=count, computeOptions=computeOptions, executionId=executionId)
self.__primary = primary
self.__secondary = secondary
self.__x = x
self.__parameter = parameter
self.__plot = plot
def toImage(self):
return self.__plot
def render(d, x, primary, secondary, parameter, norm_and_curve=False):
fig, ax = plt.subplots()
fig.suptitle(string.upper("%s vs. %s" % (primary, secondary)), fontsize=14, fontweight='bold')
n, bins, patches = plt.hist(x, 50, normed=norm_and_curve, facecolor='green', alpha=0.75)
if norm_and_curve:
mean = np.mean(x)
variance = np.var(x)
sigma = np.sqrt(variance)
y = mlab.normpdf(bins, mean, sigma)
l = plt.plot(bins, y, 'r--', linewidth=1)
ax.set_title('n = %d' % len(x))
units = PARAMETER_TO_UNITS[parameter] if parameter in PARAMETER_TO_UNITS else PARAMETER_TO_UNITS["sst"]
ax.set_xlabel("%s - %s %s" % (primary, secondary, units))
if norm_and_curve:
ax.set_ylabel("Probability per unit difference")
else:
ax.set_ylabel("Frequency")
plt.grid(True)
sio = StringIO()
plt.savefig(sio, format='png')
d['plot'] = sio.getvalue()
def renderAsync(x, primary, secondary, parameter, norm_and_curve):
manager = Manager()
d = manager.dict()
p = Process(target=render, args=(d, x, primary, secondary, parameter, norm_and_curve))
p.start()
p.join()
return d['plot']
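# Note on the design above (added; the rationale is an assumption, not stated in the
# code): rendering in a child process keeps matplotlib's module-level state and figure
# memory out of the long-running service process, while the Manager dict carries the
# rendered PNG bytes back to the parent.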
def createHistogramPlot(id, parameter, norm_and_curve=False):
with ResultsStorage.ResultsRetrieval() as storage:
params, stats, data = storage.retrieveResults(id)
primary = params["primary"]
secondary = params["matchup"][0]
x = createHistTable(data, secondary, parameter)
plot = renderAsync(x, primary, secondary, parameter, norm_and_curve)
r = DomsHistogramPlotQueryResults(x=x, parameter=parameter, primary=primary, secondary=secondary,
args=params, details=stats,
bounds=None, count=None, computeOptions=None, executionId=id, plot=plot)
return r
def createHistTable(results, secondary, parameter):
x = []
field = PARAMETER_TO_FIELD[parameter] if parameter in PARAMETER_TO_FIELD else PARAMETER_TO_FIELD["sst"]
for entry in results:
for match in entry["matches"]:
if match["source"] == secondary:
if field in entry and field in match:
a = entry[field]
b = match[field]
x.append((a - b))
    return x
| apache-2.0 |
probml/pyprobml | scripts/lms_demo.py | 1 | 6196 | # SGD on linear regression aka least mean squares
# Written by Duane Rich
# Based on https://github.com/probml/pmtk3/blob/master/demos/LMSdemoSimple.m
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (required for projection='3d' on older matplotlib)
from scipy.optimize import minimize
plt.rcParams["figure.figsize"] = (5,5) # width x height
np.random.seed(0)
#Generating synthetic data:
N = 21
wTrue = np.array([1.45, 0.92])
X = np.random.uniform(-2, 2, N)
X = np.column_stack((np.ones(N), X))
y = wTrue[0] * X[:, 0] + wTrue[1] * X[:, 1] + np.random.normal(0, .1, N)
#Plot SSE surface over parameter space.
v = np.arange(-1, 3, .1)
W0, W1 = np.meshgrid(v, v)
SS = np.array([sum((w0 * X[:, 0] + w1 * X[:, 1] - y) ** 2) for w0, w1 in zip(np.ravel(W0), np.ravel(W1))])
SS = SS.reshape(W0.shape)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(W0, W1, SS)
save_fig('lmsSSE.pdf')
plt.draw()
#Mean SE with gradient and Hessian:
def LinregLossScaled(w, X, y):
Xt = np.transpose(X)
XtX = Xt.dot(X)
N = X.shape[0]
err = X.dot(w) - y
f = np.mean(err*err)
g = (1/N) * Xt.dot(err)
H = (1/N) * XtX
return f, g, H
#Starting point from which to search for optimal parameters
w0 = np.array([-0.5, 2])
# Determine loss at optimal param values:
def funObj(w):
out,_,_ = LinregLossScaled(w, X, y)
return out
res = minimize(funObj, w0, method='L-BFGS-B')
wOpt = res.x
fopt = funObj(wOpt)
# fopt,_ ,_ = LinregLossScaled(wTrue, X, y)
#Options for stochastic gradient descent
opts = {}
opts['batchsize'] = 1
opts['verbose'] = True
opts['storeParamTrace'] = True
opts['storeFvalTrace'] = True
opts['storeStepTrace'] = True
opts['maxUpdates'] = 30
opts['eta0'] = 0.5
opts['t0'] = 3
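# A quick sketch of the decaying step-size schedule used by default below,
# eta(t) = eta0 * t0 / (t + t0), with the eta0 = 0.5 and t0 = 3 chosen above
# (the numbers are only illustrative):
#   t = 1  -> 0.5 * 3 / 4  = 0.375
#   t = 9  -> 0.5 * 3 / 12 = 0.125
#   t = 27 -> 0.5 * 3 / 30 = 0.05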
#Breaks the matrix X and vector y into batches
def batchify(X, y, batchsize):
nTrain = X.shape[0]
batchdata = []
batchlabels = []
for i in range(0, nTrain, batchsize):
nxt = min(i+batchsize, nTrain+1)
batchdata.append(X[i:nxt, :])
batchlabels.append(y[i:nxt])
return batchdata, batchlabels
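# Minimal sketch of what batchify returns for this demo's data (N = 21 rows in X):
#   bd, bl = batchify(X, y, 7)
#   len(bd)        # -> 3 batches
#   bd[0].shape    # -> (7, 2)
#   bl[0].shape    # -> (7,)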
def stochgradSimple(objFun, w0, X, y, *args, **kwargs):
#Stochastic gradient descent.
#Algorithm works by breaking up the data into batches. It
#determines a gradient for each batch and moves the current
#choice of parameters in that direction. The extent to which
#we move in that direction is determined by our shrinking
#stepsize over time.
#Includes options for the batchsize, total number of sweeps over
    #the data (maxepoch), total number of batches inspected (maxUpdates),
    #whether the algo should print updates as it progresses, options
    #controlling what information we keep track of, and parameters to
    #determine how the step size shrinks over time.
#Default options
batchsize = kwargs['batchsize'] if 'batchsize' in kwargs else 10
maxepoch = kwargs['maxepoch'] if 'maxepoch' in kwargs else 500
maxUpdates = kwargs['maxUpdates'] if 'maxUpdates' in kwargs else 1000
verbose = kwargs['verbose'] if 'verbose' in kwargs else False
storeParamTrace = kwargs['storeParamTrace'] if 'storeParamTrace' in kwargs else False
storeFvalTrace = kwargs['storeFvalTrace'] if 'storeFvalTrace' in kwargs else False
storeStepTrace = kwargs['storeStepTrace'] if 'storeStepTrace' in kwargs else False
t0 = kwargs['t0'] if 't0' in kwargs else 1
eta0 = kwargs['eta0'] if 'eta0' in kwargs else 0.1
stepSizeFn = kwargs['stepSizeFn'] if 'stepSizeFn' in kwargs else lambda x: eta0*t0/(x+t0)
#Turn the data into batches
batchdata, batchlabels = batchify(X, y, batchsize);
num_batches = len(batchlabels)
if verbose:
print('%d batches of size %d\n' %(num_batches, batchsize))
w = w0
trace = {}
trace['fvalMinibatch'] = []
trace['params'] = []
trace['stepSize'] = []
# Main loop:
nupdates = 1
for epoch in range(1, maxepoch+1):
if verbose:
print('epoch %d\n' % epoch)
for b in range(num_batches):
bdata = batchdata[b]
blabels = batchlabels[b]
if verbose and b % 100 == 0:
print('epoch %d batch %d nupdates %d\n' %(epoch, b, nupdates))
fb, g, _ = objFun(w, bdata, blabels, *args)
eta = stepSizeFn(nupdates)
w = w - eta*g #steepest descent
nupdates += 1
if storeParamTrace:
#Storing the history of the parameters may take a lot of space
trace['params'].append(w)
if storeFvalTrace:
trace['fvalMinibatch'].append(fb)
if storeStepTrace:
trace['stepSize'].append(eta)
if nupdates > maxUpdates:
break
if nupdates > maxUpdates:
break
return w, trace
w, trace = stochgradSimple(LinregLossScaled, w0, X, y, **opts)
def stochgradTracePostprocess(objFun, trace, X, y, *args):
#This is to determine the losses for each set of parameters
#chosen over the parameter path.
fvalhist = []
for t in range(len(trace)):
fval,_ ,_ = objFun(trace[t], X, y, *args)
fvalhist.append(fval)
return fvalhist
print(w)
whist = np.asarray(trace['params'])
#Parameter trajectory
if True:
fig, ax = plt.subplots()
ax.set_title('black line = LMS trajectory towards LS soln (red cross)')
CS = plt.contour(W0, W1, SS)
plt.plot(wOpt[0], wOpt[1], 'x', color='r', ms=10, mew=5)
plt.plot(whist[:, 0], whist[:, 1], 'ko-', lw=2)
save_fig('lmsTraj.pdf')
plt.draw()
#Loss values over the parameter path compared to the optimal loss.
if True:
fvalhist = np.asarray(stochgradTracePostprocess(LinregLossScaled, trace['params'], X, y))
fig, ax = plt.subplots()
ax.set_title('RSS vs iteration')
plt.plot(fvalhist,'ko-', lw=2)
plt.axhline(fopt)
save_fig('lmsRssHist.pdf')
plt.draw()
#Stepsize graph if desired:
if True:
stephist = np.asarray(trace['stepSize'])
fig, ax = plt.subplots()
ax.set_title('Stepsize vs iteration')
plt.plot(stephist,'ko-', lw=2)
save_fig('lmsStepSizeHist.pdf')
plt.draw()
plt.show()
| mit |
zerothi/sids | sisl/supercell.py | 1 | 37042 | """ Define a supercell
This class is the basis of many different objects.
"""
import math
import warnings
from numbers import Integral
import numpy as np
from numpy import dot
from ._internal import set_module
from . import _plot as plt
from . import _array as _a
from .utils.mathematics import fnorm
from .shape.prism4 import Cuboid
from .quaternion import Quaternion
from ._math_small import cross3, dot3
from ._supercell import cell_invert, cell_reciprocal
__all__ = ['SuperCell', 'SuperCellChild']
@set_module("sisl")
class SuperCell:
r""" A cell class to retain lattice vectors and a supercell structure
The supercell structure is comprising the *primary* unit-cell and neighbouring
unit-cells. The number of supercells is given by the attribute `nsc` which
is a vector with 3 elements, one per lattice vector. It describes *how many*
times the primary unit-cell is extended along the i'th lattice vector.
For ``nsc[i] == 3`` the supercell is made up of 3 unit-cells. One *behind*, the
primary unit-cell and one *after*.
Parameters
----------
cell : array_like
the lattice parameters of the unit cell (the actual cell
is returned from `tocell`.
nsc : array_like of int
        number of supercells along each lattice vector
origo : (3,) of float
the origo of the supercell.
Attributes
----------
cell : (3, 3) of float
the lattice vectors (``cell[i, :]`` is the i'th vector)
"""
# We limit the scope of this SuperCell object.
__slots__ = ('cell', '_origo', 'volume', 'nsc', 'n_s', '_sc_off', '_isc_off')
def __init__(self, cell, nsc=None, origo=None):
if nsc is None:
nsc = [1, 1, 1]
# If the length of cell is 6 it must be cell-parameters, not
# actual cell coordinates
self.cell = self.tocell(cell)
if origo is None:
self._origo = _a.zerosd(3)
else:
self._origo = _a.arrayd(origo)
if self._origo.size != 3:
raise ValueError("Origo *must* be 3 numbers.")
# Set the volume
self._update_vol()
self.nsc = _a.onesi(3)
# Set the super-cell
self.set_nsc(nsc=nsc)
@property
def length(self):
""" Length of each lattice vector """
return fnorm(self.cell)
@property
def origo(self):
""" Origo for the cell """
return self._origo
@origo.setter
def origo(self, origo):
""" Set origo """
self._origo[:] = origo
def area(self, ax0, ax1):
""" Calculate the area spanned by the two axis `ax0` and `ax1` """
return (cross3(self.cell[ax0, :], self.cell[ax1, :]) ** 2).sum() ** 0.5
def toCuboid(self, orthogonal=False):
""" A cuboid with vectors as this unit-cell and center with respect to its origo
Parameters
----------
orthogonal : bool, optional
if true the cuboid has orthogonal sides such that the entire cell is contained
"""
if not orthogonal:
return Cuboid(self.cell.copy(), self.center() + self.origo)
def find_min_max(cmin, cmax, new):
for i in range(3):
cmin[i] = min(cmin[i], new[i])
cmax[i] = max(cmax[i], new[i])
cmin = self.cell.min(0)
cmax = self.cell.max(0)
find_min_max(cmin, cmax, self.cell[[0, 1], :].sum(0))
find_min_max(cmin, cmax, self.cell[[0, 2], :].sum(0))
find_min_max(cmin, cmax, self.cell[[1, 2], :].sum(0))
find_min_max(cmin, cmax, self.cell.sum(0))
return Cuboid(cmax - cmin, self.center() + self.origo)
def parameters(self, rad=False):
r""" Cell parameters of this cell in 3 lengths and 3 angles
Notes
-----
Since we return the length and angles between vectors it may not be possible to
recreate the same cell. Only in the case where the first lattice vector *only*
has a Cartesian :math:`x` component will this be the case
Parameters
----------
rad : bool, optional
whether the angles are returned in radians (otherwise in degree)
Returns
-------
float
length of first lattice vector
float
length of second lattice vector
float
length of third lattice vector
float
angle between b and c vectors
float
angle between a and c vectors
float
angle between a and b vectors
"""
if rad:
f = 1.
else:
f = 180 / np.pi
# Calculate length of each lattice vector
cell = self.cell.copy()
abc = fnorm(cell)
from math import acos
cell = cell / abc.reshape(-1, 1)
alpha = acos(dot3(cell[1, :], cell[2, :])) * f
beta = acos(dot3(cell[0, :], cell[2, :])) * f
gamma = acos(dot3(cell[0, :], cell[1, :])) * f
return abc[0], abc[1], abc[2], alpha, beta, gamma
def _update_vol(self):
self.volume = abs(dot3(self.cell[0, :], cross3(self.cell[1, :], self.cell[2, :])))
def _fill(self, non_filled, dtype=None):
""" Return a zero filled array of length 3 """
if len(non_filled) == 3:
return non_filled
# Fill in zeros
# This will purposefully raise an exception
# if the dimensions of the periodic ones
# are not consistent.
if dtype is None:
try:
dtype = non_filled.dtype
except Exception:
dtype = np.dtype(non_filled[0].__class__)
if dtype == np.dtype(int):
# Never go higher than int32 for default
# guesses on integer lists.
dtype = np.int32
f = np.zeros(3, dtype)
i = 0
if self.nsc[0] > 1:
f[0] = non_filled[i]
i += 1
if self.nsc[1] > 1:
f[1] = non_filled[i]
i += 1
if self.nsc[2] > 1:
f[2] = non_filled[i]
return f
def _fill_sc(self, supercell_index):
""" Return a filled supercell index by filling in zeros where needed """
return self._fill(supercell_index, dtype=np.int32)
def set_nsc(self, nsc=None, a=None, b=None, c=None):
""" Sets the number of supercells in the 3 different cell directions
Parameters
----------
nsc : list of int, optional
number of supercells in each direction
a : integer, optional
number of supercells in the first unit-cell vector direction
b : integer, optional
number of supercells in the second unit-cell vector direction
c : integer, optional
number of supercells in the third unit-cell vector direction
"""
if not nsc is None:
for i in range(3):
if not nsc[i] is None:
self.nsc[i] = nsc[i]
if a:
self.nsc[0] = a
if b:
self.nsc[1] = b
if c:
self.nsc[2] = c
# Correct for misplaced number of unit-cells
for i in range(3):
if self.nsc[i] == 0:
self.nsc[i] = 1
if np.sum(self.nsc % 2) != 3:
raise ValueError(
"Supercells has to be of un-even size. The primary cell counts " +
"one, all others count 2")
# We might use this very often, hence we store it
self.n_s = _a.prodi(self.nsc)
self._sc_off = _a.zerosi([self.n_s, 3])
self._isc_off = _a.zerosi(self.nsc)
n = self.nsc
# We define the following ones like this:
def ret_range(val):
i = val // 2
return range(-i, i+1)
x = ret_range(n[0])
y = ret_range(n[1])
z = ret_range(n[2])
i = 0
for iz in z:
for iy in y:
for ix in x:
if ix == 0 and iy == 0 and iz == 0:
continue
# Increment index
i += 1
# The offsets for the supercells in the
# sparsity pattern
self._sc_off[i, 0] = ix
self._sc_off[i, 1] = iy
self._sc_off[i, 2] = iz
self._update_isc_off()
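        # Doctest-style sketch of the bookkeeping set up above:
        # >>> sc = SuperCell(1., nsc=[3, 3, 1])
        # >>> sc.n_s                    # 3 * 3 * 1 supercell images
        # 9
        # >>> sc.sc_off[0]              # the primary cell always sits at index 0
        # array([0, 0, 0], dtype=int32)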
def _update_isc_off(self):
""" Internal routine for updating the supercell indices """
for i in range(self.n_s):
d = self.sc_off[i, :]
self._isc_off[d[0], d[1], d[2]] = i
@property
def sc_off(self):
""" Integer supercell offsets """
return self._sc_off
@sc_off.setter
def sc_off(self, sc_off):
""" Set the supercell offset """
self._sc_off[:, :] = _a.arrayi(sc_off, order='C')
self._update_isc_off()
@property
def isc_off(self):
""" Internal indexed supercell ``[ia, ib, ic] == i`` """
return self._isc_off
def __iter__(self):
""" Iterate the supercells and the indices of the supercells """
yield from enumerate(self.sc_off)
def copy(self, cell=None, origo=None):
""" A deepcopy of the object
Parameters
----------
cell : array_like
the new cell parameters
origo : array_like
the new origo
"""
if origo is None:
origo = self.origo.copy()
if cell is None:
copy = self.__class__(np.copy(self.cell), nsc=np.copy(self.nsc), origo=origo)
else:
copy = self.__class__(np.copy(cell), nsc=np.copy(self.nsc), origo=origo)
# Ensure that the correct super-cell information gets carried through
if not np.allclose(copy.sc_off, self.sc_off):
copy.sc_off = self.sc_off
return copy
def fit(self, xyz, axis=None, tol=0.05):
""" Fit the supercell to `xyz` such that the unit-cell becomes periodic in the specified directions
The fitted supercell tries to determine the unit-cell parameters by solving a set of linear equations
corresponding to the current supercell vectors.
>>> numpy.linalg.solve(self.cell.T, xyz.T)
It is important to know that this routine will *only* work if at least some of the atoms are
integer offsets of the lattice vectors. I.e. the resulting fit will depend on the translation
of the coordinates.
Parameters
----------
xyz : array_like ``shape(*, 3)``
the coordinates that we will wish to encompass and analyze.
axis : None or array_like
if ``None`` equivalent to ``[0, 1, 2]``, else only the cell-vectors
along the provided axis will be used
tol : float
tolerance (in Angstrom) of the positions. I.e. we neglect coordinates
which are not within the radius of this magnitude
"""
# In case the passed coordinates are from a Geometry
from .geometry import Geometry
if isinstance(xyz, Geometry):
xyz = xyz.xyz[:, :]
cell = np.copy(self.cell[:, :])
# Get fractional coordinates to get the divisions in the current cell
x = dot(xyz, self.icell.T)
# Now we should figure out the correct repetitions
# by rounding to integer positions of the cell vectors
ix = np.rint(x)
# Figure out the displacements from integers
# Then reduce search space by removing those coordinates
# that are more than the tolerance.
dist = np.sqrt((dot(cell.T, (x - ix).T) ** 2).sum(0))
idx = (dist <= tol).nonzero()[0]
if len(idx) == 0:
raise ValueError('Could not fit the cell parameters to the coordinates '
'due to insufficient accuracy (try increase the tolerance)')
# Reduce problem to allowed values below the tolerance
x = x[idx, :]
ix = ix[idx, :]
# Reduce to total repetitions
ireps = np.amax(ix, axis=0) - np.amin(ix, axis=0) + 1
# Only repeat the axis requested
if isinstance(axis, Integral):
axis = [axis]
# Reduce the non-set axis
if not axis is None:
for ax in [0, 1, 2]:
if ax not in axis:
ireps[ax] = 1
# Enlarge the cell vectors
cell[0, :] *= ireps[0]
cell[1, :] *= ireps[1]
cell[2, :] *= ireps[2]
return self.copy(cell)
def swapaxes(self, a, b):
""" Swap axis `a` and `b` in a new `SuperCell`
        ``swapaxes(0, 1)`` returns a new `SuperCell` with the first and second lattice vectors interchanged.
"""
# Create index vector
idx = _a.arrayi([0, 1, 2])
idx[b] = a
idx[a] = b
# There _can_ be errors when sc_off isn't created by sisl
return self.__class__(np.copy(self.cell[idx, :], order='C'),
nsc=self.nsc[idx],
origo=np.copy(self.origo[idx], order='C'))
def plane(self, ax1, ax2, origo=True):
""" Query point and plane-normal for the plane spanning `ax1` and `ax2`
Parameters
----------
ax1 : int
the first axis vector
ax2 : int
the second axis vector
origo : bool, optional
whether the plane intersects the origo or the opposite corner of the
unit-cell.
Returns
-------
normal_V : numpy.ndarray
            the plane's normal vector (pointing outwards with regard to the cell)
p : numpy.ndarray
a point on the plane
Examples
--------
All 6 faces of the supercell can be retrieved like this:
>>> sc = SuperCell(4)
>>> n1, p1 = sc.plane(0, 1, True)
>>> n2, p2 = sc.plane(0, 1, False)
>>> n3, p3 = sc.plane(0, 2, True)
>>> n4, p4 = sc.plane(0, 2, False)
>>> n5, p5 = sc.plane(1, 2, True)
>>> n6, p6 = sc.plane(1, 2, False)
However, for performance critical calculations it may be advantageous to
do this:
>>> sc = SuperCell(4)
>>> uc = sc.cell.sum(0)
>>> n1, p1 = sc.plane(0, 1)
>>> n2 = -n1
>>> p2 = p1 + uc
>>> n3, p3 = sc.plane(0, 2)
>>> n4 = -n3
>>> p4 = p3 + uc
>>> n5, p5 = sc.plane(1, 2)
>>> n6 = -n5
>>> p6 = p5 + uc
Secondly, the variables ``p1``, ``p3`` and ``p5`` are always ``[0, 0, 0]`` and
``p2``, ``p4`` and ``p6`` are always ``uc``.
Hence this may be used to further reduce certain computations.
"""
cell = self.cell
n = cross3(cell[ax1, :], cell[ax2, :])
# Normalize
n /= dot3(n, n) ** 0.5
# Now we need to figure out if the normal vector
# is pointing outwards
# Take the cell center
up = cell.sum(0)
# Calculate the distance from the plane to the center of the cell
# If d is positive then the normal vector is pointing towards
# the center, so rotate 180
if dot3(n, up / 2) > 0.:
n *= -1
if origo:
return n, _a.zerosd([3])
# We have to reverse the normal vector
return -n, up
def __mul__(self, m):
""" Implement easy repeat function
Parameters
----------
m : int or array_like of length 3
a single integer may be regarded as [m, m, m].
A list will expand the unit-cell along the equivalent lattice vector.
Returns
-------
SuperCell
enlarged supercell
"""
# Simple form
if isinstance(m, Integral):
return self.tile(m, 0).tile(m, 1).tile(m, 2)
sc = self.copy()
for i, r in enumerate(m):
sc = sc.tile(r, i)
return sc
@property
def icell(self):
""" Returns the reciprocal (inverse) cell for the `SuperCell`.
Note: The returned vectors are still in ``[0, :]`` format
and not as returned by an inverse LAPACK algorithm.
"""
return cell_invert(self.cell)
@property
def rcell(self):
""" Returns the reciprocal cell for the `SuperCell` with ``2*np.pi``
Note: The returned vectors are still in [0, :] format
and not as returned by an inverse LAPACK algorithm.
"""
return cell_reciprocal(self.cell)
def cell_length(self, length):
""" Calculate cell vectors such that they each have length `length`
Parameters
----------
length : float or array_like
length for cell vectors, if an array it corresponds to the individual
vectors and it must have length 3
Returns
-------
numpy.ndarray
cell-vectors with prescribed length
"""
length = _a.asarrayd(length)
if length.size == 1:
length = np.tile(length, 3)
if length.size != 3:
raise ValueError(self.__class__.__name__ + '.cell_length length parameter should be a single '
'float, or an array of 3 values.')
return self.cell * (length.ravel() / self.length).reshape(3, 1)
def rotate(self, angle, v, only='abc', rad=False):
""" Rotates the supercell, in-place by the angle around the vector
One can control which cell vectors are rotated by designating them
individually with ``only='[abc]'``.
Parameters
----------
angle : float
the angle of which the geometry should be rotated
v : array_like
the vector around the rotation is going to happen
``v = [1,0,0]`` will rotate in the ``yz`` plane
rad : bool, optional
Whether the angle is in radians (True) or in degrees (False)
only : ('abc'), str, optional
only rotate the designated cell vectors.
"""
# flatten => copy
vn = _a.asarrayd(v).flatten()
vn /= fnorm(vn)
q = Quaternion(angle, vn, rad=rad)
q /= q.norm() # normalize the quaternion
cell = np.copy(self.cell)
if 'a' in only:
cell[0, :] = q.rotate(self.cell[0, :])
if 'b' in only:
cell[1, :] = q.rotate(self.cell[1, :])
if 'c' in only:
cell[2, :] = q.rotate(self.cell[2, :])
return self.copy(cell)
def offset(self, isc=None):
""" Returns the supercell offset of the supercell index """
if isc is None:
return _a.arrayd([0, 0, 0])
return dot(isc, self.cell)
def add(self, other):
""" Add two supercell lattice vectors to each other
Parameters
----------
other : SuperCell, array_like
the lattice vectors of the other supercell to add
"""
if not isinstance(other, SuperCell):
other = SuperCell(other)
cell = self.cell + other.cell
origo = self.origo + other.origo
nsc = np.where(self.nsc > other.nsc, self.nsc, other.nsc)
return self.__class__(cell, nsc=nsc, origo=origo)
def __add__(self, other):
return self.add(other)
__radd__ = __add__
def add_vacuum(self, vacuum, axis):
""" Add vacuum along the `axis` lattice vector
Parameters
----------
vacuum : float
amount of vacuum added, in Ang
axis : int
the lattice vector to add vacuum along
"""
cell = np.copy(self.cell)
d = cell[axis, :].copy()
# normalize to get direction vector
cell[axis, :] += d * (vacuum / fnorm(d))
return self.copy(cell)
def sc_index(self, sc_off):
""" Returns the integer index in the sc_off list that corresponds to `sc_off`
Returns the index for the supercell in the global offset.
Parameters
----------
sc_off : (3,) or list of (3,)
super cell specification. For each axis having value ``None`` all supercells
            along that axis are returned.
"""
def _assert(m, v):
if np.any(np.abs(v) > m):
raise ValueError("Requesting a non-existing supercell index")
hsc = self.nsc // 2
if len(sc_off) == 0:
return _a.arrayi([[]])
elif isinstance(sc_off[0], np.ndarray):
_assert(hsc[0], sc_off[:, 0])
_assert(hsc[1], sc_off[:, 1])
_assert(hsc[2], sc_off[:, 2])
return self._isc_off[sc_off[:, 0], sc_off[:, 1], sc_off[:, 2]]
elif isinstance(sc_off[0], (tuple, list)):
# We are dealing with a list of lists
sc_off = np.asarray(sc_off)
_assert(hsc[0], sc_off[:, 0])
_assert(hsc[1], sc_off[:, 1])
_assert(hsc[2], sc_off[:, 2])
return self._isc_off[sc_off[:, 0], sc_off[:, 1], sc_off[:, 2]]
# Fall back to the other routines
sc_off = self._fill_sc(sc_off)
if sc_off[0] is not None and sc_off[1] is not None and sc_off[2] is not None:
_assert(hsc[0], sc_off[0])
_assert(hsc[1], sc_off[1])
_assert(hsc[2], sc_off[2])
return self._isc_off[sc_off[0], sc_off[1], sc_off[2]]
# We build it because there are 'none'
if sc_off[0] is None:
idx = _a.arangei(self.n_s)
else:
idx = (self.sc_off[:, 0] == sc_off[0]).nonzero()[0]
if not sc_off[1] is None:
idx = idx[(self.sc_off[idx, 1] == sc_off[1]).nonzero()[0]]
if not sc_off[2] is None:
idx = idx[(self.sc_off[idx, 2] == sc_off[2]).nonzero()[0]]
return idx
def scale(self, scale):
""" Scale lattice vectors
Does not scale `origo`.
Parameters
----------
scale : ``float``
the scale factor for the new lattice vectors
"""
return self.copy(self.cell * scale)
def tile(self, reps, axis):
""" Extend the unit-cell `reps` times along the `axis` lattice vector
Notes
-----
This is *exactly* equivalent to the `repeat` routine.
Parameters
----------
reps : int
number of times the unit-cell is repeated along the specified lattice vector
axis : int
the lattice vector along which the repetition is performed
"""
cell = np.copy(self.cell)
nsc = np.copy(self.nsc)
origo = np.copy(self.origo)
cell[axis, :] *= reps
# Only reduce the size if it is larger than 5
if nsc[axis] > 3 and reps > 1:
# This is number of connections for the primary cell
h_nsc = nsc[axis] // 2
# The new number of supercells will then be
nsc[axis] = max(1, int(math.ceil(h_nsc / reps))) * 2 + 1
return self.__class__(cell, nsc=nsc, origo=origo)
def repeat(self, reps, axis):
""" Extend the unit-cell `reps` times along the `axis` lattice vector
Notes
-----
This is *exactly* equivalent to the `tile` routine.
Parameters
----------
reps : int
number of times the unit-cell is repeated along the specified lattice vector
axis : int
the lattice vector along which the repetition is performed
"""
return self.tile(reps, axis)
def cut(self, seps, axis):
""" Cuts the cell into several different sections. """
cell = np.copy(self.cell)
cell[axis, :] /= seps
return self.copy(cell)
def append(self, other, axis):
""" Appends other `SuperCell` to this grid along axis """
cell = np.copy(self.cell)
cell[axis, :] += other.cell[axis, :]
return self.copy(cell)
def prepend(self, other, axis):
""" Prepends other `SuperCell` to this grid along axis
For a `SuperCell` object this is equivalent to `append`.
"""
return self.append(other, axis)
def move(self, v):
""" Appends additional space to the object """
# check which cell vector resembles v the most,
# use that
cell = np.copy(self.cell)
p = np.empty([3], np.float64)
cl = fnorm(cell)
for i in range(3):
p[i] = abs(np.sum(cell[i, :] * v)) / cl[i]
cell[np.argmax(p), :] += v
return self.copy(cell)
translate = move
def center(self, axis=None):
""" Returns center of the `SuperCell`, possibly with respect to an axis """
if axis is None:
return self.cell.sum(0) * 0.5
return self.cell[axis, :] * 0.5
@classmethod
def tocell(cls, *args):
r""" Returns a 3x3 unit-cell dependent on the input
1 argument
a unit-cell along Cartesian coordinates with side-length
equal to the argument.
3 arguments
the diagonal components of a Cartesian unit-cell
6 arguments
the cell parameters give by :math:`a`, :math:`b`, :math:`c`,
:math:`\alpha`, :math:`\beta` and :math:`\gamma` (angles
in degrees).
9 arguments
a 3x3 unit-cell.
Parameters
----------
*args : float
            May be either 1, 3, 6 or 9 elements.
Note that the arguments will be put into an array and flattened
before checking the number of arguments.
Examples
--------
>>> cell_1_1_1 = SuperCell.tocell(1.)
>>> cell_1_2_3 = SuperCell.tocell(1., 2., 3.)
>>> cell_1_2_3 = SuperCell.tocell([1., 2., 3.]) # same as above
"""
# Convert into true array (flattened)
args = _a.asarrayd(args).ravel()
nargs = len(args)
# A square-box
if nargs == 1:
return np.diag([args[0]] * 3)
# Diagonal components
if nargs == 3:
return np.diag(args)
# Cell parameters
if nargs == 6:
cell = _a.zerosd([3, 3])
a = args[0]
b = args[1]
c = args[2]
alpha = args[3]
beta = args[4]
gamma = args[5]
from math import sqrt, cos, sin, pi
pi180 = pi / 180.
cell[0, 0] = a
g = gamma * pi180
cg = cos(g)
sg = sin(g)
cell[1, 0] = b * cg
cell[1, 1] = b * sg
b = beta * pi180
cb = cos(b)
sb = sin(b)
cell[2, 0] = c * cb
a = alpha * pi180
d = (cos(a) - cb * cg) / sg
cell[2, 1] = c * d
cell[2, 2] = c * sqrt(sb ** 2 - d ** 2)
return cell
# A complete cell
if nargs == 9:
return args.copy().reshape(3, 3)
raise ValueError(
"Creating a unit-cell has to have 1, 3 or 6 arguments, please correct.")
def is_orthogonal(self):
""" Returns true if the cell vectors are orthogonal """
# Convert to unit-vector cell
cell = np.copy(self.cell)
cl = fnorm(cell)
cell[0, :] = cell[0, :] / cl[0]
cell[1, :] = cell[1, :] / cl[1]
cell[2, :] = cell[2, :] / cl[2]
i_s = dot3(cell[0, :], cell[1, :]) < 0.001
i_s = dot3(cell[0, :], cell[2, :]) < 0.001 and i_s
i_s = dot3(cell[1, :], cell[2, :]) < 0.001 and i_s
return i_s
def parallel(self, other, axis=(0, 1, 2)):
""" Returns true if the cell vectors are parallel to `other`
Parameters
----------
other : SuperCell
the other object to check whether the axis are parallel
axis : int or array_like
only check the specified axis (default to all)
"""
axis = _a.asarrayi(axis).ravel()
# Convert to unit-vector cell
for i in axis:
a = self.cell[i, :] / fnorm(self.cell[i, :])
b = other.cell[i, :] / fnorm(other.cell[i, :])
if abs(dot3(a, b) - 1) > 0.001:
return False
return True
def angle(self, i, j, rad=False):
""" The angle between two of the cell vectors
Parameters
----------
i : int
the first cell vector
j : int
the second cell vector
rad : bool, optional
whether the returned value is in radians
"""
n = fnorm(self.cell[[i, j], :])
ang = math.acos(dot3(self.cell[i, :], self.cell[j, :]) / (n[0] * n[1]))
if rad:
return ang
return math.degrees(ang)
@staticmethod
def read(sile, *args, **kwargs):
""" Reads the supercell from the `Sile` using ``Sile.read_supercell``
Parameters
----------
sile : Sile, str or pathlib.Path
a `Sile` object which will be used to read the supercell
if it is a string it will create a new sile using `sisl.io.get_sile`.
"""
# This only works because, they *must*
# have been imported previously
from sisl.io import get_sile, BaseSile
if isinstance(sile, BaseSile):
return sile.read_supercell(*args, **kwargs)
else:
with get_sile(sile) as fh:
return fh.read_supercell(*args, **kwargs)
def equal(self, other, tol=1e-4):
""" Check whether two supercell are equivalent
Parameters
----------
tol : float, optional
tolerance value for the cell vectors and origo
"""
if not isinstance(other, (SuperCell, SuperCellChild)):
return False
        same = np.allclose(self.cell, other.cell, atol=tol)
        same = same and np.allclose(self.nsc, other.nsc)
        same = same and np.allclose(self.origo, other.origo, atol=tol)
        return same
def __str__(self):
""" Returns a string representation of the object """
# Create format for lattice vectors
s = ',\n '.join(['ABC'[i] + '=[{:.3f}, {:.3f}, {:.3f}]'.format(*self.cell[i]) for i in (0, 1, 2)])
return self.__class__.__name__ + ('{{nsc: [{:} {:} {:}],\n ' + s + ',\n}}').format(*self.nsc)
def __repr__(self):
a, b, c, alpha, beta, gamma = map(lambda r: round(r, 3), self.parameters())
return f"<{self.__module__}.{self.__class__.__name__} a={a}, b={b}, c={c}, α={alpha}, β={beta}, γ={gamma}, nsc={self.nsc}>"
def __eq__(self, other):
""" Equality check """
return self.equal(other)
def __ne__(self, b):
""" In-equality check """
return not (self == b)
# Create pickling routines
def __getstate__(self):
""" Returns the state of this object """
return {'cell': self.cell, 'nsc': self.nsc, 'sc_off': self.sc_off, 'origo': self.origo}
def __setstate__(self, d):
""" Re-create the state of this object """
self.__init__(d['cell'], d['nsc'], d['origo'])
self.sc_off = d['sc_off']
def __plot__(self, axis=None, axes=False, *args, **kwargs):
""" Plot the supercell in a specified ``matplotlib.Axes`` object.
Parameters
----------
axis : array_like, optional
only plot a subset of the axis, defaults to all axis
axes : bool or matplotlib.Axes, optional
the figure axes to plot in (if ``matplotlib.Axes`` object).
If ``True`` it will create a new figure to plot in.
            If ``False`` it will try and grab the current figure and the current axes.
"""
# Default dictionary for passing to newly created figures
d = dict()
# Try and default the color and alpha
if 'color' not in kwargs and len(args) == 0:
kwargs['color'] = 'k'
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.5
if axis is None:
axis = [0, 1, 2]
# Ensure we have a new 3D Axes3D
if len(axis) == 3:
d['projection'] = '3d'
axes = plt.get_axes(axes, **d)
# Create vector objects
o = self.origo
v = []
for a in axis:
v.append(np.vstack((o[axis], o[axis] + self.cell[a, axis])))
v = np.array(v)
if axes.__class__.__name__.startswith('Axes3D'):
# We should plot in 3D plots
for vv in v:
axes.plot(vv[:, 0], vv[:, 1], vv[:, 2], *args, **kwargs)
v0, v1 = v[0], v[1] - o
axes.plot(v0[1, 0] + v1[:, 0], v0[1, 1] + v1[:, 1], v0[1, 2] + v1[:, 2], *args, **kwargs)
axes.set_zlabel('Ang')
else:
for vv in v:
axes.plot(vv[:, 0], vv[:, 1], *args, **kwargs)
v0, v1 = v[0], v[1] - o[axis]
axes.plot(v0[1, 0] + v1[:, 0], v0[1, 1] + v1[:, 1], *args, **kwargs)
axes.plot(v1[1, 0] + v0[:, 0], v1[1, 1] + v0[:, 1], *args, **kwargs)
axes.set_xlabel('Ang')
axes.set_ylabel('Ang')
return axes
class SuperCellChild:
""" Class to be inherited by using the ``self.sc`` as a `SuperCell` object
Initialize by a `SuperCell` object and get access to several different
routines directly related to the `SuperCell` class.
"""
def set_nsc(self, *args, **kwargs):
""" Set the number of super-cells in the `SuperCell` object
See `set_nsc` for allowed parameters.
See Also
--------
SuperCell.set_nsc : the underlying called method
"""
self.sc.set_nsc(*args, **kwargs)
def set_supercell(self, sc):
""" Overwrites the local supercell """
if sc is None:
# Default supercell is a simple
# 1x1x1 unit-cell
self.sc = SuperCell([1., 1., 1.])
elif isinstance(sc, SuperCell):
self.sc = sc
elif isinstance(sc, SuperCellChild):
self.sc = sc.sc
else:
# The supercell is given as a cell
self.sc = SuperCell(sc)
# Loop over attributes in this class
# if it inherits SuperCellChild, we call
# set_sc on that too.
# Sadly, getattr fails for @property methods
# which forces us to use try ... except
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for a in dir(self):
try:
if isinstance(getattr(self, a), SuperCellChild):
getattr(self, a).set_supercell(self.sc)
except:
pass
set_sc = set_supercell
@property
def volume(self):
""" Returns the inherent `SuperCell` objects `vol` """
return self.sc.volume
def area(self, ax0, ax1):
""" Calculate the area spanned by the two axis `ax0` and `ax1` """
return (cross3(self.sc.cell[ax0, :], self.sc.cell[ax1, :]) ** 2).sum() ** 0.5
@property
def cell(self):
""" Returns the inherent `SuperCell` objects `cell` """
return self.sc.cell
@property
def icell(self):
""" Returns the inherent `SuperCell` objects `icell` """
return self.sc.icell
@property
def rcell(self):
""" Returns the inherent `SuperCell` objects `rcell` """
return self.sc.rcell
@property
def origo(self):
""" Returns the inherent `SuperCell` objects `origo` """
return self.sc.origo
@property
def n_s(self):
""" Returns the inherent `SuperCell` objects `n_s` """
return self.sc.n_s
@property
def nsc(self):
""" Returns the inherent `SuperCell` objects `nsc` """
return self.sc.nsc
@property
def sc_off(self):
""" Returns the inherent `SuperCell` objects `sc_off` """
return self.sc.sc_off
@property
def isc_off(self):
""" Returns the inherent `SuperCell` objects `isc_off` """
return self.sc.isc_off
def add_vacuum(self, vacuum, axis):
""" Add vacuum along the `axis` lattice vector
Parameters
----------
vacuum : float
amount of vacuum added, in Ang
axis : int
the lattice vector to add vacuum along
"""
copy = self.copy()
copy.set_supercell(self.sc.add_vacuum(vacuum, axis))
return copy
def _fill(self, non_filled, dtype=None):
return self.sc._fill(non_filled, dtype)
def _fill_sc(self, supercell_index):
return self.sc._fill_sc(supercell_index)
def sc_index(self, *args, **kwargs):
""" Call local `SuperCell` object `sc_index` function """
return self.sc.sc_index(*args, **kwargs)
def is_orthogonal(self):
""" Return true if all cell vectors are linearly independent"""
return self.sc.is_orthogonal()
| lgpl-3.0 |
meduz/scikit-learn | sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
    # Not a square similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
ericmjl/influenza-reassortment | source_pair.py | 1 | 4298 | import networkx as nx
import numpy as np
import pickle as pkl
import pandas as pd
import tables as tb
import sys
from itertools import combinations
class SourcePairSearcher(object):
"""
SourcePairSearcher
Identifies isolates for which a source pair search will be performed.
"""
def __init__(self, handle, isolate_num, segment_stores):
super(SourcePairSearcher, self).__init__()
self.handle = handle
# Open access the list of isolates for which source pairs are to be found.
        with open('{0} Isolates for Source Pair Search.pkllist'.format(self.handle), 'rb') as f:
self.sink = pkl.load(f)[isolate_num]
print(self.sink)
self.isolate_num = isolate_num
self.G = nx.read_gpickle('{0} Initialized Graph.pkl'.format(self.handle))
# self.hdf5store = tb.open_file('{0} Segment Affmats.h5'.format(self.handle))
self.older_nodes = []
self.segment_stores = segment_stores
self.maxpwi = 0
self.sources = dict()
def run(self):
self.get_nodes_earlier_in_time()
for n in range(1, 5):
combs = self.segment_combinations(n)
for comb in combs:
print('Currently on combination:')
print('{0}'.format(comb))
comb1 = self.sum_subset_segment_pwis(comb[0])
comb2 = self.sum_subset_segment_pwis(comb[1])
sumpwi = comb1.max() + comb2.max()
print("Sum PWI: {0}".format(sumpwi))
if sumpwi < self.maxpwi:
pass
else:
filtered1 = comb1[comb1 == comb1.max()]
filtered2 = comb2[comb2 == comb2.max()]
if sumpwi > self.maxpwi and not np.isnan(sumpwi):
self.sources = dict()
self.maxpwi = sumpwi
self.sources[comb[0]] = [i for i in filtered1.index]
self.sources[comb[1]] = [i for i in filtered2.index]
print(self.maxpwi)
self.add_edges()
self.extract_nodes()
self.save_graph()
def add_edges(self):
for segs, isolates in self.sources.items():
for source in isolates:
d = {'edge_type':'reassortant', 'pwi':self.maxpwi, 'segments':dict()}
for s in segs:
d['segments'][s] = None
self.G.add_edge(source, self.sink, attr_dict=d)
def save_graph(self):
nx.write_gpickle(self.G, 'reassortant_edges/{0} Reassortant Edges {1}.pkl'.format(self.handle, self.isolate_num))
def extract_nodes(self):
nodes_to_extract = set()
for n1, n2 in self.G.edges():
nodes_to_extract.add(n1)
nodes_to_extract.add(n2)
self.G = self.G.subgraph(nodes_to_extract)
def get_segment_store(self, segment):
"""
This helper function gets the particular store from the hdf5 set of stores.
"""
self.segment_stores[segment] = pd.read_hdf('{0} Segment Affmats.h5'.format(self.handle), key='segment{0}'.format(segment))
def segment_combinations(self, n):
"""
Here:
n = number of segments from first source.
Therefore, logically:
!n = complement of segments from second source.
"""
segments = set(range(1,9))
return [(tuple(set(i)), tuple(segments.difference(i))) for i in combinations(segments, n)]
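    # Illustrative sketch of the output (for n = 1): self.segment_combinations(1)
    # yields 8 pairs such as ((1,), (2, 3, 4, 5, 6, 7, 8)), i.e. every way of
    # drawing n segments from one source plus the complementary 8 - n segments
    # from the other source.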
def get_nodes_earlier_in_time(self):
print('Getting earlier nodes...')
isolate_date = self.G.node[self.sink]['collection_date']
self.older_nodes = [n for n, d in self.G.nodes(data=True) if d['collection_date'] < isolate_date]
def get_col(self, segment):
"""
Gets the column of PWIs for the sink as a Pandas dataframe, filtered only to
older nodes.
"""
df = self.segment_stores[segment].loc[self.older_nodes,self.sink]
print(df)
return df
def sum_subset_segment_pwis(self, segments):
"""
Returns the summed PWIs for a given subset of segments
"""
sumpwis = None
for i, segment in enumerate(segments):
pwis = self.get_col(segment)
if i == 0:
sumpwis = pwis
if i > 0:
sumpwis = sumpwis + pwis
return sumpwis
if __name__ == '__main__':
handle = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
def get_segment_store(segment):
"""
This helper function gets the particular store from the hdf5 set of stores.
"""
return pd.read_hdf('{0} Segment Affmats.h5'.format(handle), key='segment{0}'.format(segment))
segment_stores = dict()
for segment in range(1,9):
print('Getting segment {0} store'.format(segment))
segment_stores[segment] = get_segment_store(segment)
for i in range(start, end):
sps = SourcePairSearcher(handle, i, segment_stores)
sps.run() | mit |
mnmnc/campephilus | modules/plotter/plot.py | 1 | 2808 | import matplotlib.pyplot as plt
class Plot:
def save(self, destination_filename="plotted.png", width=10, height=10, local_dpi=100):
fig = plt.gcf()
fig.set_size_inches(width, height)
plt.savefig(destination_filename, dpi=local_dpi)
def plot(self, xlist, ylist, marker_style='circle', def_color="r", def_alpha=0.5, mylinewidth=2.0):
if marker_style == "circle":
plt.plot(xlist, ylist, def_color+'o', alpha=def_alpha)
elif marker_style == "pixel":
plt.plot(xlist, ylist, def_color+',', alpha=def_alpha)
elif marker_style == "point":
plt.plot(xlist, ylist, def_color+'.', alpha=def_alpha)
elif marker_style == "x":
plt.plot(xlist, ylist, def_color+'x', alpha=def_alpha)
elif marker_style == "line":
plt.plot(xlist, ylist, def_color+'-', alpha=def_alpha, linewidth=mylinewidth)
elif marker_style == "triangle":
plt.plot(xlist, ylist, def_color+'^', alpha=def_alpha)
else:
plt.plot(xlist, ylist, def_color+'o', alpha=def_alpha)
def plot_with_sizes(self, xlist, ylist, sizes, marker_style='circle', def_color="r", def_alpha=0.5):
if marker_style == "circle":
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
elif marker_style == "pixel":
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
elif marker_style == "point":
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
elif marker_style == "x":
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
elif marker_style == "line":
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
elif marker_style == "triangle":
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
else:
plt.scatter(xlist, ylist, s=sizes, alpha=def_alpha)
def set_label(self, axis_name, label):
if axis_name == "x":
plt.xlabel(label)
elif axis_name == "y":
plt.ylabel(label)
else:
print("[ERR] Unknown label", label)
def set_title(self, title="Title", size=12):
font = {'fontname':'Courier New','fontsize':size}
plt.title(title, **font)
def set_text(self, x=0, y=0, text="Text missing"):
plt.text(x, y, text)
def set_axis_limit(self, min_x=0, max_x=100, min_y=0, max_y=100):
plt.axis([min_x, max_x, min_y, max_y])
def set_note(self, x,y, text_x, text_y, text):
"""
x,y - pointed end
text_x, text_y - location of the text
"""
plt.annotate(text, xy=(x, y),
xytext=(text_x, text_y),
arrowprops=dict(
facecolor='black',
shrink=0.08,
width=1.0,
headwidth=5.0,
alpha=0.3
)
)
def clear_plot(self):
plt.clf()
def main():
out_image = "D:\\out.png"
pl = Plot()
#plt.plot([1,2,3,4], [1,4,9,16], 'ro')
pl.set_axis_limit(10,10)
pl.plot([1,4,9,16], [1,2,3,4], "line", "b", 0.4)
#plot([8,4,9,16], [11,14,3,4], 'y^', ms=8.0, alpha=0.4)
f = plt.gcf()
pl.save(out_image)
pass
if __name__ == "__main__":
main() | apache-2.0 |
lesteve/sphinx-gallery | examples/plot_function_identifier.py | 1 | 1731 | # -*- coding: utf-8 -*-
"""
Identifying function names in a script
======================================
This demonstrates how Sphinx-Gallery identifies function names to figure out
which functions are called in the script and to which module do they belong.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
import os # noqa, analysis:ignore
import matplotlib.pyplot as plt
from sphinx_gallery.backreferences import identify_names
filename = os.__file__.replace('.pyc', '.py')
names = identify_names(filename)
figheight = len(names) + .5
fontsize = 20
###############################################################################
# Sphinx-Gallery examines both the executed code itself, as well as the
# documentation blocks (such as this one, or the top-level one),
# to find backreferences. This means that by writing :obj:`numpy.sin`
# and :obj:`numpy.exp` here, a backreference will be created even though
# they are not explicitly used in the code. This is useful in particular when
# functions return classes -- if you add them to the documented blocks of
# examples that use them, they will be shown in the backreferences.
fig = plt.figure(figsize=(7.5, 8))
for i, (name, obj) in enumerate(names.items()):
fig.text(0.55, (float(len(names)) - 0.5 - i) / figheight,
name,
ha="right",
size=fontsize,
transform=fig.transFigure,
bbox=dict(boxstyle='square', fc="w", ec="k"))
fig.text(0.6, (float(len(names)) - 0.5 - i) / figheight,
obj["module"],
ha="left",
size=fontsize,
transform=fig.transFigure,
bbox=dict(boxstyle='larrow', fc="w", ec="k"))
#
plt.draw()
plt.show()
| bsd-3-clause |
krosaen/ml-study | kaggle/forest-cover-type-prediction/preprocess.py | 1 | 1087 | from sklearn.preprocessing import StandardScaler
import functools
import operator
def make_preprocessor(td, column_summary):
# it's important to scale consistently on all preprocessing based on
# consistent scaling, so we do it once and keep ahold of it for all future
# scaling.
stdsc = StandardScaler()
stdsc.fit(td[column_summary['quantitative']])
def scale_q(df, column_summary):
df[column_summary['quantitative']] = stdsc.transform(df[column_summary['quantitative']])
return df, column_summary
def scale_binary_c(df, column_summary):
binary_cs = [['{}{}'.format(c, v) for v in vs] for c, vs in column_summary['categorical'].items()]
all_binary_cs = functools.reduce(operator.add, binary_cs)
df[all_binary_cs] = df[all_binary_cs].applymap(lambda x: 1 if x == 1 else -1)
return df, column_summary
def preprocess(df):
fns = [scale_q, scale_binary_c]
cs = column_summary
for fn in fns:
df, cs = fn(df, cs)
return df
return preprocess, column_summary
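# Minimal usage sketch (the training dataframe `td`, a test dataframe `test_df`
# and the `column_summary` dict with 'quantitative'/'categorical' keys are
# assumed to come from the surrounding pipeline; they are not defined here):
#
#   preprocess, column_summary = make_preprocessor(td, column_summary)
#   td_ready = preprocess(td.copy())
#   test_ready = preprocess(test_df.copy())   # reuses the scaler fitted on td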
| mit |
yinwenpeng/rescale | examples/kinships.py | 9 | 2823 | #!/usr/bin/env python
import logging
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('Example Kinships')
import numpy as np
from numpy import dot, array, zeros, setdiff1d
from numpy.linalg import norm
from numpy.random import shuffle
from scipy.io.matlab import loadmat
from scipy.sparse import lil_matrix
from sklearn.metrics import precision_recall_curve, auc
from rescal import rescal_als
def predict_rescal_als(T):
A, R, _, _, _ = rescal_als(
T, 100, init='nvecs', conv=1e-3,
lambda_A=10, lambda_R=10
)
n = A.shape[0]
P = zeros((n, n, len(R)))
for k in range(len(R)):
P[:, :, k] = dot(A, dot(R[k], A.T))
return P
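# The reconstruction above follows the RESCAL model: for each relation k the
# predicted slice is P[:, :, k] = A . R_k . A^T, where A holds the latent entity
# factors and R_k the latent interactions for relation k.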
def normalize_predictions(P, e, k):
for a in range(e):
for b in range(e):
nrm = norm(P[a, b, :k])
if nrm != 0:
# round values for faster computation of AUC-PR
P[a, b, :k] = np.round_(P[a, b, :k] / nrm, decimals=3)
return P
def innerfold(T, mask_idx, target_idx, e, k, sz):
Tc = [Ti.copy() for Ti in T]
mask_idx = np.unravel_index(mask_idx, (e, e, k))
target_idx = np.unravel_index(target_idx, (e, e, k))
# set values to be predicted to zero
for i in range(len(mask_idx[0])):
Tc[mask_idx[2][i]][mask_idx[0][i], mask_idx[1][i]] = 0
# predict unknown values
P = predict_rescal_als(Tc)
P = normalize_predictions(P, e, k)
# compute area under precision recall curve
prec, recall, _ = precision_recall_curve(GROUND_TRUTH[target_idx], P[target_idx])
return auc(recall, prec)
if __name__ == '__main__':
# load data
mat = loadmat('data/alyawarradata.mat')
K = array(mat['Rs'], np.float32)
e, k = K.shape[0], K.shape[2]
SZ = e * e * k
# copy ground truth before preprocessing
GROUND_TRUTH = K.copy()
# construct array for rescal
T = [lil_matrix(K[:, :, i]) for i in range(k)]
_log.info('Datasize: %d x %d x %d | No. of classes: %d' % (
T[0].shape + (len(T),) + (k,))
)
# Do cross-validation
FOLDS = 10
IDX = list(range(SZ))
shuffle(IDX)
fsz = int(SZ / FOLDS)
offset = 0
AUC_train = zeros(FOLDS)
AUC_test = zeros(FOLDS)
for f in range(FOLDS):
idx_test = IDX[offset:offset + fsz]
idx_train = setdiff1d(IDX, idx_test)
shuffle(idx_train)
idx_train = idx_train[:fsz].tolist()
_log.info('Train Fold %d' % f)
AUC_train[f] = innerfold(T, idx_train + idx_test, idx_train, e, k, SZ)
_log.info('Test Fold %d' % f)
AUC_test[f] = innerfold(T, idx_test, idx_test, e, k, SZ)
offset += fsz
_log.info('AUC-PR Test Mean / Std: %f / %f' % (AUC_test.mean(), AUC_test.std()))
_log.info('AUC-PR Train Mean / Std: %f / %f' % (AUC_train.mean(), AUC_train.std()))
| gpl-3.0 |
havogt/serialbox2 | src/serialbox-python/sdb/sdbgui/popupaboutwidget.py | 2 | 2905 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
from PyQt5.QtCore import QT_VERSION_STR, Qt
from PyQt5.QtWidgets import QLabel, QVBoxLayout, QHBoxLayout, QPushButton, QSizePolicy
from sdbcore.logger import Logger
from sdbcore.version import Version
from sdbgui.pixmap import Pixmap
from sdbgui.popupwidget import PopupWidget
class PopupAboutWidget(PopupWidget):
def __init__(self, parent):
super().__init__(parent)
Logger.info("Showing about message box")
self.setWindowTitle("About sdb")
image = Pixmap("logo.png")
image_scaled = image.scaled(self.geometry().height(), self.geometry().width(),
Qt.KeepAspectRatio)
self.__widget_label_image = QLabel()
self.__widget_label_image.setPixmap(image_scaled)
about_txt = ("",
"sdb (%s)" % Version().sdb_version(),
"Serialbox (%s)" % Version().serialbox_version(),
"numpy (%s)" % Version().numpy_version(),
"matplotlib (%s)" % Version().matplotlib_version(),
"PyQt5 (%s)" % QT_VERSION_STR,
"IPython (%s)" % Version().ipython_version(),
"",
"Copyright (c) 2016-2017, Fabian Thuering",
"",
"All rights reserved.",
"",
"The program is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE "
"WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.",
"")
self.__widget_label_about_txt = QLabel()
self.__widget_label_about_txt.setText("\n".join(about_txt))
self.__widget_label_about_txt.setWordWrap(True)
hbox = QHBoxLayout()
hbox.addWidget(self.__widget_label_image)
hbox.addStretch(1)
hbox_button = QHBoxLayout()
hbox_button.addStretch(1)
cancel_button = QPushButton("Cancel")
cancel_button.clicked.connect(self.close)
hbox_button.addWidget(cancel_button)
vbox = QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self.__widget_label_about_txt)
vbox.addLayout(hbox_button)
self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
self.setLayout(vbox)
self.show()
def keyPressEvent(self, QKeyEvent):
if QKeyEvent.key() == Qt.Key_Escape:
Logger.info("Closing about message box")
self.close()
| bsd-2-clause |
pradyu1993/scikit-learn | examples/neighbors/plot_classification.py | 7 | 1724 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print __doc__
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
pl.axis('tight')
pl.show()
| bsd-3-clause |
hongliuuuu/Results_Dis | finalMe/AMKLrbf.py | 1 | 6721 | from sklearn.kernel_approximation import (RBFSampler,Nystroem)
from sklearn.ensemble import RandomForestClassifier
import pandas
import numpy as np
import random
from sklearn.svm import SVC
from sklearn.metrics.pairwise import rbf_kernel,laplacian_kernel,chi2_kernel,linear_kernel,polynomial_kernel,cosine_similarity
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
def splitdata(X,Y,ratio,seed):
'''This function is to split the data into train and test data randomly and preserve the pos/neg ratio'''
n_samples = X.shape[0]
y = Y.astype(int)
y_bin = np.bincount(y)
classes = np.nonzero(y_bin)[0]
#fint the indices for each class
indices = []
for i in classes:
indice = []
for j in range(n_samples):
if y[j] == i:
indice.append(j)
indices.append(indice)
train_indices = []
for i in indices:
k = int(len(i)*ratio)
train_indices += (random.Random(seed).sample(i,k=k))
#find the unused indices
s = np.bincount(train_indices,minlength=n_samples)
mask = s==0
test_indices = np.arange(n_samples)[mask]
return train_indices,test_indices
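# Usage sketch (ratio/seed values mirror the experiment loops further down; the
# per-class sampling above keeps the class balance of Y inside the training set):
#   train_idx, test_idx = splitdata(X, Y, ratio=0.7, seed=1000)
#   # train_idx holds ~70% of each class, test_idx the remaining disjoint indices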
def kn(X,Y):
d = np.dot(X,Y)
dx = np.sqrt(np.dot(X,X))
dy = np.sqrt(np.dot(Y,Y))
    if dx * dy == 0:
        # Zero-length vector: the cosine is undefined.  Report it and fall back
        # to the orthogonal-vector value (0 + 1) ** 3 = 1 (an assumed choice;
        # the original printed the vectors and then divided by zero).
        print(X, Y)
        return 1.0
    k = pow((d / (dx * dy) + 1), 3)
    return k
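# kn is a polynomial-of-cosine kernel: with cos(X, Y) = X.Y / (|X| |Y|),
# kn(X, Y) = (cos(X, Y) + 1) ** 3, so values range from 0 (anti-parallel)
# to 8 (parallel).  similarity() below assembles the full pairwise matrix
# and keeps the diagonal at 0.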
def similarity(X):
n_samples = X.shape[0]
dis = np.zeros((n_samples, n_samples))
for i in range(n_samples):
dis[i][i] = 0
for i in range(n_samples):
for j in range(i + 1, n_samples):
dis[i][j] = dis[j][i] = kn(X[i],X[j])
return dis
def Lsvm_patatune(train_x,train_y):
tuned_parameters = [
{'kernel': ['precomputed'], 'C': [0.01, 0.1, 1, 10, 100, 1000]}]
clf = GridSearchCV(SVC(C=1, probability=True), tuned_parameters, cv=5, n_jobs=1
) # SVC(probability=True)#SVC(kernel="linear", probability=True)
clf.fit(train_x, train_y)
return clf.best_params_['C']
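# Lsvm_patatune runs a 5-fold grid search over C for an SVM on a precomputed
# kernel and returns the best C; it is called once per training split below so
# the regularisation strength adapts to each kernel combination.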
url = './MyData.csv'
dataframe = pandas.read_csv(url)#, header=None)
array = dataframe.values
X = array[:,1:]
Y = pandas.read_csv('./MyDatalabel.csv')
Y = Y.values
Y = Y[:,1:]
Y = Y.transpose()
Y = np.ravel(Y)
n_samples = X.shape[0]
n_features = X.shape[1]
for i in range(len(Y)):
if Y[i] == 4:
Y[i]=1
#X = min_max_scaler.fit_transform(X)
#X1_features = similarity(X[:, 0:2])
#X2_features = similarity(X[:, 2:21])
#X3_features = similarity(X[:, 21:])
"""
e1 = []
X1_features = polynomial_kernel(X[:, 0:2])+linear_kernel(X[:, 0:2])+rbf_kernel(X[:, 0:2])+laplacian_kernel(X[:, 0:2])
X2_features = linear_kernel(X[:, 2:21])+polynomial_kernel(X[:, 2:21])+rbf_kernel(X[:, 2:21])+laplacian_kernel(X[:, 2:21])
X3_features = linear_kernel(X[:, 21:])+polynomial_kernel(X[:, 21:])+rbf_kernel(X[:, 21:])+laplacian_kernel(X[:, 21:])
X_features = (X1_features + X2_features + X3_features)
for l in range(10):
train_indices, test_indices = splitdata(X=X, Y=Y, ratio=0.7, seed=1000 + l)
X_features1 = np.transpose(X_features)
X_features2 = X_features1[train_indices]
X_features3 = np.transpose(X_features2)
clf = SVC(kernel='precomputed')
clf.fit(X_features3[train_indices], Y[train_indices])
e1.append(clf.score(X_features3[test_indices], Y[test_indices]))
s = "combination of %d_%d_%d" % (l, l, l)
if np.mean(e1) > big:
big = np.mean(e1)
print(np.mean(e1))
print(s)
testfile.write(s + ":%f" % (np.mean(e1)) + '\n')
"""
min_max_scaler = preprocessing.MinMaxScaler()
svm_X = min_max_scaler.fit_transform(X)
min_max_scaler = preprocessing.MinMaxScaler()
X_new = X
X = min_max_scaler.fit_transform(X)
for r in range(3):
if r==0:
R = 0.3
elif r==1:
R = 0.5
else:
R = 0.7
testfile = open("AMKLcombinationTest%f.txt" % R, 'w')
big = 0
mm = ""
err = 0
for i in range(5):
for j in range(5):
for k in range(5):
if(i==0):
X1_features = polynomial_kernel(X[:, 0:2])
elif(i==1):
X1_features = linear_kernel(X[:, 0:2])
elif(i==2):
X1_features = rbf_kernel(X[:, 0:2])
elif(i==3):
X1_features = laplacian_kernel(X[:, 0:2])
elif (i == 4):
X1_features = similarity(X_new[:, 0:2])
if (j == 0):
X2_features = polynomial_kernel(X[:, 2:21])
elif (j == 1):
X2_features = linear_kernel(X[:, 2:21])
elif (j == 2):
X2_features = rbf_kernel(X[:, 2:21])
elif (j == 3):
X2_features = laplacian_kernel(X[:, 2:21])
elif (j == 4):
X2_features = similarity(X_new[:, 2:21])
if (k == 0):
X3_features = polynomial_kernel(X[:, 21:])
elif (k == 1):
X3_features = linear_kernel(X[:, 21:])
elif (k == 2):
X3_features = rbf_kernel(X[:, 21:])
elif (k == 3):
X3_features = laplacian_kernel(X[:, 21:])
elif (k == 4):
X3_features = similarity(X_new[:, 21:])
X_features = (X1_features + X2_features + X3_features)
e1 = []
for l in range(10):
train_indices, test_indices = splitdata(X=X, Y=Y, ratio=R, seed=1000 + l)
X_features1 = np.transpose(X_features)
X_features2 = X_features1[train_indices]
X_features3 = np.transpose(X_features2)
c = Lsvm_patatune(train_x=X_features3[train_indices], train_y=Y[train_indices])
print(c)
clf = SVC(C=c,kernel='precomputed')
clf.fit(X_features3[train_indices], Y[train_indices])
e1.append(clf.score(X_features3[test_indices], Y[test_indices]))
s = "combination of %d_%d_%d"%(i,j,k)
if np.mean(e1)>big:
big = np.mean(e1)
print(np.mean(e1))
print(s)
mm=s
err = big
std = np.std(e1)
testfile.write(s + ":%f \p %f" % (np.mean(e1), np.std(e1)) + '\n')
testfile.write("best peformance is" + mm + ":%f \p %f" % (err, std) + '\n')
testfile.close()
| apache-2.0 |
vshtanko/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
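# Optionally, the selected hyperparameters can be inspected directly after the fit:
# print(grid_search.best_params_)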
| bsd-3-clause |
faroit/mir_eval | tests/test_display.py | 4 | 11202 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Unit tests for the display module'''
# For testing purposes, clobber the rcfile
import matplotlib
matplotlib.use('Agg') # nopep8
import matplotlib.pyplot as plt
import numpy as np
# Import the hacked image comparison module
from mpl_ic import image_comparison
from nose.tools import raises
# We'll make a decorator to handle style contexts
from decorator import decorator
import mir_eval
import mir_eval.display
from mir_eval.io import load_labeled_intervals
from mir_eval.io import load_valued_intervals
from mir_eval.io import load_labeled_events
from mir_eval.io import load_ragged_time_series
from mir_eval.io import load_wav
@decorator
def styled(f, *args, **kwargs):
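# restore matplotlib's default rcParams so the baseline image comparisons are deterministic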
matplotlib.rcdefaults()
return f(*args, **kwargs)
@image_comparison(baseline_images=['segment'], extensions=['png'])
@styled
def test_display_segment():
plt.figure()
# Load some segment data
intervals, labels = load_labeled_intervals('data/segment/ref00.lab')
# Plot the segments with no labels
mir_eval.display.segments(intervals, labels, text=False)
# Draw a legend
plt.legend()
@image_comparison(baseline_images=['segment_text'], extensions=['png'])
@styled
def test_display_segment_text():
plt.figure()
# Load some segment data
intervals, labels = load_labeled_intervals('data/segment/ref00.lab')
# Plot the segments with no labels
mir_eval.display.segments(intervals, labels, text=True)
@image_comparison(baseline_images=['labeled_intervals'], extensions=['png'])
@styled
def test_display_labeled_intervals():
plt.figure()
# Load some chord data
intervals, labels = load_labeled_intervals('data/chord/ref01.lab')
# Plot the chords with nothing fancy
mir_eval.display.labeled_intervals(intervals, labels)
@image_comparison(baseline_images=['labeled_intervals_noextend'],
extensions=['png'])
@styled
def test_display_labeled_intervals_noextend():
plt.figure()
# Load some chord data
intervals, labels = load_labeled_intervals('data/chord/ref01.lab')
# Plot the chords with nothing fancy
ax = plt.axes()
ax.set_yticklabels([])
mir_eval.display.labeled_intervals(intervals, labels,
label_set=[],
extend_labels=False,
ax=ax)
@image_comparison(baseline_images=['labeled_intervals_compare'],
extensions=['png'])
@styled
def test_display_labeled_intervals_compare():
plt.figure()
# Load some chord data
ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')
# Plot reference and estimates using label set extension
mir_eval.display.labeled_intervals(ref_int, ref_labels,
alpha=0.5, label='Reference')
mir_eval.display.labeled_intervals(est_int, est_labels,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['labeled_intervals_compare_noextend'],
extensions=['png'])
@styled
def test_display_labeled_intervals_compare_noextend():
plt.figure()
# Load some chord data
ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')
# Plot reference and estimate, but only use the reference labels
mir_eval.display.labeled_intervals(ref_int, ref_labels,
alpha=0.5, label='Reference')
mir_eval.display.labeled_intervals(est_int, est_labels,
extend_labels=False,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['labeled_intervals_compare_common'],
extensions=['png'])
@styled
def test_display_labeled_intervals_compare_common():
plt.figure()
# Load some chord data
ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')
label_set = list(sorted(set(ref_labels) | set(est_labels)))
# Plot reference and estimate with a common label set
mir_eval.display.labeled_intervals(ref_int, ref_labels,
label_set=label_set,
alpha=0.5, label='Reference')
mir_eval.display.labeled_intervals(est_int, est_labels,
label_set=label_set,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['hierarchy_nolabel'], extensions=['png'])
@styled
def test_display_hierarchy_nolabel():
plt.figure()
# Load some chord data
int0, lab0 = load_labeled_intervals('data/hierarchy/ref00.lab')
int1, lab1 = load_labeled_intervals('data/hierarchy/ref01.lab')
# Plot reference and estimate with a common label set
mir_eval.display.hierarchy([int0, int1],
[lab0, lab1])
plt.legend()
@image_comparison(baseline_images=['hierarchy_label'], extensions=['png'])
@styled
def test_display_hierarchy_label():
plt.figure()
# Load some chord data
int0, lab0 = load_labeled_intervals('data/hierarchy/ref00.lab')
int1, lab1 = load_labeled_intervals('data/hierarchy/ref01.lab')
# Plot reference and estimate with a common label set
mir_eval.display.hierarchy([int0, int1],
[lab0, lab1],
levels=['Large', 'Small'])
plt.legend()
@image_comparison(baseline_images=['pitch_hz'], extensions=['png'])
@styled
def test_pitch_hz():
plt.figure()
ref_times, ref_freqs = load_labeled_events('data/melody/ref00.txt')
est_times, est_freqs = load_labeled_events('data/melody/est00.txt')
# Plot pitches on a Hz scale
mir_eval.display.pitch(ref_times, ref_freqs, unvoiced=True,
label='Reference')
mir_eval.display.pitch(est_times, est_freqs, unvoiced=True,
label='Estimate')
plt.legend()
@image_comparison(baseline_images=['pitch_midi'], extensions=['png'])
@styled
def test_pitch_midi():
plt.figure()
times, freqs = load_labeled_events('data/melody/ref00.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.pitch(times, freqs, midi=True)
mir_eval.display.ticker_notes()
@image_comparison(baseline_images=['pitch_midi_hz'], extensions=['png'])
@styled
def test_pitch_midi_hz():
plt.figure()
times, freqs = load_labeled_events('data/melody/ref00.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.pitch(times, freqs, midi=True)
mir_eval.display.ticker_pitch()
@image_comparison(baseline_images=['multipitch_hz_unvoiced'],
extensions=['png'])
@styled
def test_multipitch_hz_unvoiced():
plt.figure()
times, pitches = load_ragged_time_series('data/multipitch/est01.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.multipitch(times, pitches, midi=False, unvoiced=True)
@image_comparison(baseline_images=['multipitch_hz_voiced'], extensions=['png'])
@styled
def test_multipitch_hz_voiced():
plt.figure()
times, pitches = load_ragged_time_series('data/multipitch/est01.txt')
mir_eval.display.multipitch(times, pitches, midi=False, unvoiced=False)
@image_comparison(baseline_images=['multipitch_midi'], extensions=['png'])
@styled
def test_multipitch_midi():
plt.figure()
ref_t, ref_p = load_ragged_time_series('data/multipitch/ref01.txt')
est_t, est_p = load_ragged_time_series('data/multipitch/est01.txt')
# Plot pitches on a midi scale with note tickers
mir_eval.display.multipitch(ref_t, ref_p, midi=True,
alpha=0.5, label='Reference')
mir_eval.display.multipitch(est_t, est_p, midi=True,
alpha=0.5, label='Estimate')
plt.legend()
@image_comparison(baseline_images=['piano_roll'], extensions=['png'])
@styled
def test_pianoroll():
plt.figure()
ref_t, ref_p = load_valued_intervals('data/transcription/ref04.txt')
est_t, est_p = load_valued_intervals('data/transcription/est04.txt')
mir_eval.display.piano_roll(ref_t, ref_p,
label='Reference', alpha=0.5)
mir_eval.display.piano_roll(est_t, est_p,
label='Estimate', alpha=0.5, facecolor='r')
plt.legend()
@image_comparison(baseline_images=['piano_roll_midi'], extensions=['png'])
@styled
def test_pianoroll_midi():
plt.figure()
ref_t, ref_p = load_valued_intervals('data/transcription/ref04.txt')
est_t, est_p = load_valued_intervals('data/transcription/est04.txt')
ref_midi = mir_eval.util.hz_to_midi(ref_p)
est_midi = mir_eval.util.hz_to_midi(est_p)
mir_eval.display.piano_roll(ref_t, midi=ref_midi,
label='Reference', alpha=0.5)
mir_eval.display.piano_roll(est_t, midi=est_midi,
label='Estimate', alpha=0.5, facecolor='r')
plt.legend()
@image_comparison(baseline_images=['ticker_midi_zoom'], extensions=['png'])
@styled
def test_ticker_midi_zoom():
plt.figure()
plt.plot(np.arange(3))
mir_eval.display.ticker_notes()
@image_comparison(baseline_images=['separation'], extensions=['png'])
@styled
def test_separation():
plt.figure()
x0, fs = load_wav('data/separation/ref05/0.wav')
x1, fs = load_wav('data/separation/ref05/1.wav')
x2, fs = load_wav('data/separation/ref05/2.wav')
mir_eval.display.separation([x0, x1, x2], fs=fs)
@image_comparison(baseline_images=['separation_label'], extensions=['png'])
@styled
def test_separation_label():
plt.figure()
x0, fs = load_wav('data/separation/ref05/0.wav')
x1, fs = load_wav('data/separation/ref05/1.wav')
x2, fs = load_wav('data/separation/ref05/2.wav')
mir_eval.display.separation([x0, x1, x2], fs=fs,
labels=['Alice', 'Bob', 'Carol'])
plt.legend()
@image_comparison(baseline_images=['events'], extensions=['png'])
@styled
def test_events():
plt.figure()
# Load some event data
beats_ref = mir_eval.io.load_events('data/beat/ref00.txt')[:30]
beats_est = mir_eval.io.load_events('data/beat/est00.txt')[:30]
# Plot both with labels
mir_eval.display.events(beats_ref, label='reference')
mir_eval.display.events(beats_est, label='estimate')
plt.legend()
@image_comparison(baseline_images=['labeled_events'], extensions=['png'])
@styled
def test_labeled_events():
plt.figure()
# Load some event data
beats_ref = mir_eval.io.load_events('data/beat/ref00.txt')[:10]
labels = list('abcdefghijklmnop')
# Plot both with labels
mir_eval.display.events(beats_ref, labels)
@raises(ValueError)
def test_pianoroll_nopitch_nomidi():
# Issue 214
mir_eval.display.piano_roll([[0, 1]])
| mit |
MatthieuBizien/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
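# Minimal usage sketch of the estimator under test (for orientation only; it mirrors the
# calls exercised by the tests below):
# lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=42).fit(X)
# distances, indices = lshf.kneighbors(X[:1], n_neighbors=5, return_distance=True)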
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when the `radius` is set
# to the mean distance from the query point to the other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
rubikloud/scikit-learn | examples/decomposition/plot_pca_iris.py | 2 | 1805 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
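# Optionally, pca.explained_variance_ratio_ reports the fraction of variance captured by
# each of the 3 retained components.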
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[[0]])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[[1]])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
balajiln/mondrianforest | src/mondrianforest.py | 1 | 72853 | #!/usr/bin/env python
#
# Example usage:
#
# NOTE:
# optype=real: Gaussian parametrization uses a non-linear transformation of split times
# variance should decrease as split_time increases:
# variance at node j = variance_coef * (sigmoid(sigmoid_coef * t_j) - sigmoid(sigmoid_coef * t_{parent(j)}))
# non-linear transformation should be a monotonically non-decreasing function
# sigmoid has a saturation effect: children will be similar to parent as we go down the tree
# split times t_j scales inversely with the number of dimensions
import sys
import os
import optparse
import math
import time
import cPickle as pickle
import random
import pprint as pp
import numpy as np
from warnings import warn
from utils import hist_count, logsumexp, softmax, sample_multinomial, \
sample_multinomial_scores, empty, assert_no_nan, check_if_zero, check_if_one, \
multiply_gaussians, divide_gaussians, sigmoid, logsumexp_array
from mondrianforest_utils import Forest, Param, parser_add_common_options, parser_check_common_options, \
bootstrap, parser_add_mf_options, parser_check_mf_options, reset_random_seed, \
load_data, add_stuff_2_settings, compute_gaussian_pdf, compute_gaussian_logpdf, \
get_filename_mf, precompute_minimal, compute_left_right_statistics, \
create_prediction_tree, init_prediction_tree, update_predictive_posterior_node, \
compute_metrics_classification, compute_metrics_regression, \
update_posterior_node_incremental, init_update_posterior_node_incremental
from itertools import izip, count, chain
from collections import defaultdict
try:
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import rc
rc('font', **{'family':'serif'})
rc('text', usetex=True)
rc('legend', handlelength=4)
rc('legend', **{'fontsize':9})
except:
warn('matplotlib not loaded: plotting not possible; set draw_mondrian=0')
try:
import pydot
except:
warn('pydot not loaded: tree will not be printed; set draw_mondrian=0')
# setting numpy options to debug RuntimeWarnings
#np.seterr(divide='raise')
np.seterr(divide='ignore') # to avoid warnings for np.log(0)
np.seterr(invalid='ignore') # to avoid warnings for inf * 0 = nan
np.set_printoptions(precision=3)
np.set_printoptions(linewidth=200)
# color scheme for mondrian
# colors_list = ['DarkRed', 'Navy', 'DimGray', 'Beige']
# other nice colors: Beige, MediumBlue, DarkRed vs FireBrick
colors_list = ['LightGray'] # paused leaf will always be shaded gray
LW = 2
FIGSIZE = (12, 9)
INF = np.inf
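# A minimal sketch (not used elsewhere in this file) of the per-node variance described in
# the NOTE at the top, assuming `sigmoid` imported from utils and scalar split times
# t_node >= t_parent >= 0:
def _node_variance_sketch(variance_coef, sigmoid_coef, t_node, t_parent):
    # variance_coef * (sigmoid(sigmoid_coef * t_j) - sigmoid(sigmoid_coef * t_{parent(j)}))
    return variance_coef * (sigmoid(sigmoid_coef * t_node) - sigmoid(sigmoid_coef * t_parent))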
def process_command_line():
parser = parser_add_common_options()
parser = parser_add_mf_options(parser)
settings, args = parser.parse_args()
add_stuff_2_settings(settings)
if settings.optype == 'class':
settings.alpha = 0 # normalized stable prior
assert settings.smooth_hierarchically
parser_check_common_options(parser, settings)
parser_check_mf_options(parser, settings)
if settings.budget < 0:
settings.budget_to_use = INF
else:
settings.budget_to_use = settings.budget
return settings
class MondrianBlock(object):
"""
defines Mondrian block
variables:
- min_d : dimension-wise min of training data in current block
- max_d : dimension-wise max of training data in current block
- range_d : max_d - min_d
- sum_range_d : sum of range_d
- left : id of left child
- right : id of right child
- parent : id of parent
- is_leaf : boolean variable to indicate if current block is a leaf
- budget : remaining lifetime for subtree rooted at current block
= lifetime of Mondrian - time of split of parent
NOTE: time of split of parent of root node is 0
"""
def __init__(self, data, settings, budget, parent, range_stats):
self.min_d, self.max_d, self.range_d, self.sum_range_d = range_stats
self.budget = budget + 0.
self.parent = parent
self.left = None
self.right = None
self.is_leaf = True
class MondrianTree(object):
"""
defines a Mondrian tree
variables:
- node_info : stores splits for internal nodes
- root : id of root node
- leaf_nodes : list of leaf nodes
- non_leaf_nodes: list of non-leaf nodes
- max_split_costs : max_split_cost for a node is time of split of node - time of split of parent
max_split_cost is drawn from an exponential
- train_ids : list of train ids stored for paused Mondrian blocks
- counts : stores histogram of labels at each node (when optype = 'class')
- grow_nodes : list of Mondrian blocks that need to be "grown"
functions:
- __init__ : initialize a Mondrian tree
- grow : samples Mondrian block (more precisely, restriction of blocks to training data)
- extend_mondrian : extend a Mondrian to include new training data
- extend_mondrian_block : conditional Mondrian algorithm
"""
def __init__(self, data=None, train_ids=None, settings=None, param=None, cache=None):
"""
initialize Mondrian tree data structure and sample restriction of Mondrian tree to current training data
data is a N x D numpy array containing the entire training data
train_ids is the training ids of the first minibatch
"""
if data is None:
return
root_node = MondrianBlock(data, settings, settings.budget_to_use, None, \
get_data_range(data, train_ids))
self.root = root_node
self.non_leaf_nodes = []
self.leaf_nodes = []
self.node_info = {}
self.max_split_costs = {}
self.split_times = {}
self.train_ids = {root_node: train_ids}
self.copy_params(param, settings)
init_prediction_tree(self, settings)
if cache:
if settings.optype == 'class':
self.counts = {root_node: cache['y_train_counts']}
else:
self.sum_y = {root_node: cache['sum_y']}
self.sum_y2 = {root_node: cache['sum_y2']}
self.n_points = {root_node: cache['n_points']}
if settings.bagging == 1 or settings.n_minibatches > 1:
init_update_posterior_node_incremental(self, data, param, settings, cache, root_node, train_ids)
self.grow_nodes = [root_node]
self.grow(data, settings, param, cache)
def copy_params(self, param, settings):
if settings.optype == 'real':
self.noise_variance = param.noise_variance + 0
self.noise_precision = param.noise_precision + 0
self.sigmoid_coef = param.sigmoid_coef + 0
self.variance_coef = param.variance_coef + 0
def get_average_depth(self, settings, data):
"""
compute average depth of tree (averaged over training data)
= depth of a leaf weighted by fraction of training data at that leaf
"""
self.depth_nodes = {self.root: 0}
tmp_node_list = [self.root]
n_total = 0.
average_depth = 0.
self.node_size_by_depth = defaultdict(list)
leaf_node_sizes = []
while True:
try:
node_id = tmp_node_list.pop(0)
except IndexError:
break
if node_id.is_leaf:
if settings.optype == 'class':
n_points_node = np.sum(self.counts[node_id])
else:
n_points_node = self.n_points[node_id]
n_total += n_points_node
average_depth += n_points_node * self.depth_nodes[node_id]
self.node_size_by_depth[self.depth_nodes[node_id]].append(node_id.sum_range_d)
if not node_id.is_leaf:
self.depth_nodes[node_id.left] = self.depth_nodes[node_id] + 1
self.depth_nodes[node_id.right] = self.depth_nodes[node_id] + 1
tmp_node_list.extend([node_id.left, node_id.right])
else:
leaf_node_sizes.append(node_id.sum_range_d)
assert data['n_train'] == int(n_total)
average_depth /= n_total
average_leaf_node_size = np.mean(leaf_node_sizes)
average_node_size_by_depth = {}
for k in self.node_size_by_depth:
average_node_size_by_depth[k] = np.mean(self.node_size_by_depth[k])
return (average_depth, average_leaf_node_size, average_node_size_by_depth)
def get_print_label_draw_tree(self, node_id, graph):
"""
helper function for draw_tree using pydot
"""
name = self.node_ids_print[node_id]
name2 = name
if name2 == '':
name2 = 'e'
if node_id.is_leaf:
op = name
else:
feat_id, split = self.node_info[node_id]
op = r'x_%d > %.2f\nt = %.2f' % (feat_id+1, split, self.cumulative_split_costs[node_id])
if op == '':
op = 'e'
node = pydot.Node(name=name2, label=op) # latex labels don't work
graph.add_node(node)
return (name2, graph)
def draw_tree(self, data, settings, figure_id=0, i_t=0):
"""
function to draw Mondrian tree using pydot
NOTE: set ADD_TIME=True if you want to set the edge length between parent and child
to the difference in time of splits
"""
self.gen_node_ids_print()
self.gen_cumulative_split_costs_only(settings, data)
graph = pydot.Dot(graph_type='digraph')
dummy, graph = self.get_print_label_draw_tree(self.root, graph)
ADD_TIME = False
for node_id in self.non_leaf_nodes:
parent, graph = self.get_print_label_draw_tree(node_id, graph)
left, graph = self.get_print_label_draw_tree(node_id.left, graph)
right, graph = self.get_print_label_draw_tree(node_id.right, graph)
for child, child_id in izip([left, right], [node_id.left, node_id.right]):
edge = pydot.Edge(parent, child)
if ADD_TIME and (not child_id.is_leaf):
edge.set_minlen(self.max_split_costs[child_id])
edge2 = pydot.Edge(dummy, child)
edge2.set_minlen(self.cumulative_split_costs[child_id])
edge2.set_style('invis')
graph.add_edge(edge2)
graph.add_edge(edge)
filename_plot_tag = get_filename_mf(settings)[:-2]
if settings.save:
tree_name = filename_plot_tag + '-mtree_minibatch-' + str(figure_id) + '.pdf'
print 'saving file: %s' % tree_name
graph.write_pdf(tree_name)
def draw_mondrian(self, data, settings, figure_id=None, i_t=0):
"""
function to draw Mondrian partitions; each Mondrian tree is one subplot.
"""
assert data['n_dim'] == 2 and settings.normalize_features == 1 \
and settings.n_mondrians <= 10
self.gen_node_list()
if settings.n_mondrians == 1 and settings.dataset == 'toy-mf':
self.draw_tree(data, settings, figure_id, i_t)
if settings.n_mondrians > 2:
n_row = 2
else:
n_row = 1
n_col = int(math.ceil(settings.n_mondrians / n_row))
if figure_id is None:
figure_id = 0
fig = plt.figure(figure_id)
plt.hold(True)
ax = plt.subplot(n_row, n_col, i_t+1, aspect='equal')
EPS = 0.
ax.set_xlim(xmin=0-EPS)
ax.set_xlim(xmax=1+EPS)
ax.set_ylim(ymin=0-EPS)
ax.set_ylim(ymax=1+EPS)
ax.autoscale(False)
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
non_leaf_nodes = [self.root]
while non_leaf_nodes:
node_id = non_leaf_nodes.pop(0)
try:
feat_id, split = self.node_info[node_id]
except:
continue
left, right = node_id.left, node_id.right
non_leaf_nodes.append(left)
non_leaf_nodes.append(right)
EXTRA = 0.0 # to show splits that separate 2 data points
if feat_id == 1:
# axhline doesn't work if you rescale
ax.hlines(split, node_id.min_d[0] - EXTRA, node_id.max_d[0] + EXTRA, lw=LW, color='k')
else:
ax.vlines(split, node_id.min_d[1] - EXTRA, node_id.max_d[1] + EXTRA, lw=LW, color='k')
# add "outer patch" that defines the extent (not data dependent)
block = patches.Rectangle((0, 0), 1, 1, facecolor='white', edgecolor='gray', ls='dashed')
ax.add_patch(block)
for i_, node_id in enumerate(self.node_list):
# plot only the block where Mondrian has been induced (limited by extent of training data)
block = patches.Rectangle((node_id.min_d[0], node_id.min_d[1]), node_id.range_d[0], \
node_id.range_d[1], facecolor='white', edgecolor='gray')
ax.add_patch(block)
for i_, node_id in enumerate(self.leaf_nodes):
# plot only the block where Mondrian has been induced (limited by extent of training data)
block = patches.Rectangle((node_id.min_d[0], node_id.min_d[1]), node_id.range_d[0], \
node_id.range_d[1], facecolor=colors_list[i_ % len(colors_list)], edgecolor='black')
ax.add_patch(block)
# zorder = 1 will make points inside the blocks invisible, >= 2 will make them visible
x_train = data['x_train'][self.train_ids[node_id], :]
#ax.scatter(x_train[:, 0], x_train[:, 1], color='k', marker='x', s=10, zorder=2)
color_y = 'rbk'
for y_ in range(data['n_class']):
idx = data['y_train'][self.train_ids[node_id]] == y_
ax.scatter(x_train[idx, 0], x_train[idx, 1], color=color_y[y_], marker='o', s=16, zorder=2)
plt.draw()
def gen_node_ids_print(self):
"""
generate binary string label for each node
root_node is denoted by empty string "e"
all other node labels are defined as follows: left(j) = j0, right(j) = j1
e.g. left and right child of root_node are 0 and 1 respectively,
left and right of node 0 are 00 and 01 respectively and so on.
"""
node_ids = [self.root]
self.node_ids_print = {self.root: ''}
while node_ids:
node_id = node_ids.pop(0)
try:
feat_id, split = self.node_info[node_id]
left, right = node_id.left, node_id.right
node_ids.append(left)
node_ids.append(right)
self.node_ids_print[left] = self.node_ids_print[node_id] + '0'
self.node_ids_print[right] = self.node_ids_print[node_id] + '1'
except KeyError:
continue
def print_dict(self, d):
"""
print a dictionary
"""
for k in d:
print '\tk_map = %10s, val = %s' % (self.node_ids_print[k], d[k])
def print_list(self, list_):
"""
print a list
"""
print '\t%s' % ([self.node_ids_print[x] for x in list_])
def print_tree(self, settings):
"""
prints some tree statistics: leaf nodes, non-leaf nodes, information and so on
"""
self.gen_node_ids_print()
print 'printing tree:'
print 'len(leaf_nodes) = %s, len(non_leaf_nodes) = %s' \
% (len(self.leaf_nodes), len(self.non_leaf_nodes))
print 'node_info ='
node_ids = [self.root]
while node_ids:
node_id = node_ids.pop(0)
node_id_print = self.node_ids_print[node_id]
try:
feat_id, split = self.node_info[node_id]
print '%10s, feat = %5d, split = %.2f, node_id = %s' % \
(node_id_print, feat_id, split, node_id)
if settings.optype == 'class':
print 'counts = %s' % self.counts[node_id]
else:
print 'n_points = %6d, sum_y = %.2f' % (self.n_points[node_id], self.sum_y[node_id])
left, right = node_id.left, node_id.right
node_ids.append(left)
node_ids.append(right)
except KeyError:
continue
print 'leaf info ='
for node_id in self.leaf_nodes:
node_id_print = self.node_ids_print[node_id]
print '%10s, train_ids = %s, node_id = %s' % \
(node_id_print, self.train_ids[node_id], node_id)
if settings.optype == 'class':
print 'counts = %s' % self.counts[node_id]
else:
print 'n_points = %6d, sum_y = %.2f' % (self.n_points[node_id], self.sum_y[node_id])
def check_if_labels_same(self, node_id):
"""
checks if all labels in a node are identical
"""
return np.count_nonzero(self.counts[node_id]) == 1
def pause_mondrian(self, node_id, settings):
"""
should you pause a Mondrian block or not?
pause if sum_range_d == 0 (important for handling duplicates) or
- optype == class: pause if all labels in a node are identical
- optype == real: pause if n_points < min_samples_split
"""
if settings.optype == 'class':
#pause_mondrian_tmp = self.check_if_labels_same(node_id)
if self.check_if_labels_same(node_id):
pause_mondrian_tmp = True
else:
pause_mondrian_tmp = (np.sum(self.counts[node_id]) < settings.min_samples_split)
else:
pause_mondrian_tmp = self.n_points[node_id] < settings.min_samples_split
pause_mondrian = pause_mondrian_tmp or (node_id.sum_range_d == 0)
return pause_mondrian
def get_parent_split_time(self, node_id, settings):
if node_id == self.root:
return 0.
else:
return self.split_times[node_id.parent]
def update_gaussian_hyperparameters(self, param, data, settings):
n_points = float(self.n_points[self.root])
param.prior_mean = self.sum_y[self.root] / n_points
param.prior_variance = self.sum_y2[self.root] / n_points \
- param.prior_mean ** 2
param.prior_precision = 1.0 / param.prior_variance
# TODO: estimate K using estimate of noise variance at leaf nodes?
# TODO: need to do this once for forest, rather than for each tree
# FIXME very very hacky, surely a better way to tune this?
if 'sfactor' in settings.tag:
s_begin = settings.tag.find('sfactor-') + 8
s_tmp = settings.tag[s_begin:]
s_factor = float(s_tmp[:s_tmp.find('-')])
else:
s_factor = 2.0
if 'kfactor' in settings.tag:
k_begin = settings.tag.find('kfactor-') + 8
k_tmp = settings.tag[k_begin:]
k_factor = float(k_tmp[:k_tmp.find('-')])
else:
k_factor = min(2 * n_points, 500) # noise variance is 1/K times prior_variance
if k_factor <= 0.:
K = 2. * n_points
else:
K = k_factor
param.noise_variance = param.prior_variance / K
param.noise_precision = 1.0 / param.noise_variance
param.variance_coef = 2.0 * param.prior_variance * K / (K + 2.)
param.sigmoid_coef = data['n_dim'] / (s_factor * np.log2(n_points))
# FIXME: important to copy over since prediction accesses hyperparameters in self
self.copy_params(param, settings)
def get_node_mean_and_variance(self, node):
n_points = float(self.n_points[node])
node_mean = self.sum_y[node] / n_points
node_variance = self.sum_y2[node] / n_points - node_mean ** 2
return (node_mean, node_variance)
def update_gaussian_hyperparameters_indep(self, param, data, settings):
n_points = float(self.n_points[self.root])
self.prior_mean, self.prior_variance = self.get_node_mean_and_variance(self.root)
self.prior_precision = 1.0 / self.prior_variance
self.cumulative_split_costs = {}
self.leaf_means = []
self.leaf_variances = []
node_means = []
d_node_means = {self.root: self.prior_mean}
node_parent_means = []
node_split_times = []
node_parent_split_times = []
if self.root.is_leaf:
self.cumulative_split_costs[self.root] = 0.
remaining = []
self.max_split_time = 0.1 # NOTE: initial value, need to specify non-zero value
else:
self.cumulative_split_costs[self.root] = self.max_split_costs[self.root]
remaining = [self.root.left, self.root.right]
self.max_split_time = self.cumulative_split_costs[self.root] + 0
node_split_times.append(self.cumulative_split_costs[self.root])
node_parent_split_times.append(0.)
node_means.append(self.prior_mean)
node_parent_means.append(self.prior_mean)
while True:
try:
node_id = remaining.pop(0)
except IndexError:
break
self.cumulative_split_costs[node_id] = self.cumulative_split_costs[node_id.parent] \
+ self.max_split_costs[node_id]
node_mean, node_variance = self.get_node_mean_and_variance(node_id)
node_split_times.append(self.cumulative_split_costs[node_id])
node_parent_split_times.append(self.cumulative_split_costs[node_id.parent])
node_means.append(node_mean)
node_parent_means.append(d_node_means[node_id.parent])
d_node_means[node_id] = node_mean
if not node_id.is_leaf:
remaining.append(node_id.left)
remaining.append(node_id.right)
self.max_split_time = max(self.max_split_time, self.cumulative_split_costs[node_id])
else:
self.leaf_means.append(node_mean)
self.leaf_variances.append(node_variance)
#self.noise_variance = np.max(self.leaf_variances)
self.noise_variance = np.mean(self.leaf_variances)
self.noise_precision = 1.0 / self.noise_variance
self.sigmoid_coef = 3. / self.max_split_time
#self.sigmoid_coef = data['n_dim']
#self.sigmoid_coef = data['n_dim'] / 5
#self.sigmoid_coef = data['n_dim'] / (2. * np.log2(n_points))
#self.sigmoid_coef = data['n_dim'] / (2. * np.log2(n_points))
#self.sigmoid_coef = data['n_dim'] / (n_points)
#self.variance_leaf_from_root = 2 * np.mean((np.array(self.leaf_means) - self.prior_mean) ** 2)
# set sd to 3 times the empirical sd so that leaf node means are highly plausible (avoid too much smoothing)
#self.variance_coef = 1.0 * self.variance_leaf_from_root
if self.root.is_leaf:
self.variance_coef = 1.0
else:
node_means = np.array(node_means)
node_parent_means = np.array(node_parent_means)
node_split_times = np.array(node_split_times)
node_parent_split_times = np.array(node_parent_split_times)
tmp_den = sigmoid(self.sigmoid_coef * node_split_times) \
- sigmoid(self.sigmoid_coef * node_parent_split_times)
tmp_num = (node_means - node_parent_means) ** 2
variance_coef_est = np.mean(tmp_num / tmp_den)
self.variance_coef = variance_coef_est
print 'sigmoid_coef = %.3f, variance_coef = %.3f' % (self.sigmoid_coef, variance_coef_est)
def grow(self, data, settings, param, cache):
"""
sample a Mondrian tree (each Mondrian block is restricted to range of training data in that block)
"""
if settings.debug:
print 'entering grow'
while self.grow_nodes:
node_id = self.grow_nodes.pop(0)
train_ids = self.train_ids[node_id]
if settings.debug:
print 'node_id = %s' % node_id
pause_mondrian = self.pause_mondrian(node_id, settings)
if settings.debug and pause_mondrian:
print 'pausing mondrian at node = %s, train_ids = %s' % (node_id, self.train_ids[node_id])
if pause_mondrian or (node_id.sum_range_d == 0): # BL: redundant now
split_cost = np.inf
self.max_split_costs[node_id] = node_id.budget + 0
self.split_times[node_id] = np.inf # FIXME: is this correct? inf or budget?
else:
split_cost = random.expovariate(node_id.sum_range_d)
self.max_split_costs[node_id] = split_cost
self.split_times[node_id] = split_cost + self.get_parent_split_time(node_id, settings)
new_budget = node_id.budget - split_cost
if node_id.budget > split_cost:
feat_id_chosen = sample_multinomial_scores(node_id.range_d)
split_chosen = random.uniform(node_id.min_d[feat_id_chosen], \
node_id.max_d[feat_id_chosen])
(train_ids_left, train_ids_right, cache_tmp) = \
compute_left_right_statistics(data, param, cache, train_ids, feat_id_chosen, split_chosen, settings)
left = MondrianBlock(data, settings, new_budget, node_id, get_data_range(data, train_ids_left))
right = MondrianBlock(data, settings, new_budget, node_id, get_data_range(data, train_ids_right))
node_id.left, node_id.right = left, right
self.grow_nodes.append(left)
self.grow_nodes.append(right)
self.train_ids[left] = train_ids_left
self.train_ids[right] = train_ids_right
if settings.optype == 'class':
self.counts[left] = cache_tmp['cnt_left_chosen']
self.counts[right] = cache_tmp['cnt_right_chosen']
else:
self.sum_y[left] = cache_tmp['sum_y_left']
self.sum_y2[left] = cache_tmp['sum_y2_left']
self.n_points[left] = cache_tmp['n_points_left']
self.sum_y[right] = cache_tmp['sum_y_right']
self.sum_y2[right] = cache_tmp['sum_y2_right']
self.n_points[right] = cache_tmp['n_points_right']
self.node_info[node_id] = [feat_id_chosen, split_chosen]
self.non_leaf_nodes.append(node_id)
node_id.is_leaf = False
if not settings.draw_mondrian:
self.train_ids.pop(node_id)
else:
self.leaf_nodes.append(node_id) # node_id.is_leaf set to True at init
def gen_cumulative_split_costs_only(self, settings, data):
"""
creates node_id.cumulative_split_cost as well as a dictionary self.cumulative_split_costs
helper function for draw_tree
"""
self.cumulative_split_costs = {}
if self.root.is_leaf:
self.cumulative_split_costs[self.root] = 0.
remaining = []
else:
self.cumulative_split_costs[self.root] = self.max_split_costs[self.root]
remaining = [self.root.left, self.root.right]
while True:
try:
node_id = remaining.pop(0)
except IndexError:
break
self.cumulative_split_costs[node_id] = self.cumulative_split_costs[node_id.parent] \
+ self.max_split_costs[node_id]
if not node_id.is_leaf:
remaining.append(node_id.left)
remaining.append(node_id.right)
def gen_node_list(self):
"""
generates an ordered node_list such that parent appears before children
useful for updating predictive posteriors
"""
self.node_list = [self.root]
i = -1
while True:
try:
i += 1
node_id = self.node_list[i]
except IndexError:
break
if not node_id.is_leaf:
self.node_list.extend([node_id.left, node_id.right])
def predict_class(self, x_test, n_class, param, settings):
"""
predict new label (for classification tasks)
"""
pred_prob = np.zeros((x_test.shape[0], n_class))
prob_not_separated_yet = np.ones(x_test.shape[0])
prob_separated = np.zeros(x_test.shape[0])
node_list = [self.root]
d_idx_test = {self.root: np.arange(x_test.shape[0])}
while True:
try:
node_id = node_list.pop(0)
except IndexError:
break
idx_test = d_idx_test[node_id]
if len(idx_test) == 0:
continue
x = x_test[idx_test, :]
expo_parameter = np.maximum(0, node_id.min_d - x).sum(1) + np.maximum(0, x - node_id.max_d).sum(1)
prob_not_separated_now = np.exp(-expo_parameter * self.max_split_costs[node_id])
prob_separated_now = 1 - prob_not_separated_now
if math.isinf(self.max_split_costs[node_id]):
# rare scenario where test point overlaps exactly with a training data point
idx_zero = expo_parameter == 0
# to prevent nan in computation above when test point overlaps with training data point
prob_not_separated_now[idx_zero] = 1.
prob_separated_now[idx_zero] = 0.
# predictions for idx_test_zero
# data dependent discounting (depending on how far test data point is from the mondrian block)
idx_non_zero = expo_parameter > 0
idx_test_non_zero = idx_test[idx_non_zero]
expo_parameter_non_zero = expo_parameter[idx_non_zero]
base = self.get_prior_mean(node_id, param, settings)
if np.any(idx_non_zero):
num_tables_k, num_customers, num_tables = self.get_counts(self.cnt[node_id])
# expected discount (averaging over time of cut which is a truncated exponential)
# discount = (expo_parameter_non_zero / (expo_parameter_non_zero + settings.discount_param)) * \
# (-np.expm1(-(expo_parameter_non_zero + settings.discount_param) * self.max_split_costs[node_id]))
discount = (expo_parameter_non_zero / (expo_parameter_non_zero + settings.discount_param)) \
* (-np.expm1(-(expo_parameter_non_zero + settings.discount_param) * self.max_split_costs[node_id])) \
/ (-np.expm1(-expo_parameter_non_zero * self.max_split_costs[node_id]))
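# i.e. discount = E[exp(-discount_param * t)] where t is the (random) time of the
# separating cut, an Exponential(expo_parameter) truncated at max_split_costs[node_id].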
discount_per_num_customers = discount / num_customers
pred_prob_tmp = num_tables * discount_per_num_customers[:, np.newaxis] * base \
+ self.cnt[node_id] / num_customers - discount_per_num_customers[:, np.newaxis] * num_tables_k
if settings.debug:
check_if_one(np.sum(pred_prob_tmp))
pred_prob[idx_test_non_zero, :] += prob_separated_now[idx_non_zero][:, np.newaxis] \
* prob_not_separated_yet[idx_test_non_zero][:, np.newaxis] * pred_prob_tmp
prob_not_separated_yet[idx_test] *= prob_not_separated_now
# predictions for idx_test_zero
if math.isinf(self.max_split_costs[node_id]) and np.any(idx_zero):
idx_test_zero = idx_test[idx_zero]
pred_prob_node_id = self.compute_posterior_mean_normalized_stable(self.cnt[node_id], \
self.get_discount_node_id(node_id, settings), base, settings)
pred_prob[idx_test_zero, :] += prob_not_separated_yet[idx_test_zero][:, np.newaxis] * pred_prob_node_id
try:
feat_id, split = self.node_info[node_id]
cond = x[:, feat_id] <= split
left, right = get_children_id(node_id)
d_idx_test[left], d_idx_test[right] = idx_test[cond], idx_test[~cond]
node_list.append(left)
node_list.append(right)
except KeyError:
pass
if True or settings.debug:
check_if_zero(np.sum(np.abs(np.sum(pred_prob, 1) - 1)))
return pred_prob
def predict_real(self, x_test, y_test, param, settings):
"""
predict new label (for regression tasks)
"""
pred_mean = np.zeros(x_test.shape[0])
pred_second_moment = np.zeros(x_test.shape[0])
pred_sample = np.zeros(x_test.shape[0])
log_pred_prob = -np.inf * np.ones(x_test.shape[0])
prob_not_separated_yet = np.ones(x_test.shape[0])
prob_separated = np.zeros(x_test.shape[0])
node_list = [self.root]
d_idx_test = {self.root: np.arange(x_test.shape[0])}
while True:
try:
node_id = node_list.pop(0)
except IndexError:
break
idx_test = d_idx_test[node_id]
if len(idx_test) == 0:
continue
x = x_test[idx_test, :]
expo_parameter = np.maximum(0, node_id.min_d - x).sum(1) + np.maximum(0, x - node_id.max_d).sum(1)
prob_not_separated_now = np.exp(-expo_parameter * self.max_split_costs[node_id])
prob_separated_now = 1 - prob_not_separated_now
if math.isinf(self.max_split_costs[node_id]):
# rare scenario where test point overlaps exactly with a training data point
idx_zero = expo_parameter == 0
# to prevent nan in computation above when test point overlaps with training data point
prob_not_separated_now[idx_zero] = 1.
prob_separated_now[idx_zero] = 0.
# predictions for idx_test_zero
idx_non_zero = expo_parameter > 0
idx_test_non_zero = idx_test[idx_non_zero]
n_test_non_zero = len(idx_test_non_zero)
expo_parameter_non_zero = expo_parameter[idx_non_zero]
if np.any(idx_non_zero):
# expected variance (averaging over time of cut which is a truncated exponential)
# NOTE: expected variance is approximate since E[f(x)] not equal to f(E[x])
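# For t ~ Exponential(rate) truncated at Delta = max_split_costs[node_id]:
# E[t | t < Delta] = 1/rate - Delta * exp(-rate*Delta) / (1 - exp(-rate*Delta)),
# which is what the next few lines compute elementwise.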
expected_cut_time = 1.0 / expo_parameter_non_zero
if not np.isinf(self.max_split_costs[node_id]):
tmp_exp_term_arg = -self.max_split_costs[node_id] * expo_parameter_non_zero
tmp_exp_term = np.exp(tmp_exp_term_arg)
expected_cut_time -= self.max_split_costs[node_id] * tmp_exp_term / (-np.expm1(tmp_exp_term_arg))
try:
assert np.all(expected_cut_time >= 0.)
except AssertionError:
print tmp_exp_term_arg
print tmp_exp_term
print expected_cut_time
print np.any(np.isnan(expected_cut_time))
print 1.0 / expo_parameter_non_zero
raise AssertionError
if not settings.smooth_hierarchically:
pred_mean_tmp = self.sum_y[node_id] / float(self.n_points[node_id])
pred_second_moment_tmp = self.sum_y2[node_id] / float(self.n_points[node_id]) + param.noise_variance
else:
pred_mean_tmp, pred_second_moment_tmp = self.pred_moments[node_id]
# FIXME: approximate since E[f(x)] not equal to f(E[x])
expected_split_time = expected_cut_time + self.get_parent_split_time(node_id, settings)
variance_from_mean = self.variance_coef * (sigmoid(self.sigmoid_coef * expected_split_time) \
- sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings)))
pred_second_moment_tmp += variance_from_mean
pred_variance_tmp = pred_second_moment_tmp - pred_mean_tmp ** 2
pred_sample_tmp = pred_mean_tmp + np.random.randn(n_test_non_zero) * np.sqrt(pred_variance_tmp)
log_pred_prob_tmp = compute_gaussian_logpdf(pred_mean_tmp, pred_variance_tmp, y_test[idx_test_non_zero])
prob_separated_now_weighted = \
prob_separated_now[idx_non_zero] * prob_not_separated_yet[idx_test_non_zero]
pred_mean[idx_test_non_zero] += prob_separated_now_weighted * pred_mean_tmp
pred_sample[idx_test_non_zero] += prob_separated_now_weighted * pred_sample_tmp
pred_second_moment[idx_test_non_zero] += prob_separated_now_weighted * pred_second_moment_tmp
log_pred_prob[idx_test_non_zero] = logsumexp_array(log_pred_prob[idx_test_non_zero], \
np.log(prob_separated_now_weighted) + log_pred_prob_tmp)
prob_not_separated_yet[idx_test] *= prob_not_separated_now
# predictions for idx_test_zero
if math.isinf(self.max_split_costs[node_id]) and np.any(idx_zero):
idx_test_zero = idx_test[idx_zero]
n_test_zero = len(idx_test_zero)
if not settings.smooth_hierarchically:
pred_mean_node_id = self.sum_y[node_id] / float(self.n_points[node_id])
pred_second_moment_node_id = self.sum_y2[node_id] / float(self.n_points[node_id]) \
+ param.noise_variance
else:
pred_mean_node_id, pred_second_moment_node_id = self.pred_moments[node_id]
pred_variance_node_id = pred_second_moment_node_id - pred_mean_node_id ** 2
pred_sample_node_id = pred_mean_node_id + np.random.randn(n_test_zero) * np.sqrt(pred_variance_node_id)
log_pred_prob_node_id = \
compute_gaussian_logpdf(pred_mean_node_id, pred_variance_node_id, y_test[idx_test_zero])
pred_mean[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_mean_node_id
pred_sample[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_sample_node_id
pred_second_moment[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_second_moment_node_id
log_pred_prob[idx_test_zero] = logsumexp_array(log_pred_prob[idx_test_zero], \
np.log(prob_not_separated_yet[idx_test_zero]) + log_pred_prob_node_id)
try:
feat_id, split = self.node_info[node_id]
cond = x[:, feat_id] <= split
left, right = get_children_id(node_id)
d_idx_test[left], d_idx_test[right] = idx_test[cond], idx_test[~cond]
node_list.append(left)
node_list.append(right)
except KeyError:
pass
pred_var = pred_second_moment - (pred_mean ** 2)
if True or settings.debug: # FIXME: remove later
assert not np.any(np.isnan(pred_mean))
assert not np.any(np.isnan(pred_var))
try:
assert np.all(pred_var >= 0.)
except AssertionError:
min_pred_var = np.min(pred_var)
print 'min_pred_var = %s' % min_pred_var
assert np.abs(min_pred_var) < 1e-3 # allowing some numerical errors
assert not np.any(np.isnan(log_pred_prob))
return (pred_mean, pred_var, pred_second_moment, log_pred_prob, pred_sample)
def extend_mondrian(self, data, train_ids_new, settings, param, cache):
"""
extend Mondrian tree to include new training data indexed by train_ids_new
"""
self.extend_mondrian_block(self.root, train_ids_new, data, settings, param, cache)
if settings.debug:
print 'completed extend_mondrian'
self.check_tree(settings, data)
def check_tree(self, settings, data):
"""
check if tree violates any sanity check
"""
if settings.debug:
#print '\nchecking tree'
print '\nchecking tree: printing tree first'
self.print_tree(settings)
for node_id in self.non_leaf_nodes:
assert node_id.left.parent == node_id.right.parent == node_id
assert not node_id.is_leaf
if settings.optype == 'class':
assert np.count_nonzero(self.counts[node_id]) > 1
assert not self.pause_mondrian(node_id, settings)
if node_id != self.root:
assert np.all(node_id.min_d >= node_id.parent.min_d)
assert np.all(node_id.max_d <= node_id.parent.max_d)
if settings.optype == 'class':
try:
check_if_zero(np.sum(np.abs(self.counts[node_id] - \
self.counts[node_id.left] - self.counts[node_id.right])))
except AssertionError:
print 'counts: node = %s, left = %s, right = %s' \
% (self.counts[node_id], self.counts[node_id.left], self.counts[node_id.right])
raise AssertionError
if settings.budget == -1:
assert math.isinf(node_id.budget)
check_if_zero(self.split_times[node_id] - self.get_parent_split_time(node_id, settings) \
- self.max_split_costs[node_id])
if settings.optype == 'class':
num_data_points = 0
for node_id in self.leaf_nodes:
assert node_id.is_leaf
assert math.isinf(self.max_split_costs[node_id])
if settings.budget == -1:
assert math.isinf(node_id.budget)
if settings.optype == 'class':
num_data_points += self.counts[node_id].sum()
assert (np.count_nonzero(self.counts[node_id]) == 1) or (np.sum(self.counts[node_id]) < settings.min_samples_split)
assert self.pause_mondrian(node_id, settings)
if node_id != self.root:
assert np.all(node_id.min_d >= node_id.parent.min_d)
assert np.all(node_id.max_d <= node_id.parent.max_d)
if settings.optype == 'class':
print 'num_train = %s, number of data points at leaf nodes = %s' % \
(data['n_train'], num_data_points)
set_non_leaf = set(self.non_leaf_nodes)
set_leaf = set(self.leaf_nodes)
assert (set_leaf & set_non_leaf) == set([])
assert set_non_leaf == set(self.node_info.keys())
assert len(set_leaf) == len(self.leaf_nodes)
assert len(set_non_leaf) == len(self.non_leaf_nodes)
def extend_mondrian_block(self, node_id, train_ids_new, data, settings, param, cache):
"""
conditional Mondrian algorithm that extends a Mondrian block to include new training data
"""
if settings.debug:
print 'entered extend_mondrian_block'
print '\nextend_mondrian_block: node_id = %s' % node_id
if not train_ids_new.size:
if settings.debug:
print 'nothing to extend here; train_ids_new = %s' % train_ids_new
# nothing to extend
return
min_d, max_d = get_data_min_max(data, train_ids_new)
additional_extent_lower = np.maximum(0, node_id.min_d - min_d)
additional_extent_upper = np.maximum(0, max_d - node_id.max_d)
expo_parameter = float(additional_extent_lower.sum() + additional_extent_upper.sum())
if expo_parameter == 0:
split_cost = np.inf
else:
split_cost = random.expovariate(expo_parameter) # will be updated below in case mondrian is paused
unpause_paused_mondrian = False
if settings.debug:
print 'is_leaf = %s, pause_mondrian = %s, sum_range_d = %s' % \
(node_id.is_leaf, self.pause_mondrian(node_id, settings), node_id.sum_range_d)
if self.pause_mondrian(node_id, settings):
assert node_id.is_leaf
split_cost = np.inf
n_points_new = len(data['y_train'][train_ids_new])
# FIXME: node_id.sum_range_d not tested
if settings.optype == 'class':
y_unique = np.unique(data['y_train'][train_ids_new])
# unpause only if more than one unique label and number of points >= min_samples_split
is_pure_leaf = (len(y_unique) == 1) and (self.counts[node_id][y_unique] > 0) \
and self.check_if_labels_same(node_id)
if is_pure_leaf:
unpause_paused_mondrian = False
else:
unpause_paused_mondrian = \
((n_points_new + np.sum(self.counts[node_id])) >= settings.min_samples_split)
else:
unpause_paused_mondrian = \
not( (n_points_new + self.n_points[node_id]) < settings.min_samples_split )
if settings.debug:
print 'trying to extend a paused Mondrian; is_leaf = %s, node_id = %s' % (node_id.is_leaf, node_id)
if settings.optype == 'class':
print 'y_unique (new) = %s, n_points_new = %s, counts = %s, split_cost = %s, max_split_costs = %s' % \
(y_unique, n_points_new, self.counts[node_id], split_cost, self.max_split_costs[node_id])
print 'unpause_paused_mondrian = %s, is_pure_leaf = %s' % (unpause_paused_mondrian, is_pure_leaf)
if split_cost >= self.max_split_costs[node_id]:
# take root form of node_id (no cut outside the extent of the current block)
if not node_id.is_leaf:
if settings.debug:
print 'take root form: non-leaf node'
feat_id, split = self.node_info[node_id]
update_range_stats(node_id, (min_d, max_d)) # required here as well
left, right = node_id.left, node_id.right
cond = data['x_train'][train_ids_new, feat_id] <= split
train_ids_new_left, train_ids_new_right = train_ids_new[cond], train_ids_new[~cond]
self.add_training_points_to_node(node_id, train_ids_new, data, param, settings, cache, False)
self.extend_mondrian_block(left, train_ids_new_left, data, settings, param, cache)
self.extend_mondrian_block(right, train_ids_new_right, data, settings, param, cache)
else:
# reached a leaf; add train_ids_new to node_id & update range
if settings.debug:
print 'take root form: leaf node'
assert node_id.is_leaf
update_range_stats(node_id, (min_d, max_d))
self.add_training_points_to_node(node_id, train_ids_new, data, param, settings, cache, True)
# FIXME: node_id.sum_range_d tested here; perhaps move this to pause_mondrian?
unpause_paused_mondrian = unpause_paused_mondrian and (node_id.sum_range_d != 0)
if not self.pause_mondrian(node_id, settings):
assert unpause_paused_mondrian
self.leaf_nodes.remove(node_id)
self.grow_nodes = [node_id]
self.grow(data, settings, param, cache)
else:
# initialize "outer mondrian"
if settings.debug:
print 'trying to introduce a cut outside current block'
new_block = MondrianBlock(data, settings, node_id.budget, node_id.parent, \
get_data_range_from_min_max(np.minimum(min_d, node_id.min_d), np.maximum(max_d, node_id.max_d)))
init_update_posterior_node_incremental(self, data, param, settings, cache, new_block, \
train_ids_new, node_id) # counts of outer block are initialized with counts of current block
if node_id.is_leaf:
warn('\nWARNING: a leaf should not be expanded here; printing out some diagnostics')
print 'node_id = %s, is_leaf = %s, max_split_cost = %s, split_cost = %s' \
% (node_id, node_id.is_leaf, self.max_split_costs[node_id], split_cost)
print 'counts = %s\nmin_d = \n%s\nmax_d = \n%s' % (self.counts[node_id], node_id.min_d, node_id.max_d)
raise Exception('a leaf should be expanded via grow call; see diagnostics above')
if settings.debug:
print 'looks like cut possible'
# there is a cut outside the extent of the current block
feat_score = additional_extent_lower + additional_extent_upper
feat_id = sample_multinomial_scores(feat_score)
draw_from_lower = np.random.rand() <= (additional_extent_lower[feat_id] / feat_score[feat_id])
if draw_from_lower:
split = random.uniform(min_d[feat_id], node_id.min_d[feat_id])
else:
split = random.uniform(node_id.max_d[feat_id], max_d[feat_id])
assert (split < node_id.min_d[feat_id]) or (split > node_id.max_d[feat_id])
new_budget = node_id.budget - split_cost
cond = data['x_train'][train_ids_new, feat_id] <= split
train_ids_new_left, train_ids_new_right = train_ids_new[cond], train_ids_new[~cond]
is_left = split > node_id.max_d[feat_id] # is existing block the left child of "outer mondrian"?
if is_left:
train_ids_new_child = train_ids_new_right # new_child is the other child of "outer mondrian"
else:
train_ids_new_child = train_ids_new_left
# grow the "unconditional mondrian child" of the "outer mondrian"
new_child = MondrianBlock(data, settings, new_budget, new_block, get_data_range(data, train_ids_new_child))
if settings.debug:
print 'new_block = %s' % new_block
print 'new_child = %s' % new_child
self.train_ids[new_child] = train_ids_new_child # required for grow call below
init_update_posterior_node_incremental(self, data, param, settings, cache, new_child, train_ids_new_child)
self.node_info[new_block] = (feat_id, split)
if settings.draw_mondrian:
train_ids_new_block = np.append(self.train_ids[node_id], train_ids_new)
self.train_ids[new_block] = train_ids_new_block
self.non_leaf_nodes.append(new_block)
new_block.is_leaf = False
# update budget and call the "conditional mondrian child" of the "outer mondrian"
node_id.budget = new_budget
# self.max_split_costs[new_child] will be added in the grow call above
self.max_split_costs[new_block] = split_cost
self.split_times[new_block] = split_cost + self.get_parent_split_time(node_id, settings)
self.max_split_costs[node_id] -= split_cost
check_if_zero(self.split_times[node_id] - self.split_times[new_block] - self.max_split_costs[node_id])
# grow the new child of the "outer mondrian"
self.grow_nodes = [new_child]
self.grow(data, settings, param, cache)
# update tree structure and extend "conditional mondrian child" of the "outer mondrian"
if node_id == self.root:
self.root = new_block
else:
if settings.debug:
assert (node_id.parent.left == node_id) or (node_id.parent.right == node_id)
if node_id.parent.left == node_id:
node_id.parent.left = new_block
else:
node_id.parent.right = new_block
node_id.parent = new_block
if is_left:
new_block.left = node_id
new_block.right = new_child
self.extend_mondrian_block(node_id, train_ids_new_left, data, settings, param, cache)
else:
new_block.left = new_child
new_block.right = node_id
self.extend_mondrian_block(node_id, train_ids_new_right, data, settings, param, cache)
def add_training_points_to_node(self, node_id, train_ids_new, data, param, settings, cache, pause_mondrian=False):
"""
add a training data point to a node in the tree
"""
# range updated in extend_mondrian_block
if settings.draw_mondrian or pause_mondrian:
self.train_ids[node_id] = np.append(self.train_ids[node_id], train_ids_new)
update_posterior_node_incremental(self, data, param, settings, cache, node_id, train_ids_new)
def update_posterior_counts(self, param, data, settings):
"""
posterior update for hierarchical normalized stable distribution
        using interpolated Kneser-Ney smoothing (where the number of tables serving a dish at a restaurant is at most 1)
        NOTE: implementation optimized for minibatch training where more than one data point is added per minibatch;
        if only 1 data point is added, lots of counts will be unnecessarily updated
"""
self.cnt = {}
node_list = [self.root]
while True:
try:
node_id = node_list.pop(0)
except IndexError:
break
if node_id.is_leaf:
cnt = self.counts[node_id]
else:
cnt = np.minimum(self.counts[node_id.left], 1) + np.minimum(self.counts[node_id.right], 1)
node_list.extend([node_id.left, node_id.right])
self.cnt[node_id] = cnt
def update_predictive_posteriors(self, param, data, settings):
"""
update predictive posterior for hierarchical normalized stable distribution
pred_prob computes posterior mean of the label distribution at each node recursively
"""
node_list = [self.root]
if settings.debug:
self.gen_node_ids_print()
while True:
try:
node_id = node_list.pop(0)
except IndexError:
break
base = self.get_prior_mean(node_id, param, settings)
discount = self.get_discount_node_id(node_id, settings)
cnt = self.cnt[node_id]
if not node_id.is_leaf:
self.pred_prob[node_id] = self.compute_posterior_mean_normalized_stable(cnt, discount, base, settings)
node_list.extend([node_id.left, node_id.right])
if settings.debug and False:
print 'node_id = %20s, is_leaf = %5s, discount = %.2f, cnt = %s, base = %s, pred_prob = %s' \
% (self.node_ids_print[node_id], node_id.is_leaf, discount, cnt, base, self.pred_prob[node_id])
check_if_one(np.sum(self.pred_prob[node_id]))
def get_variance_node(self, node_id, param, settings):
# the non-linear transformation should be a monotonically non-decreasing function
# if the function saturates (e.g. sigmoid) children will be closer to parent deeper down the tree
# var = self.variance_coef * (sigmoid(self.sigmoid_coef * self.split_times[node_id]) \
# - sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings)))
var = self.variance_coef * (sigmoid(self.sigmoid_coef * self.split_times[node_id]) \
- sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings)))
return var
def update_posterior_gaussians(self, param, data, settings):
"""
computes marginal gaussian distribution at each node of the tree using gaussian belief propagation
the solution is exact since underlying graph is a tree
solution takes O(#nodes) time, which is much more efficient than naive GP implementation which
would cost O(#nodes^3) time
"""
self.gen_node_list()
self.message_to_parent = {}
self.message_from_parent = {}
self.likelihood_children = {}
self.pred_param = {}
self.pred_moments = {}
for node_id in self.node_list[::-1]:
if node_id.is_leaf:
# use marginal likelihood of data at this leaf
mean = self.sum_y[node_id] / float(self.n_points[node_id])
variance = self.get_variance_node(node_id, param, settings) \
+ self.noise_variance / float(self.n_points[node_id])
precision = 1.0 / variance
self.message_to_parent[node_id] = np.array([mean, precision])
self.likelihood_children[node_id] = np.array([mean, self.noise_precision*float(self.n_points[node_id])])
else:
likelihood_children = multiply_gaussians(self.message_to_parent[node_id.left], \
self.message_to_parent[node_id.right])
mean = likelihood_children[0]
self.likelihood_children[node_id] = likelihood_children
variance = self.get_variance_node(node_id, param, settings) + 1.0 / likelihood_children[1]
precision = 1.0 / variance
self.message_to_parent[node_id] = np.array([mean, precision])
variance_at_root = self.get_variance_node(node_id, param, settings)
self.message_from_parent[self.root] = np.array([param.prior_mean, variance_at_root])
for node_id in self.node_list:
# pred_param stores the mean and precision
self.pred_param[node_id] = multiply_gaussians(self.message_from_parent[node_id], \
self.likelihood_children[node_id])
# pred_moments stores the first and second moments (useful for prediction)
self.pred_moments[node_id] = np.array([self.pred_param[node_id][0], \
1.0 / self.pred_param[node_id][1] + self.pred_param[node_id][0] ** 2 + self.noise_variance])
if not node_id.is_leaf:
self.message_from_parent[node_id.left] = \
multiply_gaussians(self.message_from_parent[node_id], self.message_to_parent[node_id.right])
self.message_from_parent[node_id.right] = \
multiply_gaussians(self.message_from_parent[node_id], self.message_to_parent[node_id.left])
def update_posterior_counts_and_predictive_posteriors(self, param, data, settings):
if settings.optype == 'class':
# update posterior counts
self.update_posterior_counts(param, data, settings)
# update predictive posteriors
self.update_predictive_posteriors(param, data, settings)
else:
# updates hyperparameters in param (common to all trees)
self.update_gaussian_hyperparameters(param, data, settings)
# updates hyperparameters in self (independent for each tree)
# self.update_gaussian_hyperparameters_indep(param, data, settings)
if settings.smooth_hierarchically:
self.update_posterior_gaussians(param, data, settings)
def get_prior_mean(self, node_id, param, settings):
if settings.optype == 'class':
if node_id == self.root:
base = param.base_measure
else:
base = self.pred_prob[node_id.parent]
else:
            base = None # for settings.smooth_hierarchically = False
return base
def get_discount_node_id(self, node_id, settings):
"""
compute discount for a node (function of discount_param, time of split and time of split of parent)
"""
discount = math.exp(-settings.discount_param * self.max_split_costs[node_id])
return discount
def compute_posterior_mean_normalized_stable(self, cnt, discount, base, settings):
num_tables_k, num_customers, num_tables = self.get_counts(cnt)
pred_prob = (cnt - discount * num_tables_k + discount * num_tables * base) / num_customers
if settings.debug:
check_if_one(pred_prob.sum())
return pred_prob
def get_counts(self, cnt):
num_tables_k = np.minimum(cnt, 1)
num_customers = float(cnt.sum())
num_tables = float(num_tables_k.sum())
return (num_tables_k, num_customers, num_tables)
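    # Worked example for compute_posterior_mean_normalized_stable (hypothetical numbers,
    # purely illustrative): with cnt = [3, 1, 0], discount = 0.5 and a uniform base = [1/3] * 3,
    # get_counts returns num_tables_k = [1, 1, 0], num_customers = 4.0, num_tables = 2.0, so
    #   pred_prob = ([3, 1, 0] - 0.5 * [1, 1, 0] + 0.5 * 2 * [1/3, 1/3, 1/3]) / 4
    #             ~ [0.708, 0.208, 0.083],
    # which sums to 1 as checked by check_if_one above.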
def get_data_range(data, train_ids):
"""
returns min, max, range and linear dimension of training data
"""
min_d, max_d = get_data_min_max(data, train_ids)
range_d = max_d - min_d
sum_range_d = float(range_d.sum())
return (min_d, max_d, range_d, sum_range_d)
def get_data_min_max(data, train_ids):
"""
returns min, max of training data
"""
x_tmp = data['x_train'].take(train_ids, 0)
min_d = np.min(x_tmp, 0)
max_d = np.max(x_tmp, 0)
return (min_d, max_d)
def get_data_range_from_min_max(min_d, max_d):
range_d = max_d - min_d
sum_range_d = float(range_d.sum())
return (min_d, max_d, range_d, sum_range_d)
def update_range_stats(node_id, (min_d, max_d)):
"""
updates min and max of training data at this block
"""
node_id.min_d = np.minimum(node_id.min_d, min_d)
node_id.max_d = np.maximum(node_id.max_d, max_d)
node_id.range_d = node_id.max_d - node_id.min_d
node_id.sum_range_d = float(node_id.range_d.sum())
def get_children_id(parent):
return (parent.left, parent.right)
class MondrianForest(Forest):
"""
defines Mondrian forest
variables:
- forest : stores the Mondrian forest
methods:
- fit(data, train_ids_current_minibatch, settings, param, cache) : batch training
- partial_fit(data, train_ids_current_minibatch, settings, param, cache) : online training
- evaluate_predictions (see Forest in mondrianforest_utils.py) : predictions
"""
def __init__(self, settings, data):
self.forest = [None] * settings.n_mondrians
if settings.optype == 'class':
settings.discount_param = settings.discount_factor * data['n_dim']
def fit(self, data, train_ids_current_minibatch, settings, param, cache):
for i_t, tree in enumerate(self.forest):
if settings.verbose >= 2 or settings.debug:
print 'tree_id = %s' % i_t
tree = self.forest[i_t] = MondrianTree(data, train_ids_current_minibatch, settings, param, cache)
tree.update_posterior_counts_and_predictive_posteriors(param, data, settings)
def partial_fit(self, data, train_ids_current_minibatch, settings, param, cache):
for i_t, tree in enumerate(self.forest):
if settings.verbose >= 2 or settings.debug:
print 'tree_id = %s' % i_t
tree.extend_mondrian(data, train_ids_current_minibatch, settings, param, cache)
tree.update_posterior_counts_and_predictive_posteriors(param, data, settings)
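# Minimal usage sketch for MondrianForest (mirrors the calls made in main() below;
# `data`, `settings`, `param` and `cache` are assumed to come from load_data /
# process_command_line / precompute_minimal):
#
#   mf = MondrianForest(settings, data)
#   mf.fit(data, data['train_ids_partition']['current'][0], settings, param, cache)
#   for idx_minibatch in range(1, settings.n_minibatches):
#       mf.partial_fit(data, data['train_ids_partition']['current'][idx_minibatch],
#                      settings, param, cache)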
def main():
time_0 = time.clock()
settings = process_command_line()
print
print '%' * 120
print 'Beginning mondrianforest.py'
print 'Current settings:'
pp.pprint(vars(settings))
# Resetting random seed
reset_random_seed(settings)
# Loading data
print '\nLoading data ...'
data = load_data(settings)
print 'Loading data ... completed'
print 'Dataset name = %s' % settings.dataset
print 'Characteristics of the dataset:'
print 'n_train = %d, n_test = %d, n_dim = %d' %\
(data['n_train'], data['n_test'], data['n_dim'])
if settings.optype == 'class':
print 'n_class = %d' % (data['n_class'])
# precomputation
param, cache = precompute_minimal(data, settings)
time_init = time.clock() - time_0
print '\nCreating Mondrian forest'
# online training with minibatches
time_method_sans_init = 0.
time_prediction = 0.
mf = MondrianForest(settings, data)
if settings.store_every:
log_prob_test_minibatch = -np.inf * np.ones(settings.n_minibatches)
log_prob_train_minibatch = -np.inf * np.ones(settings.n_minibatches)
metric_test_minibatch = -np.inf * np.ones(settings.n_minibatches)
metric_train_minibatch = -np.inf * np.ones(settings.n_minibatches)
time_method_minibatch = np.inf * np.ones(settings.n_minibatches)
forest_numleaves_minibatch = np.zeros(settings.n_minibatches)
for idx_minibatch in range(settings.n_minibatches):
time_method_init = time.clock()
is_last_minibatch = (idx_minibatch == settings.n_minibatches - 1)
print_results = is_last_minibatch or (settings.verbose >= 2) or settings.debug
if print_results:
print '*' * 120
print 'idx_minibatch = %5d' % idx_minibatch
train_ids_current_minibatch = data['train_ids_partition']['current'][idx_minibatch]
if settings.debug:
print 'bagging = %s, train_ids_current_minibatch = %s' % \
(settings.bagging, train_ids_current_minibatch)
if idx_minibatch == 0:
mf.fit(data, train_ids_current_minibatch, settings, param, cache)
else:
mf.partial_fit(data, train_ids_current_minibatch, settings, param, cache)
for i_t, tree in enumerate(mf.forest):
if settings.debug or settings.verbose >= 2:
print '-'*100
tree.print_tree(settings)
print '.'*100
if settings.draw_mondrian:
tree.draw_mondrian(data, settings, idx_minibatch, i_t)
if settings.save == 1:
filename_plot = get_filename_mf(settings)[:-2]
if settings.store_every:
plt.savefig(filename_plot + '-mondrians_minibatch-' + str(idx_minibatch) + '.pdf', format='pdf')
time_method_sans_init += time.clock() - time_method_init
time_method = time_method_sans_init + time_init
# Evaluate
if is_last_minibatch or settings.store_every:
time_predictions_init = time.clock()
weights_prediction = np.ones(settings.n_mondrians) * 1.0 / settings.n_mondrians
if False:
if print_results:
print 'Results on training data (log predictive prob is bogus)'
train_ids_cumulative = data['train_ids_partition']['cumulative'][idx_minibatch]
# NOTE: some of these data points are not used for "training" if bagging is used
pred_forest_train, metrics_train = \
mf.evaluate_predictions(data, data['x_train'][train_ids_cumulative, :], \
data['y_train'][train_ids_cumulative], \
settings, param, weights_prediction, print_results)
else:
# not computing metrics on training data
metrics_train = {'log_prob': -np.inf, 'acc': 0, 'mse': np.inf}
pred_forest_train = None
if print_results:
print '\nResults on test data'
pred_forest_test, metrics_test = \
mf.evaluate_predictions(data, data['x_test'], data['y_test'], \
settings, param, weights_prediction, print_results)
name_metric = settings.name_metric # acc or mse
log_prob_train = metrics_train['log_prob']
log_prob_test = metrics_test['log_prob']
metric_train = metrics_train[name_metric]
metric_test = metrics_test[name_metric]
if settings.store_every:
log_prob_train_minibatch[idx_minibatch] = metrics_train['log_prob']
log_prob_test_minibatch[idx_minibatch] = metrics_test['log_prob']
metric_train_minibatch[idx_minibatch] = metrics_train[name_metric]
metric_test_minibatch[idx_minibatch] = metrics_test[name_metric]
time_method_minibatch[idx_minibatch] = time_method
tree_numleaves = np.zeros(settings.n_mondrians)
for i_t, tree in enumerate(mf.forest):
tree_numleaves[i_t] = len(tree.leaf_nodes)
forest_numleaves_minibatch[idx_minibatch] = np.mean(tree_numleaves)
time_prediction += time.clock() - time_predictions_init
# printing test performance:
if settings.store_every:
print 'printing test performance for every minibatch:'
print 'idx_minibatch\tmetric_test\ttime_method\tnum_leaves'
for idx_minibatch in range(settings.n_minibatches):
print '%10d\t%.3f\t\t%.3f\t\t%.1f' % \
(idx_minibatch, \
metric_test_minibatch[idx_minibatch], \
time_method_minibatch[idx_minibatch], forest_numleaves_minibatch[idx_minibatch])
print '\nFinal forest stats:'
tree_stats = np.zeros((settings.n_mondrians, 2))
tree_average_depth = np.zeros(settings.n_mondrians)
for i_t, tree in enumerate(mf.forest):
tree_stats[i_t, -2:] = np.array([len(tree.leaf_nodes), len(tree.non_leaf_nodes)])
tree_average_depth[i_t] = tree.get_average_depth(settings, data)[0]
print 'mean(num_leaves) = %.1f, mean(num_non_leaves) = %.1f, mean(tree_average_depth) = %.1f' \
% (np.mean(tree_stats[:, -2]), np.mean(tree_stats[:, -1]), np.mean(tree_average_depth))
print 'n_train = %d, log_2(n_train) = %.1f, mean(tree_average_depth) = %.1f +- %.1f' \
% (data['n_train'], np.log2(data['n_train']), np.mean(tree_average_depth), np.std(tree_average_depth))
if settings.draw_mondrian:
if settings.save == 1:
plt.savefig(filename_plot + '-mondrians-final.pdf', format='pdf')
else:
plt.show()
# Write results to disk (timing doesn't include saving)
time_total = time.clock() - time_0
# resetting
if settings.save == 1:
filename = get_filename_mf(settings)
print 'filename = ' + filename
results = {'log_prob_test': log_prob_test, 'log_prob_train': log_prob_train, \
'metric_test': metric_test, 'metric_train': metric_train, \
'time_total': time_total, 'time_method': time_method, \
'time_init': time_init, 'time_method_sans_init': time_method_sans_init,\
'time_prediction': time_prediction}
if 'log_prob2' in metrics_test:
results['log_prob2_test'] = metrics_test['log_prob2']
store_data = settings.dataset[:3] == 'toy' or settings.dataset == 'sim-reg'
if store_data:
results['data'] = data
if settings.store_every:
results['log_prob_test_minibatch'] = log_prob_test_minibatch
results['log_prob_train_minibatch'] = log_prob_train_minibatch
results['metric_test_minibatch'] = metric_test_minibatch
results['metric_train_minibatch'] = metric_train_minibatch
results['time_method_minibatch'] = time_method_minibatch
results['forest_numleaves_minibatch'] = forest_numleaves_minibatch
results['settings'] = settings
results['tree_stats'] = tree_stats
results['tree_average_depth'] = tree_average_depth
pickle.dump(results, open(filename, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
# storing final predictions as well; recreate new "results" dict
results = {'pred_forest_train': pred_forest_train, \
'pred_forest_test': pred_forest_test}
filename2 = filename[:-2] + '.tree_predictions.p'
pickle.dump(results, open(filename2, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
time_total = time.clock() - time_0
print
print 'Time for initializing Mondrian forest (seconds) = %f' % (time_init)
print 'Time for executing mondrianforest.py (seconds) = %f' % (time_method_sans_init)
print 'Total time for executing mondrianforest.py, including init (seconds) = %f' % (time_method)
print 'Time for prediction/evaluation (seconds) = %f' % (time_prediction)
print 'Total time (Loading data/ initializing / running / predictions / saving) (seconds) = %f\n' % (time_total)
if __name__ == "__main__":
main()
| mit |
mganeva/mantid | MantidPlot/pymantidplot/proxies.py | 1 | 37572 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""
Module containing classes that act as proxies to the various MantidPlot gui objects that are
accessible from python. They listen for the QObject 'destroyed' signal and set the wrapped
reference to None, thus ensuring that further attempts at access do not cause a crash.
"""
from __future__ import (absolute_import, division,
print_function)
from six.moves import range
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt, pyqtSlot
try:
import builtins
except ImportError:
import __builtin__ as builtins
import mantid
import mantidqtpython
#-----------------------------------------------------------------------------
#--------------------------- MultiThreaded Access ----------------------------
#-----------------------------------------------------------------------------
class CrossThreadCall(QtCore.QObject):
"""
Defines a dispatch call that marshals
function calls between threads
"""
__callable = None
__args = []
__kwargs = {}
__func_return = None
__exception = None
def __init__(self, callable):
""" Construct the object
"""
QtCore.QObject.__init__(self)
self.moveToThread(QtGui.qApp.thread())
self.__callable = callable
self.__call__.__func__.__doc__ = callable.__doc__
def dispatch(self, *args, **kwargs):
"""Dispatches a call to callable with
the given arguments using QMetaObject.invokeMethod
to ensure the call happens in the object's thread
"""
self.__args = args
self.__kwargs = kwargs
self.__func_return = None
self.__exception = None
return self._do_dispatch()
def __call__(self, *args, **kwargs):
"""
Calls the dispatch method
"""
return self.dispatch(*args, **kwargs)
@pyqtSlot()
def _do_dispatch(self):
"""Perform a call to a GUI function across a
thread and return the result
"""
if QtCore.QThread.currentThread() != QtGui.qApp.thread():
QtCore.QMetaObject.invokeMethod(self, "_do_dispatch", Qt.BlockingQueuedConnection)
else:
try:
self.__func_return = self.__callable(*self.__args, **self.__kwargs)
except Exception as exc:
self.__exception = exc
if self.__exception is not None:
raise self.__exception # Ensures this happens in the right thread
return self.__func_return
def _get_argtype(self, argument):
"""
Returns the argument type that will be passed to
the QMetaObject.invokeMethod call.
Most types pass okay, but enums don't so they have
to be coerced to ints. An enum is currently detected
as a type that is not a bool and inherits from int
"""
argtype = type(argument)
        if isinstance(argument, builtins.int) and argtype != bool:
            argtype = int
        return argtype
def threadsafe_call(callable, *args, **kwargs):
"""
Calls the given function with the given arguments
by passing it through the CrossThreadCall class. This
ensures that the calls to the GUI functions
happen on the correct thread.
"""
caller = CrossThreadCall(callable)
return caller.dispatch(*args, **kwargs)
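# Usage sketch (illustrative only; `graph` stands for any proxied GUI object held elsewhere):
#
#   # run activeLayer() on the GUI thread even when called from a script thread
#   layer = threadsafe_call(graph._getHeldObject().activeLayer)
#
#   # positional arguments are forwarded unchanged, e.g. resizing a window
#   threadsafe_call(graph._getHeldObject().resize, 600, 400)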
def new_proxy(classType, callable, *args, **kwargs):
"""
Calls the callable object with the given args and kwargs dealing
with possible thread-safety issues.
If the returned value is not None it is wrapped in a new proxy of type classType
@param classType :: A new proxy class for the return type
@param callable :: A python callable object, i.e. a function/method
@param \*args :: The positional arguments passed on as given
@param \*kwargs :: The keyword arguments passed on as given
"""
obj = threadsafe_call(callable, *args, **kwargs)
if obj is None:
return obj
return classType(obj)
#-----------------------------------------------------------------------------
#--------------------------- Proxy Objects -----------------------------------
#-----------------------------------------------------------------------------
class QtProxyObject(QtCore.QObject):
"""Generic Proxy object for wrapping Qt C++ QObjects.
This holds the QObject internally and passes methods to it.
When the underlying object is deleted, the reference is set
to None to avoid segfaults.
"""
def __init__(self, toproxy):
QtCore.QObject.__init__(self)
self.__obj = toproxy
# Connect to track the destroyed
if self.__obj is not None:
self.connect(self.__obj, QtCore.SIGNAL("destroyed()"),
self._kill_object, Qt.DirectConnection)
def __del__(self):
"""
Disconnect the signal
"""
self._disconnect_from_destroyed()
def close(self):
"""
        Close the wrapped object (closing any dependants first) and release the held reference
"""
self._disconnect_from_destroyed()
if hasattr(self.__obj, 'closeDependants'):
threadsafe_call(self.__obj.closeDependants)
if hasattr(self.__obj, 'close'):
threadsafe_call(self.__obj.close)
self._kill_object()
def inherits(self, className):
"""
Reroute a method call to the stored object
"""
return threadsafe_call(self.__obj.inherits, className)
def _disconnect_from_destroyed(self):
"""
Disconnects from the wrapped object's destroyed signal
"""
if self.__obj is not None:
self.disconnect(self.__obj, QtCore.SIGNAL("destroyed()"),
self._kill_object)
def __getattr__(self, attr):
"""
        Reroute a method call to the stored object via
the threadsafe call mechanism. Essentially this guarantees
that when the method is called it will be on the GUI thread
"""
callable = getattr(self._getHeldObject(), attr)
return CrossThreadCall(callable)
def __dir__(self):
return dir(self._getHeldObject())
def __str__(self):
"""
Return a string representation of the proxied object
"""
return str(self._getHeldObject())
def __repr__(self):
"""
Return a string representation of the proxied object
"""
return repr(self._getHeldObject())
def _getHeldObject(self):
"""
Returns a reference to the held object
"""
return self.__obj
def _kill_object(self):
"""
Release the stored instance
"""
self.__obj = None
def _swap(self, obj):
"""
Swap an object so that the proxy now refers to this object
"""
self.__obj = obj
#-----------------------------------------------------------------------------
class MDIWindow(QtProxyObject):
"""Proxy for the _qti.MDIWindow object.
Also used for subclasses that do not need any methods intercepted (e.g. Table, Note, Matrix)
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def folder(self):
return new_proxy(Folder, self._getHeldObject().folder)
#-----------------------------------------------------------------------------
class Graph(MDIWindow):
"""Proxy for the _qti.Graph object.
"""
# When checking the SIP interface, remember the following name mappings (PyName):
# C++ 'Multilayer' class => Python 'Graph' class
# C++ 'Graph' class => Python 'Layer' class
def __init__(self, toproxy):
MDIWindow.__init__(self,toproxy)
def activeLayer(self):
"""Get a handle to the presently active layer """
return new_proxy(Layer, self._getHeldObject().activeLayer)
def setActiveLayer(self, layer):
"""Set the active layer to that specified.
Args:
layer: A reference to the Layer to make the active one. Must belong to this Graph.
"""
threadsafe_call(self._getHeldObject().setActiveLayer, layer._getHeldObject())
def layer(self, num):
""" Get a handle to a specified layer
Args:
num: The index of the required layer
"""
return new_proxy(Layer, self._getHeldObject().layer, num)
def addLayer(self, x=0, y=0, width=None, height=None):
"""Add a layer to the graph.
Args:
x: The coordinate in pixels (from the graph's left) of the top-left of the new layer (default: 0).
y: The coordinate in pixels (from the graph's top) of the top-left of the new layer (default: 0).
width: The width of the new layer (default value if not specified).
height: The height of the new layer (default value if not specified).
Returns:
A handle to the newly created layer.
"""
# Turn the optional arguments into the magic numbers that the C++ expects
if width is None:
width=0
if height is None:
height=0
return new_proxy(Layer, self._getHeldObject().addLayer, x,y,width,height)
def insertCurve(self, graph, index):
"""Add a curve from another graph to this one.
Args:
graph: A reference to the graph from which the curve is coming (does nothing if this argument is the present Graph).
index: The index of the curve to add (counts from zero).
"""
threadsafe_call(self._getHeldObject().insertCurve, graph._getHeldObject(), index)
#-----------------------------------------------------------------------------
class Layer(QtProxyObject):
"""Proxy for the _qti.Layer object.
"""
# These methods are used for the new matplotlib-like CLI
# These ones are provided by the C++ class Graph, which in the SIP declarations is renamed as Layer
# The only purpose of listing them here is that these will be returned by this class' __dir()__, and
# shown interactively, while the ones not listed and/or overloaded here may not be shown in ipython, etc.
additional_methods = ['logLogAxes', 'logXLinY', 'logXLinY',
'removeLegend', 'export', 'setAxisScale', 'setCurveLineColor', 'setCurveLineStyle',
'setCurveLineWidth', 'setCurveSymbol', 'setScale', 'setTitle', 'setXTitle', 'setYTitle']
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def insertCurve(self, *args):
"""Add a curve from a workspace, table or another Layer to the plot
Args:
The first argument should be a reference to a workspace, table or layer, or a workspace name.
Subsequent arguments vary according to the type of the first.
Returns:
A boolean indicating success or failure.
"""
if isinstance(args[0],str):
return threadsafe_call(self._getHeldObject().insertCurve, *args)
elif hasattr(args[0], 'getName'):
return threadsafe_call(self._getHeldObject().insertCurve, args[0].name(),*args[1:])
else:
return threadsafe_call(self._getHeldObject().insertCurve, args[0]._getHeldObject(),*args[1:])
def addCurves(self, table, columns, style=0, lineWidth=1, symbolSize=3, startRow=0, endRow=-1):
"""Add curves based on table columns to the plot.
Args:
table: A reference to the table containing the data to plot.
columns: A tuple of column indices.
style: The curve style (default: Line).
lineWidth: The width of the curve line (default: 1).
symbolSize: The curve's symbol size (default: 3).
startRow: The first row to include in the curve's data (default: the first one)
endRow: The last row to include in the curve's data (default: the last one)
Returns:
A boolean indicating success or failure.
"""
return threadsafe_call(self._getHeldObject().addCurves, table._getHeldObject(),columns,style,lineWidth,symbolSize,startRow,endRow)
def addCurve(self, table, columnName, style=0, lineWidth=1, symbolSize=3, startRow=0, endRow=-1):
"""Add a curve based on a table column to the plot.
Args:
table: A reference to the table containing the data to plot.
            columnName: The name of the column to plot.
style: The curve style (default: Line).
lineWidth: The width of the curve line (default: 1).
symbolSize: The curve's symbol size (default: 3).
startRow: The first row to include in the curve's data (default: the first one)
endRow: The last row to include in the curve's data (default: the last one)
Returns:
A boolean indicating success or failure.
"""
return threadsafe_call(self._getHeldObject().addCurve, table._getHeldObject(),columnName,style,lineWidth,symbolSize,startRow,endRow)
def addErrorBars(self, yColName, errTable, errColName, type=1, width=1, cap=8, color=Qt.black, through=False, minus=True, plus=True):
"""Add error bars to a plot that was created from a table column.
Args:
yColName: The name of the column pertaining to the curve's data values.
errTable: A reference to the table holding the error values.
errColName: The name of the column containing the error values.
type: The orientation of the error bars - horizontal (0) or vertical (1, the default).
width: The line width of the error bars (default: 1).
cap: The length of the cap on the error bars (default: 8).
color: The color of error bars (default: black).
through: Whether the error bars are drawn through the symbol (default: no).
minus: Whether these errors should be shown as negative errors (default: yes).
plus: Whether these errors should be shown as positive errors (default: yes).
"""
threadsafe_call(self._getHeldObject().addErrorBars, yColName,errTable._getHeldObject(),errColName,type,width,cap,color,through,minus,plus)
def errorBarSettings(self, curveIndex, errorBarIndex=0):
"""Get a handle to the error bar settings for a specified curve.
Args:
curveIndex: The curve to get the settings for
errorBarIndex: A curve can hold more than one set of error bars. Specify which one (default: the first).
Note that a curve plotted from a workspace can have only one set of error bars (and hence settings).
Returns: A handle to the error bar settings object.
"""
return new_proxy(QtProxyObject, self._getHeldObject().errorBarSettings, curveIndex,errorBarIndex)
def addHistogram(self, matrix):
"""Add a matrix histogram to the graph"""
threadsafe_call(self._getHeldObject().addHistogram, matrix._getHeldObject())
def newLegend(self, text):
"""Create a new legend.
Args:
text: The text of the legend.
Returns:
A handle to the newly created legend widget.
"""
return new_proxy(QtProxyObject, self._getHeldObject().newLegend, text)
def legend(self):
"""Get a handle to the layer's legend widget."""
return new_proxy(QtProxyObject, self._getHeldObject().legend)
def grid(self):
"""Get a handle to the grid object for this layer."""
return new_proxy(QtProxyObject, self._getHeldObject().grid)
def spectrogram(self):
"""If the layer contains a spectrogram, get a handle to the spectrogram object."""
return new_proxy(QtProxyObject, self._getHeldObject().spectrogram)
def __dir__(self):
"""Returns the list of attributes of this object."""
# The first part (explicitly defined ones) are here for the traditional Mantid CLI,
# the additional ones have been added for the matplotlib-like CLI (without explicit
# declaration/documentation here in the proxies layer.
return ['insertCurve', 'addCurves', 'addCurve', 'addErrorBars', 'errorBarSettings', 'addHistogram',
'newLegend', 'legend', 'grid', 'spectrogram' ] + self.additional_methods
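    # Usage sketch (illustrative; assumes `g` is a Graph proxy and `t` a Table proxy
    # with a numeric column named "Y" already created in MantidPlot):
    #
    #   layer = g.activeLayer()
    #   layer.addCurve(t, "Y", style=1, lineWidth=2)
    #   layer.setTitle("Example curve")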
#-----------------------------------------------------------------------------
class Graph3D(QtProxyObject):
"""Proxy for the _qti.Graph3D object.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def setData(self, table, colName, type=0):
"""Set a table column to be the data source for this plot.
Args:
table: A reference to the table.
colName: The name of the column to set as the data source.
type: The plot type.
"""
threadsafe_call(self._getHeldObject().setData, table._getHeldObject(),colName,type)
def setMatrix(self, matrix):
"""Set a matrix (N.B. not a MantidMatrix) to be the data source for this plot.
Args:
matrix: A reference to the matrix.
"""
threadsafe_call(self._getHeldObject().setMatrix, matrix._getHeldObject())
#-----------------------------------------------------------------------------
class Spectrogram(QtProxyObject):
"""Proxy for the _qti.Spectrogram object.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def matrix(self):
"""Get a handle to the data source."""
return new_proxy(QtProxyObject, self._getHeldObject().matrix)
#-----------------------------------------------------------------------------
class Folder(QtProxyObject):
"""Proxy for the _qti.Folder object.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def windows(self):
"""Get a list of the windows in this folder"""
f = self._getHeldObject().windows()
ret = []
for item in f:
ret.append(MDIWindow(item))
return ret
def folders(self):
"""Get a list of the subfolders of this folder"""
f = self._getHeldObject().folders()
ret = []
for item in f:
ret.append(Folder(item))
return ret
def folder(self, name, caseSensitive=True, partialMatch=False):
"""Get a handle to a named subfolder.
Args:
name: The name of the subfolder.
caseSensitive: Whether to search case-sensitively or not (default: yes).
partialMatch: Whether to return a partial match (default: no).
Returns:
A handle to the requested folder, or None if no match found.
"""
return new_proxy(Folder, self._getHeldObject().folder, name,caseSensitive,partialMatch)
def findWindow(self, name, searchOnName=True, searchOnLabel=True, caseSensitive=False, partialMatch=True):
"""Get a handle to the first window matching the search criteria.
Args:
name: The name of the window.
searchOnName: Whether to search the window names (takes precedence over searchOnLabel).
searchOnLabel: Whether to search the window labels.
caseSensitive: Whether to search case-sensitively or not (default: no).
partialMatch: Whether to return a partial match (default: yes).
Returns:
A handle to the requested window, or None if no match found.
"""
return new_proxy(MDIWindow, self._getHeldObject().findWindow, name,searchOnName,searchOnLabel,caseSensitive,partialMatch)
def window(self, name, cls='MdiSubWindow', recursive=False):
"""Get a handle to a named window of a particular type.
Args:
name: The name of the window.
cls: Search only for windows of type inheriting from this class (N.B. This is the C++ class name).
recursive: If True, do a depth-first recursive search (default: False).
Returns:
A handle to the window, or None if no match found.
"""
return new_proxy(MDIWindow, self._getHeldObject().window, name,cls,recursive)
def table(self, name, recursive=False):
"""Get a handle to the table with the given name.
Args:
name: The name of the table to search for.
recursive: If True, do a depth-first recursive search (default: False).
"""
return new_proxy(MDIWindow, self._getHeldObject().table, name,recursive)
def matrix(self, name, recursive=False):
"""Get a handle to the matrix with the given name.
Args:
name: The name of the matrix to search for.
recursive: If True, do a depth-first recursive search (default: False).
"""
return new_proxy(MDIWindow, self._getHeldObject().matrix, name,recursive)
def graph(self, name, recursive=False):
"""Get a handle to the graph with the given name.
Args:
name: The name of the graph to search for.
recursive: If True, do a depth-first recursive search (default: False).
"""
return new_proxy(Graph, self._getHeldObject().graph, name,recursive)
def rootFolder(self):
"""Get the folder at the root of the hierarchy"""
return new_proxy(Folder, self._getHeldObject().rootFolder)
#-----------------------------------------------------------------------------
class MantidMatrix(MDIWindow):
"""Proxy for the _qti.MantidMatrix object.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def plotGraph3D(self, style=3):
"""Create a 3D plot of the workspace data.
Args:
style: The qwt3d plotstyle of the generated graph (default: filled mesh)
Returns:
A handle to the newly created graph (a Graph3D object)
"""
return new_proxy(Graph3D, self._getHeldObject().plotGraph3D, style)
def plotGraph2D(self, type=16):
"""Create a spectrogram from the workspace data.
Args:
type: The style of the plot (default: ColorMap)
Returns:
            A handle to the newly created graph (a Graph object)
"""
return new_proxy(Graph, self._getHeldObject().plotGraph2D, type)
#-----------------------------------------------------------------------------
class InstrumentView(MDIWindow):
"""Proxy for the instrument window
"""
def __init__(self, toproxy):
"""Creates a proxy object around an instrument window
Args:
toproxy: The raw C object to proxy
"""
QtProxyObject.__init__(self, toproxy)
def getTab(self, name_or_tab):
"""Retrieve a handle to the given tab
Args:
            name_or_tab: A string containing the title or tab type
Returns:
A handle to a tab widget
"""
handle = new_proxy(QtProxyObject, self._getHeldObject().getTab, name_or_tab)
if handle is None:
raise ValueError("Invalid tab title '%s'" % str(name_or_tab))
return handle
# ----- Deprecated functions -----
def changeColormap(self, filename=None):
import warnings
warnings.warn("InstrumentWidget.changeColormap has been deprecated. Use the render tab method instead.")
callable = QtProxyObject.__getattr__(self, "changeColormap")
if filename is None:
callable()
else:
callable(filename)
def setColorMapMinValue(self, value):
import warnings
warnings.warn("InstrumentWidget.setColorMapMinValue has been deprecated. Use the render tab setMinValue method instead.")
QtProxyObject.__getattr__(self, "setColorMapMinValue")(value)
def setColorMapMaxValue(self, value):
import warnings
warnings.warn("InstrumentWidget.setColorMapMaxValue has been deprecated. Use the render tab setMaxValue method instead.")
QtProxyObject.__getattr__(self, "setColorMapMaxValue")(value)
def setColorMapRange(self, minvalue, maxvalue):
import warnings
warnings.warn("InstrumentWidget.setColorMapRange has been deprecated. Use the render tab setRange method instead.")
QtProxyObject.__getattr__(self, "setColorMapRange")(minvalue,maxvalue)
def setScaleType(self, scale_type):
import warnings
warnings.warn("InstrumentWidget.setScaleType has been deprecated. Use the render tab setScaleType method instead.")
QtProxyObject.__getattr__(self, "setScaleType")(scale_type)
def setViewType(self, view_type):
import warnings
warnings.warn("InstrumentWidget.setViewType has been deprecated. Use the render tab setSurfaceType method instead.")
QtProxyObject.__getattr__(self, "setViewType")(view_type)
def selectComponent(self, name):
import warnings
warnings.warn("InstrumentWidget.selectComponent has been deprecated. Use the tree tab selectComponentByName method instead.")
QtProxyObject.__getattr__(self, "selectComponent")(name)
#-----------------------------------------------------------------------------
class SliceViewerWindowProxy(QtProxyObject):
"""Proxy for a C++ SliceViewerWindow object.
It will pass-through method calls that can be applied to the
SliceViewer widget contained within.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self, toproxy)
def __getattr__(self, attr):
"""
        Reroute a method call to the stored object
"""
if self._getHeldObject() is None:
raise Exception("Error! The SliceViewerWindow has been deleted.")
# Pass-through to the contained SliceViewer widget.
sv = self.getSlicer()
# But only those attributes that are methods on the SliceViewer
if attr in SliceViewerProxy.slicer_methods:
return getattr(sv, attr)
else:
# Otherwise, pass through to the stored object
return getattr(self._getHeldObject(), attr)
def __str__(self):
"""
Return a string representation of the proxied object
"""
if self._getHeldObject() is None:
return "None"
else:
return 'SliceViewerWindow(workspace="%s")' % self._getHeldObject().getSlicer().getWorkspaceName()
def __repr__(self):
"""
Return a string representation of the proxied object
"""
return repr(self._getHeldObject())
def __dir__(self):
"""
Returns the list of attributes for this object.
Might allow tab-completion to work under ipython
"""
return SliceViewerProxy.slicer_methods + ['showLine']
def getLiner(self):
"""
Returns the LineViewer widget that is part of this
SliceViewerWindow
"""
return LineViewerProxy(self._getHeldObject().getLiner())
def getSlicer(self):
"""
Returns the SliceViewer widget that is part of this
SliceViewerWindow
"""
return SliceViewerProxy(self._getHeldObject().getSlicer())
def showLine(self, start, end, width=None, planar_width=0.1, thicknesses=None,
num_bins=100):
"""Opens the LineViewer and define a 1D line along which to integrate.
The line is created in the same XY dimensions and at the same slice
point as is currently shown in the SliceViewer.
Args:
start :: (X,Y) coordinates of the start point in the XY dimensions
of the current slice.
end :: (X,Y) coordinates of the end point in the XY dimensions
of the current slice.
width :: if specified, sets all the widths (planar and other
dimensions) to this integration width.
planar_width :: sets the XY-planar (perpendicular to the line)
integration width. Default 0.1.
thicknesses :: list with one thickness value for each dimension in the
workspace (including the XY dimensions, which are ignored).
e.g. [0,1,2,3] in a XYZT workspace.
num_bins :: number of bins by which to divide the line.
Default 100.
Returns:
The LineViewer object of the SliceViewerWindow. There are methods
available to modify the line drawn.
"""
# First show the lineviewer
self.getSlicer().toggleLineMode(True)
liner = self.getLiner()
# Start and end point
liner.setStartXY(start[0], start[1])
liner.setEndXY(end[0], end[1])
# Set the width.
        if width is not None:
liner.setThickness(width)
liner.setPlanarWidth(width*0.5)
else:
liner.setPlanarWidth(planar_width*0.5)
        if thicknesses is not None:
for d in range(len(thicknesses)):
liner.setThickness(d, thicknesses[d])
# Bins
liner.setNumBins(num_bins)
liner.apply()
# Return the proxy to the LineViewer widget
return liner
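    # Usage sketch (illustrative; `svw` stands for a SliceViewerWindowProxy obtained
    # elsewhere, and the coordinates/binning are made-up numbers):
    #
    #   liner = svw.showLine(start=(0.0, 0.0), end=(1.0, 1.0),
    #                        planar_width=0.2, num_bins=200)
    #   liner.setNumBins(500)   # the returned LineViewerProxy can be adjusted further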
#-----------------------------------------------------------------------------
def getWorkspaceNames(source):
"""Takes a "source", which could be a WorkspaceGroup, or a list
of workspaces, or a list of names, and converts
it to a list of workspace names.
Args:
source :: input list or workspace group
Returns:
list of workspace names
"""
ws_names = []
if isinstance(source, list) or isinstance(source,tuple):
for w in source:
names = getWorkspaceNames(w)
ws_names += names
elif hasattr(source, 'name'):
if hasattr(source, '_getHeldObject'):
wspace = source._getHeldObject()
else:
wspace = source
        if wspace is None:
return []
if hasattr(wspace, 'getNames'):
grp_names = wspace.getNames()
for n in grp_names:
if n != wspace.name():
ws_names.append(n)
else:
ws_names.append(wspace.name())
elif isinstance(source,str):
w = None
try:
# for non-existent names this raises a KeyError
w = mantid.AnalysisDataService.Instance()[source]
        except KeyError:
raise ValueError("Workspace '%s' not found!"%source)
        if w is not None:
names = getWorkspaceNames(w)
for n in names:
ws_names.append(n)
else:
raise TypeError('Incorrect type passed as workspace argument "' + str(source) + '"')
return ws_names
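# Usage sketch (illustrative names; the workspaces are assumed to exist in the
# AnalysisDataService, otherwise a ValueError is raised):
#
#   getWorkspaceNames("ws1")            # -> ['ws1']
#   getWorkspaceNames(["ws1", "ws2"])   # -> ['ws1', 'ws2']
#   getWorkspaceNames(group_workspace)  # -> names of the group's member workspaces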
#-----------------------------------------------------------------------------
class ProxyCompositePeaksPresenter(QtProxyObject):
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def getPeaksPresenter(self, source):
to_present = None
if isinstance(source, str):
to_present = source
elif isinstance(source, mantid.api.Workspace):
to_present = source.name()
else:
raise ValueError("getPeaksPresenter expects a Workspace name or a Workspace object.")
if not mantid.api.mtd.doesExist(to_present):
raise ValueError("%s does not exist in the workspace list" % to_present)
return new_proxy(QtProxyObject, self._getHeldObject().getPeaksPresenter, to_present)
#-----------------------------------------------------------------------------
class SliceViewerProxy(QtProxyObject):
"""Proxy for a C++ SliceViewer widget.
"""
# These are the exposed python method names
    slicer_methods = ["setWorkspace", "getWorkspaceName", "showControls", "openFromXML", "getImage",
                      "saveImage", "copyImageToClipboard", "setFastRender", "getFastRender",
                      "toggleLineMode", "setXYDim", "setXYDim", "getDimX", "getDimY",
                      "setSlicePoint", "setSlicePoint", "getSlicePoint", "getSlicePoint",
                      "setXYLimits", "getXLimits", "getYLimits", "zoomBy", "setXYCenter", "resetZoom",
                      "loadColorMap", "setColorScale", "setColorScaleMin", "setColorScaleMax",
                      "setColorScaleLog", "getColorScaleMin", "getColorScaleMax", "getColorScaleLog",
                      "setColorScaleAutoFull", "setColorScaleAutoSlice", "setColorMapBackground",
                      "setTransparentZeros", "setNormalization", "getNormalization",
                      "setRebinThickness", "setRebinNumBins", "setRebinMode", "setPeaksWorkspaces",
                      "refreshRebin"]
def __init__(self, toproxy):
QtProxyObject.__init__(self, toproxy)
def __dir__(self):
"""Returns the list of attributes for this object. """
        return self.slicer_methods
def setPeaksWorkspaces(self, source):
workspace_names = getWorkspaceNames(source)
if len(workspace_names) == 0:
raise ValueError("No workspace names given to setPeaksWorkspaces")
for name in workspace_names:
if not mantid.api.mtd.doesExist(name):
raise ValueError("%s does not exist in the workspace list" % name)
if not isinstance(mantid.api.mtd[name], mantid.api.IPeaksWorkspace):
raise ValueError("%s is not an IPeaksWorkspace" % name)
return new_proxy(ProxyCompositePeaksPresenter, self._getHeldObject().setPeaksWorkspaces, workspace_names)
#-----------------------------------------------------------------------------
class LineViewerProxy(QtProxyObject):
"""Proxy for a C++ LineViewer widget.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self, toproxy)
def __dir__(self):
"""Returns the list of attributes for this object. """
return ["apply", "showPreview", "showFull", "setStartXY", "setEndXY", "setThickness", "setThickness",
"setThickness", "setPlanarWidth", "getPlanarWidth", "setNumBins", "setFixedBinWidthMode", "getFixedBinWidth",
"getFixedBinWidthMode", "getNumBins", "getBinWidth", "setPlotAxis", "getPlotAxis"]
#-----------------------------------------------------------------------------
class FitBrowserProxy(QtProxyObject):
"""
Proxy for the FitPropertyBrowser object.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
#-----------------------------------------------------------------------------
class TiledWindowProxy(QtProxyObject):
"""
Proxy for the TiledWindow object.
"""
def __init__(self, toproxy):
QtProxyObject.__init__(self,toproxy)
def addWidget(self, tile, row, col):
"""
Add a new sub-window at a given position in the layout.
The layout will re-shape itself if necessary to fit in the new tile.
Args:
tile :: An MdiSubWindow to add.
row :: A row index at which to place the new tile.
col :: A column index at which to place the new tile.
"""
threadsafe_call(self._getHeldObject().addWidget, tile._getHeldObject(), row, col)
def insertWidget(self, tile, row, col):
"""
Insert a new sub-window at a given position in the layout.
The widgets to the right and below the inserted tile will be shifted
towards the bottom of the window. If necessary a new row will be appended.
The number of columns doesn't change.
Args:
tile :: An MdiSubWindow to insert.
row :: A row index at which to place the new tile.
col :: A column index at which to place the new tile.
"""
threadsafe_call(self._getHeldObject().insertWidget, tile._getHeldObject(), row, col)
def getWidget(self, row, col):
"""
Get a sub-window at a location in this TiledWindow.
Args:
row :: A row of a sub-window.
col :: A column of a sub-window.
"""
return MDIWindow( threadsafe_call(self._getHeldObject().getWidget, row, col) )
def clear(self):
"""
Clear the content this TiledWindow.
"""
threadsafe_call(self._getHeldObject().clear)
def showHelpPage(page_name=None):
"""Show a page in the help system"""
window = threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showHelpPage, page_name)
def showWikiPage(page_name=None):
"""Show a wiki page through the help system"""
window = threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showWikiPage, page_name)
def showAlgorithmHelp(algorithm=None, version=-1):
"""Show an algorithm help page"""
window = threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showAlgorithmHelp, algorithm, version)
def showConceptHelp(name=None):
"""Show a concept help page"""
window = threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showConceptHelp, name)
def showFitFunctionHelp(name=None):
"""Show a fit function help page"""
window = threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showFitFunctionHelp, name)
def showCustomInterfaceHelp(name=None):
"""Show a custom interface help page"""
window = threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showCustomInterfaceHelp, name)
| gpl-3.0 |
nickgentoo/scikit-learn-graph | scripts/Online_PassiveAggressive_ReservoirHashKernels_notanhTABLES.py | 1 | 10510 | # -*- coding: utf-8 -*-
"""
python -m scripts/Online_PassiveAggressive_countmeansketch LMdata 3 1 a ODDST 0.01
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
from copy import copy
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import sys
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier as PAC
from skgraph.datasets import load_graph_datasets
import numpy as np
from scipy.sparse import csc_matrix
from sklearn.utils import compute_class_weight
from scipy.sparse import csr_matrix
from skgraph.utils.countminsketch_TABLESrandomprojectionNEWLinear import CountMinSketch
from itertools import izip
import time
if __name__=='__main__':
start_time = time.time()
if len(sys.argv)<1:
sys.exit("python ODDKernel_example.py dataset r l filename kernel C m seed")
dataset=sys.argv[1]
max_radius=int(sys.argv[2])
la=float(sys.argv[3])
#hashs=int(sys.argv[3])
njobs=1
name=str(sys.argv[4])
kernel=sys.argv[5]
C=float(sys.argv[6])
m=int(sys.argv[7])
rs=int(sys.argv[8])
#lr=float(sys.argv[7])
#FIXED PARAMETERS
normalization=False
#working with Chemical
g_it=load_graph_datasets.dispatch(dataset)
f=open(name,'w')
#At this point, one_hot_encoding contains the encoding for each symbol in the alphabet
if kernel=="WL":
print "Lambda ignored"
print "Using WL fast subtree kernel"
Vectorizer=WLVectorizer(r=max_radius,normalization=normalization)
elif kernel=="ODDST":
print "Using ST kernel"
Vectorizer=ODDSTVectorizer(r=max_radius,l=la,normalization=normalization)
elif kernel=="NSPDK":
print "Using NSPDK kernel, lambda parameter interpreted as d"
Vectorizer=NSPDKVectorizer(r=max_radius,d=int(la),normalization=normalization)
else:
print "Unrecognized kernel"
#TODO the C parameter should probably be optimized
#print zip(_letters, _one_hot)
#exit()
features=Vectorizer.transform(g_it.graphs) #Parallel ,njobs
print "examples, features", features.shape
features_time=time.time()
print("Computed features in %s seconds ---" % (features_time - start_time))
errors=0
tp=0
fp=0
tn=0
fn=0
predictions=[0]*50
correct=[0]*50
#print ESN
#netDataSet=[]
#netTargetSet=[]
#netKeyList=[]
BERtotal=[]
bintargets=[1,-1]
#print features
#print list_for_deep.keys()
tp = 0
fp = 0
fn = 0
tn = 0
part_plus=0
part_minus=0
sizes=[5000]*50
transformer=CountMinSketch(m,features.shape[1],rs)
WCMS=np.zeros(shape=(m,1))
cms_creation=0.0
for i in xrange(features.shape[0]):
time1=time.time()
ex=features[i][0].T
exCMS=transformer.transform(ex)
#print "exCMS", type(exCMS), exCMS.shape
target=g_it.target[i]
#W=csr_matrix(ex)
#dot=0.0
module=np.dot(exCMS.T,exCMS)[0,0]
#print "module", module
time2=time.time()
cms_creation+=time2 - time1
dot=np.dot(WCMS.T,exCMS)
#print "dot", dot
#print "dot:", dot, "dotCMS:",dot1
if (np.sign(dot) != target ):
#print "error on example",i, "predicted:", dot, "correct:", target
errors+=1
if target==1:
fn+=1
else:
fp+=1
else:
#print "correct classification", target
if target==1:
tp+=1
else:
tn+=1
if(target==1):
coef=(part_minus+1.0)/(part_plus+part_minus+1.0)
part_plus+=1
else:
coef=(part_plus+1.0)/(part_plus+part_minus+1.0)
part_minus+=1
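        # Class-weighted Passive-Aggressive (PA-I style) step size computed below:
        #   tau = min(C, max(0, coef * (1 - y * <w, x>) / ||x||^2))
        # where coef up-weights the class that is under-represented so far.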
tao = min (C, max (0.0,( (1.0 - target*dot )*coef) / module ) );
if (tao > 0.0):
WCMS+=(exCMS*(tao*target))
# for row,col in zip(rows,cols):
# ((row,col), ex[row,col])
# #print col, ex[row,col]
# WCMS.add(col,target*tao*ex[row,col])
#print "Correct prediction example",i, "pred", score, "target",target
if i%50==0 and i!=0:
#output performance statistics every 50 examples
if (tn+fp) > 0:
pos_part= float(fp) / (tn+fp)
else:
pos_part=0
if (tp+fn) > 0:
neg_part=float(fn) / (tp+fn)
else:
neg_part=0
BER = 0.5 * ( pos_part + neg_part)
print "1-BER Window esempio ",i, (1.0 - BER)
f.write("1-BER Window esempio "+str(i)+" "+str(1.0 - BER)+"\n")
#print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
BERtotal.append(1.0 - BER)
tp = 0
fp = 0
fn = 0
tn = 0
part_plus=0
part_minus=0
end_time=time.time()
print("Learning phase time %s seconds ---" % (end_time - features_time )) #- cms_creation
print("Total time %s seconds ---" % (end_time - start_time))
print "BER AVG", str(np.average(BERtotal)),"std", np.std(BERtotal)
f.write("BER AVG "+ str(np.average(BERtotal))+" std "+str(np.std(BERtotal))+"\n")
f.close()
transformer.removetmp()
#print "N_features", ex.shape
#generate explicit W from CountMeanSketch
#print W
#raw_input("W (output)")
#==============================================================================
#
# tao = /*(double)labels->get_label(idx_a) **/ min (C, max (0.0,(1.0 - (((double)labels->get_label(idx_a))*(classe_mod) )) * c_plus ) / modulo_test);
#
# #W=W_old #dump line
#
#
# #set the weights of PA to the predicted values
# PassiveAggressive.coef_=W
# pred=PassiveAggressive.predict(ex)
#
# score=PassiveAggressive.decision_function(ex)
#
# bintargets.append(target)
# if pred!=target:
# errors+=1
# print "Error",errors," on example",i, "pred", score, "target",target
# if target==1:
# fn+=1
# else:
# fp+=1
#
# else:
# if target==1:
# tp+=1
# else:
# tn+=1
# #print "Correct prediction example",i, "pred", score, "target",target
#
# else:
# #first example is always an error!
# pred=0
# score=0
# errors+=1
# print "Error",errors," on example",i
# if g_it.target[i]==1:
# fn+=1
# else:
# fp+=1
# #print i
# if i%50==0 and i!=0:
# #output performance statistics every 50 examples
# if (tn+fp) > 0:
# pos_part= float(fp) / (tn+fp)
# else:
# pos_part=0
# if (tp+fn) > 0:
# neg_part=float(fn) / (tp+fn)
# else:
# neg_part=0
# BER = 0.5 * ( pos_part + neg_part)
# print "1-BER Window esempio ",i, (1.0 - BER)
# print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
# BERtotal.append(1.0 - BER)
# tp = 0
# fp = 0
# fn = 0
# tn = 0
# bintargets=[1,-1]
# #print features[0][i]
# #print features[0][i].shape
# #f=features[0][i,:]
# #print f.shape
# #print f.shape
# #print g_it.target[i]
# #third parameter is compulsory just for the first call
# print "prediction", pred, score
# #print "intecept",PassiveAggressive.intercept_
# #raw_input()
# if abs(score)<1.0 or pred!=g_it.target[i]:
#
# ClassWeight=compute_class_weight('auto',np.asarray([1,-1]),bintargets)
# #print "class weights", {1:ClassWeight[0],-1:ClassWeight[1]}
# PassiveAggressive.class_weight={1:ClassWeight[0],-1:ClassWeight[1]}
#
# PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# #PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# W_old=PassiveAggressive.coef_
#
#
# #ESN target---#
# netTargetSet=[]
# for key,rowDict in list_for_deep[i].iteritems():
#
#
# target=np.asarray( [np.asarray([W_old[0,key]])]*len(rowDict))
#
#
# netTargetSet.append(target)
#
#
#
#
# #------------ESN TargetSetset--------------------#
# # ESN Training
#
# #for ftDataset,ftTargetSet in zip(netDataSet,netTargetSet):
# #print "Input"
# #print netDataSet
# #raw_input("Output")
# #print netTargetSet
# #raw_input("Target")
# model.OnlineTrain(netDataSet,netTargetSet,lr)
# #raw_input("TR")
# #calcolo statistiche
#
# print "BER AVG", sum(BERtotal) / float(len(BERtotal))
# print>>f,"BER AVG "+str(sum(BERtotal) / float(len(BERtotal)))
# f.close()
#==============================================================================
| gpl-3.0 |
pnedunuri/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
BhavyaLight/kaggle-predicting-Red-Hat-Business-Value | Initial_Classification_Models/Ensemble/RandomForest500XGBoost.py | 1 | 8265 | import pandas as pd
import xgboost as xgb1
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from Classification import Utility
import pickle
import time
from sklearn.preprocessing import StandardScaler
# Function to change labels of categories to one-hot encoding using scikit's OneHot Encoding
# pd.get_dummies(df) does the same, provides sweet header's as well but it it not fast enough, kill's memory
def category_to_one_hot(dataset, non_feature, continuous_feature):
# Function to change labels of categories to one-hot encoding using scikit's OneHot Encoding sparse matrix
# pd.get_dummies(df) does the same, provides sweet header's as well but it kill's memory
ds = dataset.drop(non_feature, axis=1, errors='ignore')
boolean_column = []
counter = 0
if('days' in ds.columns):
ds['weekend'] = ds['days']//5
for column in ds.columns:
if column not in continuous_feature:
boolean_column.append(counter)
counter += 1
# boolean_column is not the column name but index
print("Done filtering columns...")
grd_enc = OneHotEncoder(categorical_features=boolean_column)
encoded_arr = grd_enc.fit_transform(ds)
return encoded_arr
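# Hypothetical usage sketch (column names invented for illustration):
#   df = pd.DataFrame({'char_1': [3, 5], 'days': [2, 6], 'activity_id': ['a', 'b']})
#   sparse_X = category_to_one_hot(df, non_feature=['activity_id'],
#                                  continuous_feature=['days', 'weekend'])
#   # 'char_1' is one-hot encoded; 'days' and the derived 'weekend' pass through.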
start_time = time.time()
# features = [(1.0, 'people_group_1')]
# columns = []
#
# filename = "randomForest500Model"
#
# for val in features:
# if (val[0] == 1.0):
# columns.append(val[1])
#
# RandomForestFilename = "randomForest500Model"
#
# train_dataset = pd.read_csv("../../Data/act_train_features_reduced.csv")
# test_dataset = pd.read_csv("../../Data/act_test_features_reduced.csv")
# train_output = pd.read_csv("../../Data/act_train_output.csv")
#
# train_dataset = pd.merge(train_dataset, train_output, on="activity_id", how='inner')
# print("--- %s seconds ---" % (time.time() - start_time))
#
# randomForestModel = Utility.loadModel("randomForestModel_OHE")
# # randomForestModel = RandomForestClassifier(n_estimators=500)
# #
# # randomForestModel.fit(train_dataset[columns], train_dataset[["outcome"]].values.ravel())
#
# prob_train = randomForestModel.predict_proba(train_dataset[columns])
# prob_test = randomForestModel.predict_proba(test_dataset[columns])
# # Utility.saveModel(randomForestModel, "randomForestModel_OHE")
#
# train_dataset["Random_Forest_1"] = prob_train[:,1]
#
# test_dataset["Random_Forest_1"] = prob_test[:,1]
#
# Utility.saveModel(train_dataset, "train_randomforest")
# Utility.saveModel(test_dataset, "test_randomforest")
train_dataset = Utility.loadModel("train_randomforest")
test_dataset = Utility.loadModel("test_randomforest")
print("Random Forest Done")
print("--- %s seconds ---" % (time.time() - start_time))
features = [(1.0, 'Random_Forest_1'), (1.0, 'char_3'), (1.0, 'char_4'), (1.0, 'char_5'),
(1.0, 'char_6'), (1.0, 'char_8'), (1.0, 'char_9'), (1.0, 'days'), (1.0, 'month'), (1.0, 'people_char_1'),
(1.0, 'people_char_10'), (1.0, 'people_char_11'), (1.0, 'people_char_12'), (1.0, 'people_char_13'),
(1.0, 'people_char_14'), (1.0, 'people_char_15'), (1.0, 'people_char_16'), (1.0, 'people_char_17'),
(1.0, 'people_char_18'), (1.0, 'people_char_19'), (1.0, 'people_char_2'), (1.0, 'people_char_20'),
(1.0, 'people_char_21'), (1.0, 'people_char_22'), (1.0, 'people_char_23'), (1.0, 'people_char_24'),
(1.0, 'people_char_25'), (1.0, 'people_char_26'), (1.0, 'people_char_27'), (1.0, 'people_char_28'),
(1.0, 'people_char_29'), (1.0, 'people_char_3'), (1.0, 'people_char_30'), (1.0, 'people_char_31'),
(1.0, 'people_char_32'), (1.0, 'people_char_33'), (1.0, 'people_char_34'), (1.0, 'people_char_35'),
(1.0, 'people_char_36'), (1.0, 'people_char_37'), (1.0, 'people_char_38'), (1.0, 'people_char_4'),
(1.0, 'people_char_5'), (1.0, 'people_char_6'), (1.0, 'people_char_7'), (1.0, 'people_char_8'),
(1.0, 'people_char_9'), (1.0, 'people_dayOfMonth'), (1.0, 'people_month'), (1.0, 'people_quarter'),
(1.0, 'people_week'), (1.0, 'people_year'), (1.0, 'quarter'), (1.0, 'week'), (1.0, 'year'), (2.0, 'char_7'),
(3.0, 'char_1'), (4.0, 'dayOfMonth'), (5.0, 'activity_category'), (6.0, 'people_days'), (7.0, 'char_2'),
(8.0, 'people_group_1'), (9.0, 'people_id')]
columns = []
filename = 'randomPlusXGBOHE_new_woGP10_2'
for val in features:
# if(val[0] == 1.0):
columns.append(val[1])
train_dataset_outcome = train_dataset[["outcome"]]
train_dataset = train_dataset[columns]
# Non feature
NON_FEATURE=['activity_id','people_id','date','people_date']
# Categorical data that is only label encoded
CATEGORICAL_DATA = ['people_char_1', 'people_char_2','people_group_1',
'people_char_3', 'people_char_4', 'people_char_5',
'people_char_6', 'people_char_7', 'people_char_8',
'people_char_9', 'activity_category',
'char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6',
'char_7', 'char_8', 'char_9']
#removed char_10 to check xgb
# Already in a one-hot encoded form
CATEGORICAL_BINARY = ['people_char_10', 'people_char_11', 'people_char_12',
'people_char_13', 'people_char_14', 'people_char_15',
'people_char_16', 'people_char_17', 'people_char_18',
'people_char_19', 'people_char_20', 'people_char_21',
'people_char_22', 'people_char_23', 'people_char_24',
'people_char_25', 'people_char_26', 'people_char_27',
'people_char_28', 'people_char_29', 'people_char_30',
'people_char_31', 'people_char_32', 'people_char_33',
'people_char_34', 'people_char_35', 'people_char_36',
'people_char_37', 'Random_Forest_1' ]
# Continuous categories
CONT = ['people_days', 'days',
'people_month', 'month',
'people_quarter', 'quarter',
'people_week', 'week',
'people_dayOfMonth', 'dayOfMonth',
'people_year', 'year',
'people_char_38']
train_dataset_array = (category_to_one_hot(train_dataset, NON_FEATURE, CONT))
Utility.saveModel(train_dataset_array, "ohe_log")
norm = StandardScaler(with_mean=False, with_std=True)
norm1 = StandardScaler(with_mean=False, with_std=True)
# train_dataset_array =
print("--- %s seconds ---" % (time.time() - start_time))
print("Starting Log Reg")
X = train_dataset_array
#norm.fit(X)
#X = norm.transform(X)
Y = train_dataset_outcome.values.ravel()
# logisticModel = Utility.loadModel(filename)
test_dataset_act_id = test_dataset[["activity_id"]]
test_dataset = test_dataset[columns]
test_dataset_array = (category_to_one_hot(test_dataset, NON_FEATURE, CONT))
#norm1.fit(test_dataset_array)
#test_dataset_array = norm1.transform(test_dataset_array)
xgb = XGBClassifier(max_depth=10, learning_rate=0.3, n_estimators=25,
objective='binary:logistic', subsample=0.7,
colsample_bytree=0.7, seed=0, silent=1, nthread=4,
min_child_weight=0)
dtrain = xgb1.DMatrix(X,label=Y)
dtest = xgb1.DMatrix(test_dataset_array)
param = {'max_depth':10, 'eta':0.02, 'silent':1, 'objective':'binary:logistic' }
param['nthread'] = 4
param['eval_metric'] = 'auc'
param['subsample'] = 0.7
param['colsample_bytree']= 0.7
param['min_child_weight'] = 0
param['booster'] = "gblinear"
watchlist = [(dtrain,'train')]
num_round = 1500
early_stopping_rounds=10
bst = xgb1.train(param, dtrain, num_round, watchlist,early_stopping_rounds=early_stopping_rounds)
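# With early_stopping_rounds set, xgboost stops once the watched 'auc' metric has
# not improved for 10 consecutive rounds; as the watchlist here is the training
# set itself, this mainly caps wasted rounds rather than guarding against overfit.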
print("--- %s seconds ---" % (time.time() - start_time))
Utility.saveModel(bst, filename)
# pickle.dump(logisti cModel, open(filename, 'wb'))
ypred = bst.predict(dtest)
# probs = (xgb.predict_proba(test_dataset_array))
# test_dataset_act_id["outcome"] = probs[:,1]
test_dataset_act_id["outcome"] = ypred
print("--- %s seconds ---" % (time.time() - start_time))
Utility.saveInOutputForm(test_dataset_act_id, filename + ".csv", "ensemble")
# test_dataset_act_id[["activity_id", "outcome"]].set_index(["activity_id"]).to_csv("../../Data/" + filename + ".csv")
| mit |
harshaneelhg/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
sss1/DeepInteractions | pairwise/util.py | 2 | 5513 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, log_loss, roc_curve, auc, precision_recall_curve, average_precision_score
def initialize_with_JASPAR(enhancer_conv_layer, promoter_conv_layer):
JASPAR_motifs = list(np.load('/home/sss1/Desktop/projects/DeepInteractions/JASPAR_CORE_2016_vertebrates.npy'))
print 'Initializing ' + str(len(JASPAR_motifs)) + ' kernels with JASPAR motifs.'
enhancer_conv_weights = enhancer_conv_layer.get_weights()
promoter_conv_weights = promoter_conv_layer.get_weights()
reverse_motifs = [JASPAR_motifs[19][::-1,::-1], JASPAR_motifs[97][::-1,::-1],
JASPAR_motifs[98][::-1,::-1], JASPAR_motifs[99][::-1,::-1],
JASPAR_motifs[100][::-1,::-1], JASPAR_motifs[101][::-1,::-1]]
JASPAR_motifs = JASPAR_motifs + reverse_motifs
for i in xrange(len(JASPAR_motifs)):
m = JASPAR_motifs[i][::-1,:]
w = len(m)
start = np.random.randint(low=3, high=30-w+1-3)
enhancer_conv_weights[0][i,:,start:start+w,0] = m.T - 0.25
enhancer_conv_weights[1][i] = np.random.uniform(low=-1.0,high=0.0)
promoter_conv_weights[0][i,:,start:start+w,0] = m.T - 0.25
promoter_conv_weights[1][i] = np.random.uniform(low=-1.0,high=0.0)
enhancer_conv_layer.set_weights(enhancer_conv_weights)
promoter_conv_layer.set_weights(promoter_conv_weights)
# Splits the data into training and validation data, keeping training_frac of
# the input samples in the training set and the rest for validation
def split_train_and_val_data(X_enhancer_train, X_promoter_train, y_train, training_frac):
n_train = int(training_frac * np.shape(y_train)[0]) # number of training samples
X_enhancer_val = X_enhancer_train[n_train:, :]
X_enhancer_train = X_enhancer_train[:n_train, :]
X_promoter_val = X_promoter_train[n_train:, :]
X_promoter_train = X_promoter_train[:n_train, :]
y_val = y_train[n_train:]
y_train = y_train[:n_train]
return X_enhancer_train, X_promoter_train, y_train, X_enhancer_val, X_promoter_val, y_val
# Calculates and prints several metrics (confusion matrix, Precision/Recall/F1)
# in real time; also updates the values in the conf_mat_callback so they can be
# plotted or analyzed later
def print_live(conf_mat_callback, y_val, val_predict, logs):
conf_mat = confusion_matrix(y_val, val_predict).astype(float)
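    # sklearn's confusion_matrix is indexed [true_label, predicted_label], so with
    # labels {0, 1}: conf_mat[1, 1] = TP, conf_mat[0, 1] = FP,
    # conf_mat[1, 0] = FN and conf_mat[0, 0] = TN.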
precision = conf_mat[1, 1] / conf_mat[:, 1].sum()
recall = conf_mat[1, 1] / conf_mat[1, :].sum()
f1_score = 2 * precision * recall / (precision + recall)
acc = (conf_mat[0, 0] + conf_mat[1, 1]) / np.sum(conf_mat)
loss = log_loss(y_val, val_predict)
conf_mat_callback.precisions.append(precision)
conf_mat_callback.recalls.append(recall)
conf_mat_callback.f1_scores.append(f1_score)
conf_mat_callback.losses.append(loss)
conf_mat_callback.accs.append(acc)
print '\nConfusion matrix:\n' + str(conf_mat) + '\n'
print 'Precision: ' + str(precision) + \
' Recall: ' + str(recall) + \
' F1: ' + str(f1_score) + \
' Accuracy: ' + str(acc) + \
' Log Loss: ' + str(loss)
print 'Predicted fractions: ' + str(val_predict.mean())
print 'Actual fractions: ' + str(y_val.mean()) + '\n'
# Plots several metrics (Precision/Recall/F1, loss, Accuracy) in real time
# (i.e., after each epoch)
def plot_live(conf_mat_callback):
epoch = conf_mat_callback.epoch
plt.clf()
xs = [1 + i for i in range(epoch)]
precisions_plot = plt.plot(xs, conf_mat_callback.precisions, label = 'Precision')
recalls_plot = plt.plot(xs, conf_mat_callback.recalls, label = 'Recall')
f1_scores_plot = plt.plot(xs, conf_mat_callback.f1_scores, label = 'F1 score')
accs_plot = plt.plot(xs, conf_mat_callback.accs, label = 'Accuracy')
losses_plot = plt.plot(xs, conf_mat_callback.losses / max(conf_mat_callback.losses), label = 'Loss')
batch_xs = [1 + epoch * float(i)/len(conf_mat_callback.training_losses) for i in range(len(conf_mat_callback.training_losses))]
training_losses_plot = plt.plot(batch_xs, conf_mat_callback.training_losses / max(conf_mat_callback.training_losses), label = 'Training Loss')
training_losses_plot = plt.plot(batch_xs, conf_mat_callback.training_accs, label = 'Training Accuracy')
plt.legend(bbox_to_anchor = (0, 1), loc = 4, borderaxespad = 0., prop={'size':6})
plt.ylim([0, 1])
plt.pause(.001)
# Given a (nearly) balanced data set (i.e., labeled enhancer and promoter
# sequence pairs), subsamples the positive samples to produce the desired
# fraction of positive samples; retains all negative samples
def subsample_imbalanced(X_enhancer, X_promoter, y, positive_subsample_frac):
    n = np.shape(y)[0] # sample size (i.e., number of pairs)
    # indices that are positive and selected to be retained, or negative
    to_keep = (np.random.rand(n) < positive_subsample_frac) | (y == 1)
return X_enhancer[to_keep, :], X_promoter[to_keep, :], y[to_keep]
def compute_AUPR(y, y_score):
# print 'Computing Precision-Recall curve...'
precision, recall, _ = precision_recall_curve(y, y_score)
    average_precision = average_precision_score(y, y_score)
    return average_precision
def plot_PR_curve(y, y_score):
# print 'Computing Precision-Recall curve...'
precision, recall, _ = precision_recall_curve(y, y_score)
return average_precision_score(y, y_score)
def plot_ROC_curve(y, y_score):
# print 'Computing ROC curve...'
fpr, tpr, thresholds = roc_curve(y, y_score)
return auc(fpr, tpr)
| gpl-3.0 |
joshloyal/scikit-learn | examples/calibration/plot_compare_calibration.py | 82 | 5012 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
thundertrick/imagePicker | imageProcesser.py | 1 | 16389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Xuyang Hu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 2.1
# as published by the Free Software Foundation
"""
imageProcesser is used to process images received from UI.
This file can be tested standalone using cmd:
python imageProcesser.py
"""
import cv2
import os
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
from PySide import QtGui, QtCore
import math
import time
# pylint: disable=C0103,R0904,W0102,W0201
testPath = './lena.jpeg'
def fileExp(matchedSuffixes=['bmp', 'jpg', 'jpeg', 'png']):
"""
Returns a compiled regexp matcher object for given list of suffixes.
"""
# Create a regular expression string to match all the suffixes
matchedString = r'|'.join([r'^.*\.' + s + '$' for s in matchedSuffixes])
return re.compile(matchedString, re.IGNORECASE)
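# Example: fileExp(['png', 'jpg']).match('IMG_0001.PNG') succeeds (matching is
# case-insensitive), while fileExp().match('notes.txt') returns None.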
class SingleImageProcess(QtCore.QObject):
"""
Process single image.
    Note: Batch processing will use this class,
    so less `print` output is recommended.
"""
# Public
sel = None # can be set from outside
selSignal = QtCore.Signal(list)
def __init__(self, fileName=testPath, isGray=False, parent=None):
"""
        Load the image; isGray=False maps to cv2 flag 0, i.e. grayscale.
"""
super(SingleImageProcess, self).__init__(parent)
self.fileName = fileName
self.img = cv2.imread(fileName, isGray)
# private for safty
self.dragStart = None
self.roiNeedUpadte = False
self.isInWaitLoop = False
def simpleDemo(self):
"""
        Print image shape and gray-level info and show the image with highgui.
        Usage: press Esc to quit the image window.
"""
width, height = self.img.shape
meanVal, meanStdDevVal = cv2.meanStdDev(self.img)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(self.img)
print "Size:"
print (width, height)
print "(min, max, mean, meanStdDev):"
print (minVal, maxVal, meanVal[0][0], meanStdDevVal[0][0])
cv2.imshow("SingleImageWindow", self.img)
cv2.setMouseCallback("SingleImageWindow", self.onMouse)
print "Press esc to exit" # any key except q in fact
self.isInWaitLoop = True
while True:
ch = cv2.waitKey()
if ch == 27: # ESC
break
elif self.roiNeedUpadte and ch == 97: # selection is made
print "Accept ROI (minX, minY, maxX, maxY): " + str(self.sel)
self.selSignal.emit(self.sel)
self.setROI()
self.roiNeedUpadte = False
break
elif ch == ord('b'):
self.getButterworthBlur(stopband2=35, showResult=True)
cv2.destroyAllWindows()
self.isInWaitLoop = False
def setROI(self, showPatch=False):
if not(self.sel):
return self.img
patch = self.img[self.sel[1]:self.sel[3],self.sel[0]:self.sel[2]]
if showPatch:
cv2.imshow("patch", patch)
self.enterWaitLoop()
self.roiNeedUpadte = False
return patch
def saveFile(self):
"""
Save the file with the time stamp.
"""
# TODO: make it work!!
print "This function has not been implemented yet. It is recommand to "+
" use matplotlib instead."
return False
# newName = time.strftime('%Y%m%d_%H%M%S') + self.fileName
# if cv2.imwrite(newName, self.img):
# print "Image is saved @ " + newName
# return True
# else:
# print "Error: Fasiled to save image"
# return False
# --------------------------------------------------- Get image info
def getCenterPoint(self):
"""
Blur image and return the center point of image.
"""
gaussianImg = cv2.GaussianBlur(self.img, (9,9), 3)
centerPoint = self.getAvgIn4x4rect(
self.img.shape[0]/2 - 2,
self.img.shape[1]/2 - 2)
return centerPoint
def getAvgIn4x4rect(self, LocX=2, LocY=2):
"""
Calculate average value of a 4x4 rect in the image.
Note: this function do not check if the rect is fully
inside the image!
@param (LocX, LocY) start point of rect
@reutrn retval average value in float
"""
imROI = self.img[LocX:LocX+4, LocY:LocY+4]
return cv2.mean(imROI)[0]
def getGaussaianBlur(self, size=(33,33)):
"""
Return the blurred image with size and sigmaX=9
"""
blurImg = cv2.GaussianBlur(self.img, size, 9)
# self.showImage(blurImg)
return blurImg
def getButterworthBlur(self, stopband2=5, showResult=False):
"""
Apply Butterworth filter to image.
@param stopband2 stopband^2
"""
dft4img = self.getDFT()
bwfilter = self.getButterworthFilter(stopband2=stopband2)
dstimg = dft4img * bwfilter
dstimg = cv2.idft(np.fft.ifftshift(dstimg))
dstimg = np.uint8(cv2.magnitude(dstimg[:,:,0], dstimg[:,:,1]))
if showResult:
# cv2.imshow("test", dstimg)
# self.enterWaitLoop()
plt.imshow(dstimg)
plt.show()
return dstimg
def getAverageValue(self):
return cv2.mean(self.img)[0]
def getDFT(self, img2dft=None, showdft=False):
"""
        Return the shifted complex DFT of the image (or of img2dft if given).
        """
        if img2dft is None:
            img2dft = self.img
        dft_A = cv2.dft(np.float32(img2dft), flags=cv2.DFT_COMPLEX_OUTPUT | cv2.DFT_SCALE)
dft_A = np.fft.fftshift(dft_A)
if showdft:
self.showSpecturm(dft_A)
return dft_A
def getButterworthFilter(self, stopband2=5, order=3, showdft=False):
"""
Get Butterworth filter in frequency domain.
"""
h, w = self.img.shape[0], self.img.shape[1] # no optimization
P = h/2
Q = w/2
dst = np.zeros((h, w, 2), np.float64)
for i in range(h):
for j in range(w):
r2 = float((i-P)**2+(j-Q)**2)
if r2 == 0:
r2 = 1.0
dst[i,j] = 1/(1+(r2/stopband2)**order)
dst = np.float64(dst)
if showdft:
f = cv2.magnitude(dst[:,:,0], dst[:,:,1])
# cv2.imshow("butterworth", f)
# self.enterWaitLoop()
plt.imshow(f)
plt.show()
return dst
def getShannonEntropy(self, srcImage=None):
"""
calculate the shannon entropy for an image
"""
        if srcImage is None:
            srcImage = self.img
        histogram = cv2.calcHist([srcImage], [0], None, [256], [0, 256])
histLen = sum(histogram)
samplesPossiblity = [float(h) / histLen for h in histogram]
return -sum([p * math.log(p, 2) for p in samplesPossiblity if p != 0])
# ------------------------------------------------ Highgui functions
def showImage(self, img):
"""
Show input image with highgui.
"""
cv2.imshow("test", img)
self.enterWaitLoop()
def showSpecturm(self, dft_result):
"""
        Show spectrum graph.
"""
cv2.normalize(dft_result, dft_result, 0.0, 1.0, cv2.cv.CV_MINMAX)
# Split fourier into real and imaginary parts
image_Re, image_Im = cv2.split(dft_result)
# Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
magnitude = cv2.sqrt(image_Re ** 2.0 + image_Im ** 2.0)
# Compute log(1 + Mag)
log_spectrum = cv2.log(1.0 + magnitude)
# normalize and display the results as rgb
cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.cv.CV_MINMAX)
cv2.imshow("Spectrum", log_spectrum)
self.enterWaitLoop()
def onMouse(self, event, x, y, flags, param):
"""
Mouse callback funtion for setting ROI.
"""
if event == cv2.EVENT_LBUTTONDOWN:
self.dragStart = x, y
self.sel = 0,0,0,0
elif self.dragStart:
#print flags
if flags & cv2.EVENT_FLAG_LBUTTON:
minpos = min(self.dragStart[0], x), min(self.dragStart[1], y)
maxpos = max(self.dragStart[0], x), max(self.dragStart[1], y)
self.sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
img = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)
cv2.rectangle(img, (self.sel[0], self.sel[1]), (self.sel[2], self.sel[3]), (0,255,255), 1)
cv2.imshow("SingleImageWindow", img)
else:
print "selection is complete. Press a to accept."
self.roiNeedUpadte = True
self.dragStart = None
def enterWaitLoop(self):
"""
Enter waitKey loop.
        This function makes sure that only one wait loop is running.
"""
if not(self.isInWaitLoop):
self.isInWaitLoop = True
print "DO NOT close the window directly. Press Esc to enter next step!!!"
while self.isInWaitLoop:
ch = cv2.waitKey()
if ch == 27:
break
if ch == ord('s'):
self.saveFile()
break
cv2.destroyAllWindows()
self.isInWaitLoop = False
class BatchProcessing():
"""
Process all the images in the given folder.
"""
resultArray = []
globalROI = None
def __init__(self, rootPath='./', roi=None):
print "Batch path: " + rootPath
if not os.path.isdir(rootPath):
rootPath = repr(rootPath)[2:-1]
if not os.path.isdir(rootPath):
return
self.rootPath = rootPath
self.listPaths = []
self.listFileNames = []
for fileName in os.listdir(rootPath):
if fileExp().match(fileName):
absPath = os.path.join(self.rootPath, fileName)
self.listPaths.append(absPath)
self.listFileNames.append(fileName)
print "Files count: " + str(len(self.listFileNames))
print self.listFileNames
self.processQueue = []
if roi:
self.globalROI = roi
self.loadImages()
def loadImages(self):
"""
Load all the images in the selected folder.
"""
for path in self.listPaths:
im = SingleImageProcess(fileName=path)
im.sel = self.globalROI
im.img = im.setROI()
# im.img = im.getGaussaianBlur()
im.img = im.getButterworthBlur()
self.processQueue.append(im)
def getCenterPoints(self, showResult=False):
"""
        Calculate center points of all the images and save them into resultArray
"""
print "============== Getting Center Point =========="
centerPoints = []
for im in self.processQueue:
pcenter = im.getCenterPoint()
centerPoints.append(pcenter)
if showResult:
plt.plot(self.resultArray)
plt.title('Center Points')
plt.xlabel('Picture numbers')
plt.ylabel('Gray scale')
plt.show()
self.resultArray = centerPoints
return centerPoints
def getPointsInACol(self, LocX=0, pointCount=10, showResult=False):
"""
Return value of pointCount=10 points when x = LocX
resultArray includes pointCount=10 arrays, each array
has len(self.processQueue) numbers in float.
"""
print "========================= getPointsInACol =========================="
self.resultArray = [[]]*pointCount
height = self.processQueue[0].img.shape[1]
yInterval = height/pointCount
for i in range(pointCount):
tmpArr = []
for im in self.processQueue:
avg4x4Val = im.getAvgIn4x4rect(LocX, i*yInterval)
tmpArr.append(avg4x4Val)
self.resultArray[i] = tmpArr
if showResult:
plt.plot(range(0,height,yInterval), self.resultArray)
plt.title('Points in a col when x==' + str(LocX) )
plt.xlabel('Y position')
plt.ylabel('Gray scale')
plt.show()
return self.resultArray
def getPointsInARow(self, LocY=0, pointCount=10, showResult=False):
"""
Return value of pointCount=10 points when y = LocY
resultArray includes pointCount=10 arrays, each array
has len(self.processQueue) numbers in float.
"""
print "========================= getPointsInARow =========================="
self.resultArray = [[]]*pointCount
width = self.processQueue[0].img.shape[0]
xInterval = width/pointCount
for i in range(pointCount):
tmpArr = []
for im in self.processQueue:
avg4x4Val = im.getAvgIn4x4rect(i*xInterval, LocY)
tmpArr.append(avg4x4Val)
self.resultArray[i] = tmpArr
if showResult:
plt.plot(range(0,width,xInterval), self.resultArray)
plt.title('Points in a row when y==' + str(LocY) )
plt.xlabel('X position')
plt.ylabel('Gray scale')
plt.show()
return self.resultArray
def getAverageValues(self, showResult=False):
"""
Return average value of all images.
"""
averageArr = []
for im in self.processQueue:
averageArr.append(im.getAverageValue())
if showResult:
plt.plot(range(len(self.processQueue)), averageArr)
plt.title('Average value')
plt.xlabel('Picture numbers')
plt.ylabel('Gray scale')
plt.show()
return averageArr
def getCenterPointsWithoutShift(self, LocX=0, pointCount=10, showResult=False):
"""
Return gray scale of center points removing average value
as global shift.
"""
centerPoints = self.getCenterPoints()
avgPoints = self.getAverageValues()
dstPoints = np.subtract(centerPoints, avgPoints)
self.resultArray = dstPoints
if showResult:
plt.plot(dstPoints)
plt.title('Center value without shift')
plt.xlabel('Picture numbers')
plt.ylabel('Center Point\'s Gray scale')
plt.show()
return dstPoints
def getShannonEntropies(self, showResult=False):
"""
Return average value of all images.
"""
entropyArr = []
for im in self.processQueue:
entropyArr.append(im.getShannonEntropy())
if showResult:
plt.plot(range(len(self.processQueue)), entropyArr)
plt.title('Entropy value')
plt.xlabel('Picture numbers')
plt.ylabel('Entropy')
plt.show()
return entropyArr
def plotGraphs(dataArr):
dataCount = len(dataArr)
graphLayout = 2 * 100 + (dataCount / 2)*10 + 1
for i,data in enumerate(dataArr):
plt.subplot(graphLayout + i)
plt.plot(data)
plt.show()
if __name__ == "__main__":
"""
Following codes are for test.
"""
singleTest = SingleImageProcess()
singleTest.simpleDemo()
print "Entropy: " + str(singleTest.getShannonEntropy())
singleTest.getGaussaianBlur()
singleTest.getDFT(showdft=True)
singleTest.getButterworthFilter(showdft=True)
singleTest.getButterworthBlur(stopband2=100,showResult=True)
print "avg=" + str(singleTest.getAverageValue())
print singleTest.getAvgIn4x4rect()
print singleTest.getCenterPoint()
batchTest = BatchProcessing()
batchTest.getCenterPoints(showResult=True)
batchTest.getShannonEntropies(showResult=True)
batchTest.getPointsInACol(100, showResult=True)
avgArr = batchTest.getAverageValues(showResult=True)
batchTest.getCenterPointsWithoutShift(50, showResult=True)
entpArr = batchTest.getShannonEntropies(showResult=True)
plotGraphs([avgArr, entpArr]) | lgpl-2.1 |
public-ink/public-ink | server/appengine/lib/matplotlib/gridspec.py | 10 | 16112 | """
:mod:`~matplotlib.gridspec` is a module which specifies the location
of the subplot in the figure.
``GridSpec``
    specifies the geometry of the grid in which a subplot will be
    placed. The number of rows and number of columns of the grid
need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
``SubplotSpec``
specifies the location of the subplot in the given *GridSpec*.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.transforms as mtransforms
import numpy as np
import warnings
class GridSpecBase(object):
"""
A base class of GridSpec that specifies the geometry of the grid
    in which a subplot will be placed.
"""
def __init__(self, nrows, ncols,
height_ratios=None, width_ratios=None):
"""
The number of rows and number of columns of the grid need to
be set. Optionally, the ratio of heights and widths of rows and
columns can be specified.
"""
#self.figure = figure
self._nrows , self._ncols = nrows, ncols
self.set_height_ratios(height_ratios)
self.set_width_ratios(width_ratios)
def get_geometry(self):
'get the geometry of the grid, e.g., 2,3'
return self._nrows, self._ncols
def get_subplot_params(self, fig=None):
pass
def new_subplotspec(self, loc, rowspan=1, colspan=1):
"""
        create and return a SubplotSpec instance.
"""
loc1, loc2 = loc
subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
return subplotspec
def set_width_ratios(self, width_ratios):
if width_ratios is not None and len(width_ratios) != self._ncols:
raise ValueError('Expected the given number of width ratios to '
'match the number of columns of the grid')
self._col_width_ratios = width_ratios
def get_width_ratios(self):
return self._col_width_ratios
def set_height_ratios(self, height_ratios):
if height_ratios is not None and len(height_ratios) != self._nrows:
raise ValueError('Expected the given number of height ratios to '
'match the number of rows of the grid')
self._row_height_ratios = height_ratios
def get_height_ratios(self):
return self._row_height_ratios
def get_grid_positions(self, fig):
"""
return lists of bottom and top position of rows, left and
right positions of columns.
"""
nrows, ncols = self.get_geometry()
subplot_params = self.get_subplot_params(fig)
left = subplot_params.left
right = subplot_params.right
bottom = subplot_params.bottom
top = subplot_params.top
wspace = subplot_params.wspace
hspace = subplot_params.hspace
totWidth = right-left
totHeight = top-bottom
# calculate accumulated heights of columns
cellH = totHeight/(nrows + hspace*(nrows-1))
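        # the available height is split into nrows cells separated by (nrows - 1)
        # gaps of hspace * cellH each, hence the denominator above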
sepH = hspace*cellH
if self._row_height_ratios is not None:
netHeight = cellH * nrows
tr = float(sum(self._row_height_ratios))
cellHeights = [netHeight*r/tr for r in self._row_height_ratios]
else:
cellHeights = [cellH] * nrows
sepHeights = [0] + ([sepH] * (nrows-1))
cellHs = np.add.accumulate(np.ravel(list(zip(sepHeights, cellHeights))))
# calculate accumulated widths of rows
cellW = totWidth/(ncols + wspace*(ncols-1))
sepW = wspace*cellW
if self._col_width_ratios is not None:
netWidth = cellW * ncols
tr = float(sum(self._col_width_ratios))
cellWidths = [netWidth*r/tr for r in self._col_width_ratios]
else:
cellWidths = [cellW] * ncols
sepWidths = [0] + ([sepW] * (ncols-1))
cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))
figTops = [top - cellHs[2*rowNum] for rowNum in range(nrows)]
figBottoms = [top - cellHs[2*rowNum+1] for rowNum in range(nrows)]
figLefts = [left + cellWs[2*colNum] for colNum in range(ncols)]
figRights = [left + cellWs[2*colNum+1] for colNum in range(ncols)]
return figBottoms, figTops, figLefts, figRights
def __getitem__(self, key):
"""
        create and return a SubplotSpec instance.
"""
nrows, ncols = self.get_geometry()
total = nrows*ncols
if isinstance(key, tuple):
try:
k1, k2 = key
except ValueError:
raise ValueError("unrecognized subplot spec")
if isinstance(k1, slice):
row1, row2, _ = k1.indices(nrows)
else:
if k1 < 0:
k1 += nrows
if k1 >= nrows or k1 < 0 :
raise IndexError("index out of range")
row1, row2 = k1, k1+1
if isinstance(k2, slice):
col1, col2, _ = k2.indices(ncols)
else:
if k2 < 0:
k2 += ncols
if k2 >= ncols or k2 < 0 :
raise IndexError("index out of range")
col1, col2 = k2, k2+1
num1 = row1*ncols + col1
num2 = (row2-1)*ncols + (col2-1)
# single key
else:
if isinstance(key, slice):
num1, num2, _ = key.indices(total)
num2 -= 1
else:
if key < 0:
key += total
if key >= total or key < 0 :
raise IndexError("index out of range")
num1, num2 = key, None
return SubplotSpec(self, num1, num2)
class GridSpec(GridSpecBase):
"""
    A class that specifies the geometry of the grid in which a subplot
    will be placed. The location of the grid is determined in a similar
    way to SubplotParams.
"""
def __init__(self, nrows, ncols,
left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None,
width_ratios=None, height_ratios=None):
"""
The number of rows and number of columns of the
grid need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
"""
#self.figure = figure
self.left=left
self.bottom=bottom
self.right=right
self.top=top
self.wspace=wspace
self.hspace=hspace
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
#self.set_width_ratios(width_ratios)
#self.set_height_ratios(height_ratios)
_AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]
def update(self, **kwargs):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc.
"""
for k, v in six.iteritems(kwargs):
if k in self._AllowedKeys:
setattr(self, k, v)
else:
raise AttributeError("%s is unknown keyword" % (k,))
from matplotlib import _pylab_helpers
from matplotlib.axes import SubplotBase
for figmanager in six.itervalues(_pylab_helpers.Gcf.figs):
for ax in figmanager.canvas.figure.axes:
# copied from Figure.subplots_adjust
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if ax._sharex is not None and isinstance(ax._sharex, SubplotBase):
if ax._sharex.get_subplotspec().get_gridspec() == self:
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif ax._sharey is not None and isinstance(ax._sharey,SubplotBase):
if ax._sharey.get_subplotspec().get_gridspec() == self:
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ss = ax.get_subplotspec().get_topmost_subplotspec()
if ss.get_gridspec() == self:
ax.update_params()
ax.set_position(ax.figbox)
def get_subplot_params(self, fig=None):
"""
return a dictionary of subplot layout parameters. The default
parameters are from rcParams unless a figure attribute is set.
"""
from matplotlib.figure import SubplotParams
import copy
if fig is None:
kw = dict([(k, rcParams["figure.subplot."+k]) \
for k in self._AllowedKeys])
subplotpars = SubplotParams(**kw)
else:
subplotpars = copy.copy(fig.subplotpars)
update_kw = dict([(k, getattr(self, k)) for k in self._AllowedKeys])
subplotpars.update(**update_kw)
return subplotpars
def locally_modified_subplot_params(self):
return [k for k in self._AllowedKeys if getattr(self, k)]
def tight_layout(self, fig, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_subplotspec_list,
get_tight_layout_figure,
get_renderer)
subplotspec_list = get_subplotspec_list(fig.axes, grid_spec=self)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(fig)
kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect,
)
self.update(**kwargs)
class GridSpecFromSubplotSpec(GridSpecBase):
"""
GridSpec whose subplot layout parameters are inherited from the
location specified by a given SubplotSpec.
"""
def __init__(self, nrows, ncols,
subplot_spec,
wspace=None, hspace=None,
height_ratios=None, width_ratios=None):
"""
        The number of rows and number of columns of the grid need to
        be set. An instance of SubplotSpec, from which the layout
        parameters will be inherited, must also be given. The wspace
        and hspace of the layout can be optionally specified, otherwise
        the default values (from the figure or rcParams) will be used.
"""
self._wspace=wspace
self._hspace=hspace
self._subplot_spec = subplot_spec
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
def get_subplot_params(self, fig=None):
"""
return a dictionary of subplot layout parameters.
"""
if fig is None:
hspace = rcParams["figure.subplot.hspace"]
wspace = rcParams["figure.subplot.wspace"]
else:
hspace = fig.subplotpars.hspace
wspace = fig.subplotpars.wspace
if self._hspace is not None:
hspace = self._hspace
if self._wspace is not None:
wspace = self._wspace
figbox = self._subplot_spec.get_position(fig, return_all=False)
left, bottom, right, top = figbox.extents
from matplotlib.figure import SubplotParams
sp = SubplotParams(left=left,
right=right,
bottom=bottom,
top=top,
wspace=wspace,
hspace=hspace)
return sp
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
return self._subplot_spec.get_topmost_subplotspec()
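# Usage sketch (illustrative comment only; assumes `fig` is an existing Figure):
# nest a 1x3 grid inside cell 0 of an outer 2x1 grid.
#   outer = GridSpec(2, 1)
#   inner = GridSpecFromSubplotSpec(1, 3, subplot_spec=outer[0], wspace=0.05)
#   ax = fig.add_subplot(inner[0, 2])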
class SubplotSpec(object):
"""
specifies the location of the subplot in the given *GridSpec*.
"""
def __init__(self, gridspec, num1, num2=None):
"""
The subplot will occupy the num1-th cell of the given
gridspec. If num2 is provided, the subplot will span between
num1-th cell and num2-th cell.
        The index starts from 0.
"""
rows, cols = gridspec.get_geometry()
total = rows*cols
self._gridspec = gridspec
self.num1 = num1
self.num2 = num2
def get_gridspec(self):
return self._gridspec
def get_geometry(self):
"""
        get the subplot geometry, e.g., 2, 2, 3. Unlike SubplotParams,
        the index is 0-based
"""
rows, cols = self.get_gridspec().get_geometry()
return rows, cols, self.num1, self.num2
def get_position(self, fig, return_all=False):
"""
update the subplot position from fig.subplotpars
"""
gridspec = self.get_gridspec()
nrows, ncols = gridspec.get_geometry()
figBottoms, figTops, figLefts, figRights = \
gridspec.get_grid_positions(fig)
rowNum, colNum = divmod(self.num1, ncols)
figBottom = figBottoms[rowNum]
figTop = figTops[rowNum]
figLeft = figLefts[colNum]
figRight = figRights[colNum]
if self.num2 is not None:
rowNum2, colNum2 = divmod(self.num2, ncols)
figBottom2 = figBottoms[rowNum2]
figTop2 = figTops[rowNum2]
figLeft2 = figLefts[colNum2]
figRight2 = figRights[colNum2]
figBottom = min(figBottom, figBottom2)
figLeft = min(figLeft, figLeft2)
figTop = max(figTop, figTop2)
figRight = max(figRight, figRight2)
figbox = mtransforms.Bbox.from_extents(figLeft, figBottom,
figRight, figTop)
if return_all:
return figbox, rowNum, colNum, nrows, ncols
else:
return figbox
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
gridspec = self.get_gridspec()
if hasattr(gridspec, "get_topmost_subplotspec"):
return gridspec.get_topmost_subplotspec()
else:
return self
def __eq__(self, other):
# check to make sure other has the attributes
# we need to do the comparison
if not (hasattr(other, '_gridspec') and
hasattr(other, 'num1') and
hasattr(other, 'num2')):
return False
return all((self._gridspec == other._gridspec,
self.num1 == other.num1,
self.num2 == other.num2))
def __hash__(self):
return (hash(self._gridspec) ^
hash(self.num1) ^
hash(self.num2))
| gpl-3.0 |
kylerbrown/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nrhine1/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standard estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am.
    Stat. Assoc., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
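# Minimal usage sketch of the MCD estimator discussed above (illustrative only;
# the full comparison below is the actual example). `X` stands for any
# (n_samples, n_features) data array:
#   from sklearn.covariance import MinCovDet
#   robust = MinCovDet().fit(X)
#   robust.location_, robust.covariance_   # robust location / covariance estimates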
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
sdadia/helper_functions | setup.py | 1 | 1455 | from distutils.core import setup
setup(
name = 'helper_functions',
version = '2.0.10',
py_modules = ['helper_functions'],
author = 'Sahil Dadia',
author_email = '[email protected]',
url = 'https://github.com/sdadia/helper_functions.git',# use the URL to the github repo
    description = 'A simple module of simple functions for OpenCV and Python 3',
license = 'MIT',
keywords = ['opencv', 'helper', 'scripts'], # arbitrary keywords
classifiers = [
'Topic :: Utilities',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Natural Language :: English'
],
install_requires= [
'numpy',
'scipy',
'sklearn',
'matplotlib',
'imutils',
'natsort',
],
)
| mit |
henrykironde/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
jniediek/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 14 | 1313 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 6 | 9508 | """Testing for Spectral Biclustering methods"""
import numpy as np
import pytest
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster import SpectralCoclustering
from sklearn.cluster import SpectralBiclustering
from sklearn.cluster._bicluster import _scale_normalize
from sklearn.cluster._bicluster import _bistochastic_normalize
from sklearn.cluster._bicluster import _log_normalize
from sklearn.metrics import (consensus_score, v_measure_score)
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BiclusterMixin, BaseEstimator):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert np.all(X != -1)
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert len(i_ind) == m
assert len(j_ind) == n
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert model.rows_.shape == (3, 30)
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert consensus_score(model.biclusters_,
(rows, cols)) == 1
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
with pytest.raises(ValueError):
model.fit(mat)
continue
else:
model.fit(mat)
assert model.rows_.shape == (9, 30)
assert model.columns_.shape == (9, 30)
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert consensus_score(model.biclusters_,
(rows, cols)) == 1
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(mat, vectors,
n_clusters=2)
assert_almost_equal(v_measure_score(labels, [0, 0, 1, 1]), 1.0)
def test_perfect_checkerboard():
# XXX Previously failed on build bot (not reproducible)
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert consensus_score(model.biclusters_,
(rows, cols)) == 1
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert consensus_score(model.biclusters_,
(rows, cols)) == 1
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert consensus_score(model.biclusters_,
(rows, cols)) == 1
@pytest.mark.parametrize(
"args",
[{'n_clusters': (3, 3, 3)},
{'n_clusters': 'abc'},
{'n_clusters': (3, 'abc')},
{'method': 'unknown'},
{'n_components': 0},
{'n_best': 0},
{'svd_method': 'unknown'},
{'n_components': 3, 'n_best': 4}]
)
def test_errors(args):
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(**args)
with pytest.raises(ValueError):
model.fit(data)
def test_wrong_shape():
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
with pytest.raises(ValueError):
model.fit(data)
@pytest.mark.parametrize('est',
(SpectralBiclustering(), SpectralCoclustering()))
def test_n_features_in_(est):
X, _, _ = make_biclusters((3, 3), 3, random_state=0)
assert not hasattr(est, 'n_features_in_')
est.fit(X)
assert est.n_features_in_ == 3
@pytest.mark.parametrize("klass", [SpectralBiclustering, SpectralCoclustering])
@pytest.mark.parametrize("n_jobs", [None, 1])
def test_n_jobs_deprecated(klass, n_jobs):
# FIXME: remove in 0.25
depr_msg = ("'n_jobs' was deprecated in version 0.23 and will be removed "
"in 0.25.")
S, _, _ = make_biclusters((30, 30), 3, noise=0.5, random_state=0)
est = klass(random_state=0, n_jobs=n_jobs)
with pytest.warns(FutureWarning, match=depr_msg):
est.fit(S)
| bsd-3-clause |
marcharper/stationary | examples/entropic_equilibria_plots.py | 1 | 9181 | """Figures for the publication
"Entropic Equilibria Selection of Stationary Extrema in Finite Populations"
"""
from __future__ import print_function
import math
import os
import pickle
import sys
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.misc
import ternary
import stationary
from stationary.processes import incentives, incentive_process
## Global Font config for plots ###
font = {'size': 14}
matplotlib.rc('font', **font)
def compute_entropy_rate(N=30, n=2, m=None, incentive_func=None, beta=1.,
mu=None, exact=False, lim=1e-13, logspace=False):
if not m:
m = np.ones((n, n))
if not incentive_func:
incentive_func = incentives.fermi
if not mu:
# mu = (n-1.)/n * 1./(N+1)
mu = 1. / N
fitness_landscape = incentives.linear_fitness_landscape(m)
incentive = incentive_func(fitness_landscape, beta=beta, q=1)
edges = incentive_process.multivariate_transitions(
N, incentive, num_types=n, mu=mu)
s = stationary.stationary_distribution(edges, exact=exact, lim=lim,
logspace=logspace)
e = stationary.entropy_rate(edges, s)
return e, s
# Entropy Characterization Plots
def dict_max(d):
k0, v0 = list(d.items())[0]
for k, v in d.items():
if v > v0:
k0, v0 = k, v
return k0, v0
def plot_data_sub(domain, plot_data, gs, labels=None, sci=True, use_log=False):
# Plot Entropy Rate
ax1 = plt.subplot(gs[0, 0])
ax1.plot(domain, [x[0] for x in plot_data[0]], linewidth=2)
# Plot Stationary Probabilities and entropies
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
if use_log:
transform = math.log
else:
transform = lambda x: x
for i, ax, t in [(1, ax2, lambda x: x), (2, ax3, transform)]:
if labels:
for data, label in zip(plot_data, labels):
ys = list(map(t, [x[i] for x in data]))
ax.plot(domain, ys, linewidth=2, label=label)
else:
for data in plot_data:
ys = list(map(t, [x[i] for x in data]))
ax.plot(domain, ys, linewidth=2)
ax1.set_ylabel("Entropy Rate")
ax2.set_ylabel("Stationary\nExtrema")
if use_log:
ax3.set_ylabel("log RTE $H_v$")
else:
ax3.set_ylabel("RTE $H_v$")
if sci:
ax2.yaxis.get_major_formatter().set_powerlimits((0, 0))
ax3.yaxis.get_major_formatter().set_powerlimits((0, 0))
return ax1, ax2, ax3
def ER_figure_beta2(N, m, betas):
"""Varying Beta, two dimensional example"""
# Beta test
# m = [[1, 4], [4, 1]]
# Compute the data
ss = []
plot_data = [[]]
for beta in betas:
print(beta)
e, s = compute_entropy_rate(N=N, m=m, beta=beta, exact=True)
ss.append(s)
state, s_max = dict_max(s)
plot_data[0].append((e, s_max, e / s_max))
gs = gridspec.GridSpec(3, 2)
ax1, ax2, ax3 = plot_data_sub(betas, plot_data, gs, sci=False)
ax3.set_xlabel("Strength of Selection $\\beta$")
# Plot stationary distribution
ax4 = plt.subplot(gs[:, 1])
for s in ss[::4]:
ax4.plot(range(0, N+1), [s[(i, N-i)] for i in range(0, N+1)])
ax4.set_title("Stationary Distributions")
ax4.set_xlabel("Population States $(i , N - i)$")
def remove_boundary(s):
s1 = dict()
for k, v in s.items():
a, b, c = k
if a * b * c != 0:
s1[k] = v
return s1
def ER_figure_beta3(N, m, mu, betas, iss_states, labels, stationary_beta=0.35,
pickle_filename="figure_beta3.pickle"):
"""Varying Beta, three dimensional example"""
ss = []
plot_data = [[] for _ in range(len(iss_states))]
if os.path.exists(pickle_filename):
with open(pickle_filename, 'rb') as f:
plot_data = pickle.load(f)
else:
for beta in betas:
print(beta)
e, s = compute_entropy_rate(
N=N, m=m, n=3, beta=beta, exact=False, mu=mu, lim=1e-10)
ss.append(s)
for i, iss_state in enumerate(iss_states):
s_max = s[iss_state]
plot_data[i].append((e, s_max, e / s_max))
with open(pickle_filename, 'wb') as f:
pickle.dump(plot_data, f)
gs = gridspec.GridSpec(3, 2)
ax1, ax2, ax3 = plot_data_sub(betas, plot_data, gs, labels=labels,
use_log=True, sci=False)
ax3.set_xlabel("Strength of selection $\\beta$")
ax2.legend(loc="upper right")
# Plot example stationary
ax4 = plt.subplot(gs[:, 1])
_, s = compute_entropy_rate(
N=N, m=m, n=3, beta=stationary_beta, exact=False, mu=mu, lim=1e-15)
_, tax = ternary.figure(ax=ax4, scale=N,)
tax.heatmap(s, cmap="jet", style="triangular")
tax.ticks(axis='lbr', linewidth=1, multiple=10, offset=0.015)
tax.clear_matplotlib_ticks()
ax4.set_xlabel("Population States $a_1 + a_2 + a_3 = N$")
# tax.left_axis_label("$a_1$")
# tax.right_axis_label("$a_2$")
# tax.bottom_axis_label("$a_3$")
def ER_figure_N(Ns, m, beta=1, labels=None):
"""Varying population size."""
ss = []
plot_data = [[] for _ in range(3)]
n = len(m[0])
for N in Ns:
print(N)
mu = 1 / N
norm = float(scipy.misc.comb(N+n, n))
e, s = compute_entropy_rate(
N=N, m=m, n=3, beta=beta, exact=False, mu=mu, lim=1e-10)
ss.append(s)
iss_states = [(N, 0, 0), (N / 2, N / 2, 0), (N / 3, N / 3, N / 3)]
for i, iss_state in enumerate(iss_states):
s_max = s[iss_state]
plot_data[i].append((e, s_max, e / (s_max * norm)))
# Plot data
gs = gridspec.GridSpec(3, 1)
ax1, ax2, ax3 = plot_data_sub(Ns, plot_data, gs, labels, use_log=True, sci=False)
ax2.legend(loc="upper right")
ax3.set_xlabel("Population Size $N$")
def ER_figure_mu(N, mus, m, iss_states, labels, beta=1.,
pickle_filename="figure_mu.pickle"):
"""
Plot entropy rates and trajectory entropies for varying mu.
"""
# Compute the data
ss = []
plot_data = [[] for _ in range(len(iss_states))]
if os.path.exists(pickle_filename):
with open(pickle_filename, 'rb') as f:
plot_data = pickle.load(f)
else:
for mu in mus:
print(mu)
e, s = compute_entropy_rate(
N=N, m=m, n=3, beta=beta, exact=False, mu=mu, lim=1e-10,
logspace=True)
ss.append(s)
for i, iss_state in enumerate(iss_states):
s_max = s[iss_state]
plot_data[i].append((e, s_max, e / s_max))
with open(pickle_filename, 'wb') as f:
pickle.dump(plot_data, f)
# Plot data
gs = gridspec.GridSpec(3, 1)
gs.update(hspace=0.5)
ax1, ax2, ax3 = plot_data_sub(mus, plot_data, gs, labels, use_log=True)
ax2.legend(loc="upper right")
ax3.set_xlabel("Mutation rate $\mu$")
if __name__ == '__main__':
fig_num = sys.argv[1]
if fig_num == "1":
## Figure 1
# Varying beta, two dimensional
N = 30
m = [[1, 2], [2, 1]]
betas = np.arange(0, 8, 0.2)
ER_figure_beta2(N, m, betas)
plt.tight_layout()
plt.show()
if fig_num == "2":
## Figure 2
# # Varying beta, three dimensional
N = 60
mu = 1. / N
m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
iss_states = [(N, 0, 0), (N / 2, N / 2, 0), (N / 3, N / 3, N / 3)]
labels = ["$v_0$", "$v_1$", "$v_2$"]
betas = np.arange(0.02, 0.6, 0.02)
ER_figure_beta3(N, m, mu, betas, iss_states, labels)
plt.show()
if fig_num == "3":
## Figure 3
# Varying mutation rate figure
N = 42
mus = np.arange(0.0001, 0.015, 0.0005)
m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
iss_states = [(N, 0, 0), (N / 2, N / 2, 0), (N / 3, N / 3, N / 3)]
labels = ["$v_0$: (42, 0, 0)", "$v_1$: (21, 21, 0)", "$v_2$: (14, 14, 14)"]
# labels = ["$v_0$", "$v_1$", "$v_2$"]
ER_figure_mu(N, mus, m, iss_states, labels, beta=1.)
plt.show()
if fig_num == "4":
## Figure 4
# Note: The RPS landscape takes MUCH longer to converge!
# Consider using the C++ implementation instead for larger N.
N = 120 # Manuscript uses 180
mu = 1. / N
m = incentives.rock_paper_scissors(a=-1, b=-1)
_, s = compute_entropy_rate(
N=N, m=m, n=3, beta=1.5, exact=False, mu=mu, lim=1e-16)
_, tax = ternary.figure(scale=N)
tax.heatmap(remove_boundary(s), cmap="jet", style="triangular")
tax.ticks(axis='lbr', linewidth=1, multiple=60)
tax.clear_matplotlib_ticks()
plt.show()
if fig_num == "5":
# ## Figure 5
# Varying Population Size
Ns = range(6, 6*6, 6)
m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
labels = ["$v_0$", "$v_1$", "$v_2$"]
ER_figure_N(Ns, m, beta=1, labels=labels)
plt.show()
| mit |
xubenben/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
    w[n_informative:] = 0.0  # only the top features impact the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
pylayers/pylayers | pylayers/antprop/examples/ex_antenna2.py | 3 | 1396 | from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluate the antenna vsh coefficient with a downsampling factor of 2
4 : display the 16 first
"""
filename = 'S1R1.mat'
A = Antenna(filename,directory='ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
#
# Calculate Vector Spherical Harmonics
#
A = vsh(A,dsf)
v = np.abs(A.C.Br.s1)
u = np.nonzero(v==v.max())
plt.figure(figsize=(15,15))
for l in range(16):
plt.subplot(4,4,l+1)
plt.plot(np.real(A.C.Br.s1[:,l,0]),np.imag(A.C.Br.s1[:,l,0]),'k')
plt.plot(np.real(A.C.Br.s1[:,l,1]),np.imag(A.C.Br.s1[:,l,1]),'b')
plt.plot(np.real(A.C.Br.s1[:,l,2]),np.imag(A.C.Br.s1[:,l,2]),'r')
plt.plot(np.real(A.C.Br.s1[:,l,2]),np.imag(A.C.Br.s1[:,l,2]),'g')
plt.axis([-0.6,0.6,-0.6,0.6])
plt.title('l='+str(l))
plt.show()
| mit |
yonglehou/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
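# Example call (illustrative sketch): time two metrics over dense and CSR inputs
# for a single problem size; the result has shape (2, 2, 1, 1, 1).
#   times = benchmark(metrics=[METRICS['f1'], METRICS['hamming']],
#                     formats=[FORMATS['dense'], FORMATS['csr']],
#                     samples=1000, classes=4, density=.2, n_times=5)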
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
UNR-AERIAL/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
AleKit/TFGDM17 | dm_spectra_f.py | 1 | 30644 | import numpy as np
import pylab as pl
import scipy as sp
import bisect
from scipy.interpolate import interp1d
from scipy.interpolate import spline
from matplotlib import pyplot as plt
import pyfits
pl.rcParams['figure.figsize'] = (10.0, 7.0)
pl.rcParams['font.size'] = 18
pl.rcParams['font.family'] = 'serif'
pl.rcParams['lines.linewidth'] = 3
pathforfigs ='/home/ale/TFGF/'
pathforaux='/home/ale/TFGF'
filename=pathforaux+'/CascadeSpectra/Spectra/AtProduction_gammas.dat'
path=pathforaux+"/sensitivities/"
#vts_file = np.genfromtxt(path+"Instrument/VERITAS_V6_std_50hr_5sigma_VERITAS2014_DiffSens.dat")
vts_file = np.genfromtxt(path+"Instrument/VERITAS_ICRC2015_envelope.dat")
magic_file = np.genfromtxt(path+"Instrument/MAGIC_DiffSensCU.dat")
hess_file_combined = np.genfromtxt(path+"Instrument/HESS_August2015_CT15_Combined_Std.dat")
hess_file_stereo = np.genfromtxt(path+"Instrument/HESS_August2015_CT15_Stereo_Std.dat")
hess_file_envelope = np.genfromtxt(path+"Instrument/HESS_ICRC2015_envelope.dat")
hawc_1yr_file = np.genfromtxt(path+"Instrument/HAWC300_1y_QuarterDecade_DiffSens.dat")
hawc_5yr_file = np.genfromtxt(path+"Instrument/HAWC300_5y_QuarterDecade_DiffSens.dat")
fermi_b0_file = np.genfromtxt(path+"Instrument/fermi_lat_pass8_l0_b0.dat")
fermi_b30_file = np.genfromtxt(path+"Instrument/fermi_lat_pass8_l0_b30.dat")
fermi_b90_file = np.genfromtxt(path+"Instrument/fermi_lat_pass8_l0_b90.dat")
hs_file = np.genfromtxt(path+"Instrument/hiscore.dat")
lhaaso_file = np.genfromtxt(path+"Instrument/lhaaso.dat")
cta_n_file = np.genfromtxt(path+"North/CTA-Performance-North-50h-DiffSens.txt",skip_header=9)
cta_s_file = np.genfromtxt(path+"South/CTA-Performance-South-50h-DiffSens.txt",skip_header=9)
Qe = 1.602176462e-19
TeV = 1
GeV = 1e-3 * TeV
MeV = 1e-6 * TeV
erg = 0.624151 * TeV
eV = 1e-9 * GeV
def Smooth(E, F):
logE = np.log10(E)
logF = np.log10(F)
logEnew = np.linspace(logE.min(), logE.max(), 300)
logF_smooth = spline(logE, logF, logEnew)
Enew = [10**x for x in logEnew]
F_smooth = [10**x for x in logF_smooth]
return (Enew, F_smooth)
def getMAGIC(magic_file, Escale = GeV, smooth=True):
x = 0.5*(magic_file[:,0] + magic_file[:,1]) * Escale
y = magic_file[:,2]
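    # Sensitivity is tabulated in Crab Units: multiply by a log-parabola Crab
    # reference spectrum (TeV^-1 cm^-2 s^-1) and by E^2, then convert TeV to erg
    # via 1e12 eV/TeV * Qe J/eV / 1e-7 J/erg. getVERITAS/getHESS below apply the
    # same conversion with their own Crab coefficients.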
y_m = [y0 * 3.39e-11 * x0**(-2.51 - 0.21*np.log10(x0)) * x0 * x0 * 1e12 * Qe / 1e-7 for (x0, y0) in zip(x,y)]
if smooth:
return Smooth(x, y_m)
else:
return (x,y_m)
def getVERITAS(veritas_file, index=1, smooth=True):
x = veritas_file[:,0]
y = veritas_file[:,index]
y_m = [y0 * 3.269e-11 * x0**(-2.474 - 0.191*np.log10(x0)) * x0 * x0 * 1e12 * Qe / 1e-7 for (x0, y0) in zip(x,y)]
if smooth:
return Smooth(x, y_m)
else:
return (x,y_m)
def getHESS(hess_file, smooth=True):
x = hess_file[:,0]
y = hess_file[:,1]
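    # The H.E.S.S. file appears to tabulate log10(E/TeV) in its first column, so
    # undo the log before applying the same TeV -> erg conversion as above.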
x_m = [10**x0 for x0 in x]
y_m = [y0 * x0 * x0 * 1e12 * Qe / 1e-7 for (x0,y0) in zip(x_m,y)]
if smooth:
return Smooth(x_m, y_m)
else:
return (x_m,y_m)
def getHESSEnvelope(hess_file, smooth=True):
x = hess_file[:,0]
y = hess_file[:,1]
y_m = [y0 * x0 * x0 / erg for (x0,y0) in zip(x,y)]
if smooth:
return Smooth(x, y_m)
else:
return (x,y_m)
def getHAWCFermi(hfile, Escale = GeV, smooth=True):
# MeV scale for Fermi, GeV for HAWC
x = hfile[:,0]
y = hfile[:,1]
if smooth:
return Smooth(x * Escale, y)
else:
return (x * Escale,y)
def getCTA(ctafile, smooth=True):
x = (ctafile[:,0] + ctafile[:,1])/2.
y = ctafile[:,2]
if smooth:
return Smooth(x, y)
else:
return (x,y)
# Some useful units
#GeV = 1
#TeV = 1e3 * GeV
#erg = 624.15 * GeV
#eV = 1e-9 * GeV
def InterpolateTauEBL(E, redshift):
#filename = "/a/home/tehanu/santander/ebl/ebl_z%0.1f.dat" % redshift
filename = path + "ebl_z%0.1f.dat" % redshift
eblfile = np.genfromtxt(filename)
z = eblfile[:,0]
Egamma = eblfile[:,1] * TeV
Tau = eblfile[:,3]
EBLInterp = interp1d(np.log10(Egamma), np.log10(Tau), kind='linear')
TauValues = []
for i in range(len(E)):
if E[i] < Egamma[0]:
TauValues.append(np.log10(Tau[0]))
elif E[i] > Egamma[-1]:
TauValues.append(np.log10(Tau[-1]))
else:
TauValues.append(EBLInterp(np.log10(E[i])))
return [10**tau for tau in TauValues]
def SpectrumFlux(A, E, gamma, redshift = 0, Enorm = 1 * GeV, b = 0):
if redshift > 0:
tau = InterpolateTauEBL(E, redshift)
else:
tau = [0 for x in E]
opacity = [np.exp(-t) for t in tau]
return [A * (E0/Enorm)**(-gamma + b * np.log(E0/Enorm)) * exptau for (E0, exptau) in zip(E, opacity)]
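# Example usage of SpectrumFlux (illustrative numbers only, not tied to any file above):
#   E_demo = np.logspace(-2, 2, 50) * TeV
#   F_demo = SpectrumFlux(A=1e-12, E=E_demo, gamma=2.0, redshift=0.1, Enorm=1 * TeV)
# returns dN/dE in the same units as A, attenuated by exp(-tau) from the EBL tables.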
def CrabSpectrumBroad(E):
C = -0.12
logf0 = -10.248
Eic = 48*GeV # GeV
a = 2.5
return E**(-2)* 10**(logf0+C*(np.abs(np.log10(E/Eic)))**a)
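# CrabSpectrumBroad implements a broad-band Crab parametrization,
# E^2 dN/dE = 10**(logf0 + C*|log10(E/Eic)|**a) with Eic = 48 GeV;
# the E**(-2) prefactor converts it back to dN/dE.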
def SpectrumIntegrator(Ec, Ewidths, Flux):
nbins = 500
Ebins = np.logspace(np.log10(Emin), np.log10(Emax), nbins)
Ecenters = (Ebins[:-1]+Ebins[1:])/2
Flux = SpectrumFlux(A, Ecenters, gamma)
Ewidths = (Ebins[1:]-Ebins[:-1])
return (Ecenters, np.sum(Ewidths * Flux))
def SpectrumIntAboveEnergy(Ec, Ewidths, Flux):
prod = np.array([f * ew for (f, ew) in zip(Flux, Ewidths)])
return [np.sum(prod[bisect.bisect(Ec,En):]) / TeV for En in Ec]
def plotSensitivity(ax, filename, legend="", xscale = 1, yscale=1., color='black', ls='-', lw=3, multE=False, delim=',', inCU=False, CrabEscale = 1):
(Ec, flux) = plotCrabSpectrum(ax, scale=1, plot=False)
f = interp1d(Ec, flux)
if legend == "":
legend = filename
eblfile = np.genfromtxt(filename, delimiter=delim)
x = eblfile[:,0] * xscale
y = eblfile[:,1] * yscale
l = zip(x,y)
l.sort()
xx = [x for (x,y) in l]
yy = [y for (x,y) in l]
if inCU:
yy = f(xx) * yy * 1e-2
if multE:
zz = [xi * yi / TeV for (xi, yi) in zip(xx, yy)]
ax.loglog(xx, zz, label=legend, linewidth=lw, color=color, ls=ls)
else:
ax.loglog(xx,yy, label=legend, linewidth=lw, color=color, ls=ls)
e=1*GeV
e**2*CrabSpectrumBroad(e)
def plotIntegralSpectrum(ax, legend="", color='black', redshift=0, gamma=2, A=1e-8, Enorm = 1 * GeV, scale=1e-2, b = 0, fill=True, lwf=0.8, lwe=1, plot=True):
Emin = 0.1 * GeV
Emax = 1e8 * GeV
nbins = 1500
Ebins = np.logspace(np.log10(Emin), np.log10(Emax), nbins)
Ec = (Ebins[:-1]+Ebins[1:])/2
Ewidths = (Ebins[1:]-Ebins[:-1])
Flux = SpectrumFlux(A, Ec, gamma, redshift, Enorm, b)
IntFlux = SpectrumIntAboveEnergy(Ec, Ewidths, Flux)
if fill:
lowedge = [1e-16 for x in IntFlux]
if plot:
ax.fill_between(Ec,scale*Ec*IntFlux, lowedge, label="z = " + str(redshift),lw=0, alpha=0.08, color='#009933')
ax.loglog(Ec,scale*Ec*IntFlux,lw=lwf, color='#009933', ls='-',alpha=0.5)
return (Ec, scale*Ec*IntFlux)
else:
if plot:
ax.loglog(Ec,scale*Ec*IntFlux,lw=lwe, color=color, ls='--')
return (Ec, scale*Ec*IntFlux)
def plotSpectrum(ax, legend="", color='black', redshift=0, gamma=2, A=1e-8, Enorm = 1 * GeV, scale=1e-2, b = 0, fill=True, lwf=0.8, lwe=1, plot=True, fcolor='#009933', alpha=0.03):
Emin = 0.1 * GeV
Emax = 1e8 * GeV
nbins = 1500
Ebins = np.logspace(np.log10(Emin), np.log10(Emax), nbins)
Ec = (Ebins[:-1]+Ebins[1:])/2
Ewidths = (Ebins[1:]-Ebins[:-1])
Flux = SpectrumFlux(A, Ec, gamma, redshift, Enorm, b)
if fill:
lowedge = [1e-16 for x in Flux]
if plot:
ax.fill_between(Ec,scale*Ec*Ec*Flux, lowedge, label="z = " + str(redshift),lw=0, alpha=alpha, color=fcolor)
ax.loglog(Ec,scale*Ec*Ec*Flux,lw=lwf, color=color, ls='-',alpha=0.5)
return (Ec, scale*Ec*Ec*Flux)
else:
if plot:
ax.loglog(Ec,scale*Ec*Ec*Flux,lw=lwe, color=color, ls='--')
return (Ec, scale*Ec*Ec*Flux)
def plotCrabSpectrumBroad(ax, legend="", color='black', scale=1, fill=True, lwf=0.8, lwe=1, plot=True, fcolor='grey', alpha=0.03):
Emin = 0.1 * GeV
Emax = 1e8 * GeV
nbins = 1500
Ebins = np.logspace(np.log10(Emin), np.log10(Emax), nbins)
Ec = (Ebins[:-1]+Ebins[1:])/2
Flux = CrabSpectrumBroad(Ec)
if fill:
lowedge = [1e-16 for x in Flux]
if plot:
ax.fill_between(Ec,scale*Ec*Ec*Flux, lowedge, lw=0, alpha=alpha, color=fcolor)
ax.loglog(Ec,scale*Ec*Ec*Flux,lw=lwf, color=color, ls='-',alpha=0.5)
return (Ec, scale*Ec*Ec*Flux)
else:
if plot:
ax.loglog(Ec,scale*Ec*Ec*Flux,lw=lwe, color=color, ls='--')
return (Ec, scale*Ec*Ec*Flux)
Ns=1e3
fullsky = 4 * np.pi
def getDMspectrum(option='e',finalstate='b',mass=1000,Jfactor=1.7e19,boost=1):
#Options:
# e: outputs (E, dN/dE)
# e2: outputs (E, E**2 dN/dE)
# x: outputs (x,dN/dx)
# mass in GeV
# Jfactor in GeV2cm-5
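    # Output units as implemented below: 'e' -> (E [TeV], dN/dE [cm^-2 s^-1 TeV^-1]);
    # 'e2' -> (E [TeV], E^2 dN/dE [erg cm^-2 s^-1]); 'x'/'x2' -> (x = E/mass, dN/dx or x^2 dN/dx).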
sigmav=3*1e-26 # annihilation cross section in cm3s-1
data = np.genfromtxt (filename, names = True ,dtype = None,comments='#')
massvals = data["mDM"]
index = np.where(np.abs( (massvals - mass) / mass) < 1.e-3)
xvals = 10**(data["Log10x"][index])
def branchingratios(m_branon): #<sigmav>_particle / <sigmav>_total
#PhysRevD.68.103505
m_top = 172.44
m_W = 80.4
m_Z = 91.2
m_h = 125.1
m_c = 1.275
m_b = 4.18
m_tau = 1.7768
if m_branon > m_top:
c_0_top = 3.0 / 16 * m_branon ** 2 * m_top ** 2 * (m_branon ** 2 - m_top ** 2) * (1 - m_top ** 2 / m_branon ** 2) ** (1.0 / 2)
else:
c_0_top = 0
if m_branon > m_Z:
c_0_Z = 1.0 / 64 * m_branon ** 2 * (1 - m_Z ** 2 / m_branon ** 2) ** (1.0 / 2) * (4 * m_branon ** 4 - 4 * m_branon ** 2 * m_Z ** 2 + 3 * m_Z ** 4)
else:
c_0_Z = 0
if m_branon > m_W:
c_0_W = 2.0 / 64 * m_branon ** 2 * (1 - m_W ** 2 / m_branon ** 2) ** (1.0 / 2) * (4 * m_branon ** 4 - 4 * m_branon ** 2 * m_W ** 2 + 3 * m_W ** 4)
else:
c_0_W = 0
if m_branon > m_h:
c_0_h = 1.0 / 64 * m_branon ** 2 * (2 * m_branon ** 2 + m_h ** 2) ** 2 * (1 - m_h ** 2 / m_branon ** 2) ** (1.0 / 2)
else:
c_0_h = 0
if m_branon > m_c:
c_0_c = 3.0 / 16 * m_branon ** 2 * m_c ** 2 * (m_branon ** 2 - m_c ** 2) * (1 - m_c ** 2 / m_branon ** 2) ** (1.0 / 2)
else:
c_0_c = 0
if m_branon > m_b:
c_0_b = 3.0 / 16 * m_branon ** 2 * m_b ** 2 * (m_branon ** 2 - m_b ** 2) * (1 - m_b ** 2 / m_branon ** 2) ** (1.0 / 2)
else:
c_0_b = 0
if m_branon > m_tau:
c_0_tau = 1.0 / 16 * m_branon ** 2 * m_tau ** 2 * (m_branon ** 2 - m_tau ** 2) * (1 - m_tau ** 2 / m_branon ** 2) ** (1.0 / 2)
else:
c_0_tau = 0
c_0_T = c_0_top + c_0_Z + c_0_W + c_0_h + c_0_c + c_0_b + c_0_tau
br_t = (c_0_top / c_0_T)
br_Z = c_0_Z / c_0_T
br_W = c_0_W / c_0_T
br_h = c_0_h / c_0_T
br_c = c_0_c / c_0_T
br_b = c_0_b / c_0_T
br_tau = c_0_tau / c_0_T
#f.append((c_0_T/(3*10**(-26)*math.pi**2))**(1./8))
return {'masas': m_branon, 't': br_t, 'Z': br_Z, 'W': br_W, 'h': br_h, 'c': br_c, 'b': br_b, 'Tau': br_tau}
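    # The branching fractions ('t','Z','W','h','c','b','Tau') are each normalized by
    # c_0_T, so they sum to 1; channels that are kinematically closed (m_branon below
    # threshold) get zero weight.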
#tau name modified in AtProduction_Gammas.dat
if finalstate == "new":
di = branchingratios(mass)
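        # NOTE: the positional indexing of di.keys()/di.values() below assumes a fixed
        # dict ordering with 'masas' at position 0 (Python 2 behaviour); each spectrum
        # loaded at position i is paired with the branching ratio at the same position.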
flux_c = data[di.keys()[1]][index]/(np.log(10)*xvals)
flux_tau = data[di.keys()[2]][index]/(np.log(10)*xvals)
flux_b = data[di.keys()[3]][index]/(np.log(10)*xvals)
flux_t = data[di.keys()[4]][index]/(np.log(10)*xvals)
flux_W = data[di.keys()[5]][index]/(np.log(10)*xvals)
flux_Z = data[di.keys()[7]][index]/(np.log(10)*xvals)
flux_h = data[di.keys()[6]][index]/(np.log(10)*xvals)
loadspec_h = interp1d(xvals,flux_h)
loadspec_Z = interp1d(xvals,flux_Z)
loadspec_t = interp1d(xvals,flux_t)
loadspec_W = interp1d(xvals,flux_W)
loadspec_b = interp1d(xvals,flux_b)
loadspec_c = interp1d(xvals,flux_c)
loadspec_tau = interp1d(xvals,flux_tau)
else:
flux = data[finalstate][index]/(np.log(10)*xvals) #data is given in dN/d(log10(X)) = x ln10 dN/dx
#flux = data[finalstate][index]
loadspec = interp1d(xvals,flux)
def dNdx(x):
fluxval = loadspec(x)
if (x>1 or fluxval<0):
return 0
else:
return fluxval
def dNdx_new(x,di):
fluxval_h = loadspec_h(x)
if (x>1 or fluxval_h<0):
fluxval_h = 0
fluxval_Z = loadspec_Z(x)
if (x>1 or fluxval_Z<0):
fluxval_Z = 0
fluxval_t = loadspec_t(x)
if (x>1 or fluxval_t<0):
fluxval_t = 0
fluxval_W = loadspec_W(x)
if (x>1 or fluxval_W<0):
fluxval_W = 0
fluxval_b = loadspec_b(x)
if (x>1 or fluxval_b<0):
fluxval_b = 0
fluxval_c = loadspec_c(x)
if (x>1 or fluxval_c<0):
fluxval_c = 0
fluxval_tau = loadspec_tau(x)
if (x>1 or fluxval_tau<0):
fluxval_tau = 0
return (di.values()[1]*fluxval_c + di.values()[2]*fluxval_tau +
di.values()[3]*fluxval_b + di.values()[4]*fluxval_t +
di.values()[5]*fluxval_W + di.values()[7]*fluxval_Z +
di.values()[6]*fluxval_h)
vdNdx = []
x2vdNdx = []
dNde = []
e2dNde = []
evals = []
    xvals2 = []  # my addition
    if option == 'e':  # and boost > 1:
#if mass == 5000:
        sigmavboost = sigmav * boost  # not actually necessary
file1 = open("tabla"+str(mass)+str(finalstate)+str(sigmavboost)+".txt","w")
logxvalsnew = np.linspace(-8.9,0,10000)
xvalsnew = 10**logxvalsnew
for i in range(len(xvalsnew)):
x=xvalsnew[i]
        xvals2.append(x)  # my addition
#vdNdx.append(dNdx(x))
#x2vdNdx.append(x**2*dNdx(x))
#dNde.append(dNdx(x)*Jfactor*GeV**2*sigmav*boost/(8*np.pi*(mass*GeV)**3))
#e2dNde.append((1/erg)*x**2*dNdx(x)*Jfactor*GeV**2*sigmav*boost/(8*np.pi*mass*GeV))
if finalstate == 'new':
aux = dNdx_new(x,di)
else:
aux = dNdx(x)
vdNdx.append(aux)
x2vdNdx.append(x**2*aux)
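        # Annihilation flux: dPhi/dE = <sigma v> * J / (8 pi m^2) * dN/dE with
        # dN/dE = (dN/dx)/m, hence the 1/(8 pi m^3) factor; GeV**2 rescales the
        # J-factor (GeV^2 cm^-5) and mass*GeV expresses the mass in TeV.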
dNdeaux = aux*Jfactor*GeV**2*sigmav*boost/(8*np.pi*(mass*GeV)**3)
dNde.append(dNdeaux)
e2dNde.append((1/erg)*x**2*aux*Jfactor*GeV**2*sigmav*boost/(8*np.pi*mass*GeV))
evals.append(x*mass*GeV)
        if option == 'e':  # and boost > 1:
#if mass == 5000 and dNdeaux != 0:
if dNdeaux != 0:
file1.write(str(x*mass*10**3) + " " + str(dNdeaux/(10**6)) + "\n")
#print i
#print(option, boost, mass, x*mass*10**3, dNdeaux/(10**6))
#print(x, vdNdx[i], evals[i], e2dNde[i])
# if x == 1:
# break
    if option == 'e':
#if mass == 5000 and boost > 1:
file1.write(str(x*mass*10**3+1) + " " + "1e-99" + "\n")
file1.write(str(x*mass*10**3+5) + " " + "1e-99" + "\n")
file1.write(str(x*mass*10**3+10) + " " + "1e-99" + "\n")
file1.close()
return (evals,dNde)
    if option == 'e2':
return (evals,e2dNde)
    if option == 'x':
return (xvals2,vdNdx)
    if option == 'x2':
return (xvals2,x2vdNdx)
else:
print('Option '+str(option)+' not supported')
fig=pl.figure(figsize=(15,10))
ax=fig.add_subplot(221)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(1e-7, 1)
ax.set_ylim(1e-7,1e6)
#ax.set_xlim(1e-5, 1)
#ax.set_ylim(1e-2,1e3)
ax.set_xlabel('$x$')
ax.set_ylabel('$dN/dx$')
(Edm,Fdm) = getDMspectrum('x','new',50)
ax.plot(Edm, Fdm, label="m = 0.05 TeV", color='red', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',100)
ax.plot(Edm, Fdm, label="m = 0.1 TeV", color='blue', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',150)
ax.plot(Edm, Fdm, label="m = 0.15 TeV", color='green', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',250)
ax.plot(Edm, Fdm, label="m = 0.25 TeV", color='pink', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',500)
ax.plot(Edm, Fdm, label="m = 0.5 TeV", color='#00CCFF', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',1000)
ax.plot(Edm, Fdm, label="m = 1 TeV", color='#FF66FF', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',5000)
ax.plot(Edm, Fdm, label="m = 5 TeV", color='#CC0066', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',10000)
ax.plot(Edm, Fdm, label="m = 10 TeV", color='orange', linewidth=1)
(Edm,Fdm) = getDMspectrum('x','new',50000)
ax.plot(Edm, Fdm, label="m = 50 TeV", color='purple', linewidth=1)
plt.legend(loc=3, prop={'size':12})
ax=fig.add_subplot(223)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(1e-7, 1)
ax.set_ylim(1e-7,1)
ax.set_xlabel('$x$')
ax.set_ylabel('$x^2 dN/dx$')
#(Edm,Fdm) = getDMspectrum('x2','b',10000)
#ax.plot(Edm, Fdm, label="DM", color='pink', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',50)
ax.plot(Edm, Fdm, label="m = 0.05 TeV", color='red', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',100)
ax.plot(Edm, Fdm, label="m = 0.1 TeV", color='blue', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',150)
ax.plot(Edm, Fdm, label="m = 0.15 TeV", color='green', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',250)
ax.plot(Edm, Fdm, label="m = 0.25 TeV", color='pink', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',500)
ax.plot(Edm, Fdm, label="m = 0.5 TeV", color='#00CCFF', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',1000)
ax.plot(Edm, Fdm, label="m = 1 TeV", color='#FF66FF', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',5000)
ax.plot(Edm, Fdm, label="m = 5 TeV", color='#CC0066', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',10000)
ax.plot(Edm, Fdm, label="m = 10 TeV", color='orange', linewidth=1)
(Edm,Fdm) = getDMspectrum('x2','new',50000)
ax.plot(Edm, Fdm, label="m = 50 TeV", color='purple', linewidth=1)
plt.legend(loc=2, prop={'size':12})
ax=fig.add_subplot(222)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(2e-4, 60)
ax.set_ylim(5e-22,1e-5)
ax.set_xlabel('$E$ [TeV]')
ax.set_ylabel('$dN/dE$ [cm$^{-2}$ s$^{-1}$ TeV$^{-1}$]')
#(Edm,Fdm) = getDMspectrum('e','b',10)
#ax.plot(Edm, Fdm, label="DM", color='red', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',50)
ax.plot(Edm, Fdm, label="m = 0.05 TeV", color='red', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',100)
ax.plot(Edm, Fdm, label="m = 0.1 TeV", color='blue', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',150)
ax.plot(Edm, Fdm, label="m = 0.15 TeV", color='green', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',250)
ax.plot(Edm, Fdm, label="m = 0.25 TeV", color='pink', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',500)
ax.plot(Edm, Fdm, label="m = 0.5 TeV", color='#00CCFF', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',1000)
ax.plot(Edm, Fdm, label="m = 1 TeV", color='#FF66FF', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','b',5000,boost=4e4) #######
(Edm,Fdm) = getDMspectrum('e','Tau',5000,boost=2e4) #######
(Edm,Fdm) = getDMspectrum('e','W',5000,boost=4e4) #######
(Edm,Fdm) = getDMspectrum('e','new',5000)
ax.plot(Edm, Fdm, label="m = 5 TeV", color='#CC0066', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',10000)
ax.plot(Edm, Fdm, label="m = 10 TeV", color='orange', linewidth=1)
(Edm,Fdm) = getDMspectrum('e','new',50000)
ax.plot(Edm, Fdm, label="m = 50 TeV", color='purple', linewidth=1)
plt.legend(loc=3, prop={'size':12})
ax=fig.add_subplot(224)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(2e-4, 60)
ax.set_ylim(5e-22,1e-12)
ax.set_xlabel('$E$ [TeV]')
ax.set_ylabel('$E^2 dN/dE$ [erg cm$^{-2}$ s$^{-1}$]')
#(Edm,Fdm) = getDMspectrum('e2','b',10)
#ax.plot(Edm, Fdm, label="DM", color='red', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',50)
ax.plot(Edm, Fdm, label="m = 0.05 TeV", color='red', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',100)
ax.plot(Edm, Fdm, label="m = 0.1 TeV", color='blue', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',150)
ax.plot(Edm, Fdm, label="m = 0.15 TeV", color='green', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',250)
ax.plot(Edm, Fdm, label="m = 0.25 TeV", color='pink', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',500)
ax.plot(Edm, Fdm, label="m = 0.5 TeV", color='#00CCFF', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',1000)
ax.plot(Edm, Fdm, label="m = 1 TeV", color='#FF66FF', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',5000)
ax.plot(Edm, Fdm, label="m = 5 TeV", color='#CC0066', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',10000)
ax.plot(Edm, Fdm, label="m = 10 TeV", color='orange', linewidth=1)
(Edm,Fdm) = getDMspectrum('e2','new',50000)
ax.plot(Edm, Fdm, label="m = 50 TeV", color='purple', linewidth=1)
plt.legend(loc=3, prop={'size':12})
#plt.show()
fig=pl.figure()
ax=fig.add_subplot(111)
legends = []
ctain=True
(Emagic, Fmagic) = getMAGIC(magic_file)
ax.plot(Emagic, Fmagic, label="MAGIC", color='#A20025', linewidth=0.7)
#(Evts, Fvts) = getVERITAS(vts_file,index=1)
#vtsplot, = ax.plot(Evts, Fvts, label="VERITAS (50 hr)", color='red', linewidth=2)
#(Ehess, Fhess) = getHESS(hess_file_combined)
#ax.plot(Ehess, Fhess, label="HESS", color='#0050EF')
#(Ehess, Fhess) = getHESS(hess_file_stereo)
#ax.plot(Ehess, Fhess, label="HESS", color="#1BA1E2")
#(Ehess, Fhess) = getHESSEnvelope(hess_file_envelope)
#ax.plot(Ehess, Fhess, label="H.E.S.S.", color="#F0A30A",linewidth=4)
#(Ehawc, Fhawc) = getHAWCFermi(hawc_1yr_file)
#hawcplot1, = ax.plot(Ehawc, Fhawc, label="HAWC-300 - 1yr", color='#008A00', linewidth=2)
#(Ehawc, Fhawc) = getHAWCFermi(hawc_5yr_file)
#hawcplot2, = ax.plot(Ehawc, Fhawc, label="HAWC-300 - 5yr", color='#A4C400', linewidth=2)
#(Efermi, Ffermi) = getHAWCFermi(fermi_b0_file, Escale=MeV)
#ax.plot(Efermi, Ffermi, label="Fermi - b0", color='#bbbbbb')
#(Efermi, Ffermi) = getHAWCFermi(fermi_b30_file, Escale=MeV)
#ax.plot(Efermi, Ffermi, label="Fermi-LAT ($b = 30^{\circ})$ ", color='#00ABA9')
(Efermi, Ffermi) = getHAWCFermi(fermi_b90_file, Escale=MeV)
fermiplot, = ax.plot(Efermi, Ffermi, label="LAT (Pass8) - 10yr", color="#1BA1E2", linewidth=0.7)
#(Ehs, Fhs) = getHAWCFermi(hs_file, Escale=TeV)
#ax.plot(Ehs, Fhs, label="HiSCORE", color="#AA00FF", linestyle='-',linewidth=0.7)
#(Ehs, Fhs) = getHAWCFermi(lhaaso_file, Escale=TeV)
#ax.plot(Ehs, Fhs, label="LHAASO", color="#0050EF", linestyle='-',linewidth=0.7)
(Ecta, Fcta) = getCTA(cta_n_file)
ax.plot(Ecta, Fcta, label="CTA (50h, North)", linestyle='-', color='goldenrod',linewidth=1)
if ctain:
(Ecta, Fcta) = getCTA(cta_s_file)
ctaplot, = ax.plot(Ecta, Fcta, label="CTA (50h, South)", linestyle='-', color='#825A2C',linewidth=1)
#### Fermi IGRB ####
#figrb = np.genfromtxt(pathforaux+"/igrb.dat")
#Emean = 1000*(figrb[:,0] + figrb[:,1])/2.
#Ewidth = (figrb[:,1]-figrb[:,0]) * 1000
#Figrb = [4 * np.pi * (F/Ew) * scale * 1.60218e-6 * 1e-3 * E**2 for (F, scale, E, Ew) in zip(figrb[:,2], figrb[:,5], Emean, Ewidth)]
#Figrb_err = [4 * np.pi * (F_err/Ew) * 1.60218e-6 * 1e-3 * scale * E**2 for (F_err, scale, E, Ew) in zip(figrb[:,3], figrb[:,5], Emean, Ewidth)]
#Figrb_err[-1] = 3e-14
#Flims = figrb[:,3] < 1e-3
#DNC ax.errorbar(Emean/1e6, Figrb, yerr=Figrb_err, xerr=Ewidth/2e6, marker='o',linestyle='',ecolor='red',color='red',mec='red',ms=3,uplims=Flims,capsize=3, linewidth=1)
#DNC ax.fill_between(Emean/1e6, [mean - err for (mean,err) in zip(Figrb, Figrb_err)], [mean + err for (mean,err) in zip(Figrb, Figrb_err)], zorder=0, alpha=0.5, color="#cccccc")
#ax.set_yscale('log')
#ax.set_xscale('log')
#ax.grid('on')
#if ctain:
# first_legend = plt.legend(handles=[fermiplot,vtsplot,hawcplot1,hawcplot2,ctaplot], loc='upper left', bbox_to_anchor=(1.01, 1),fontsize=12)
#else:
# first_legend = plt.legend(handles=[fermiplot,vtsplot,hawcplot1,hawcplot2], loc='upper left', bbox_to_anchor=(1.01, 1),fontsize=12)
#ax.add_artist(first_legend)
#legends.append(first_legend)
#ax.legend(bbox_to_anchor=(1.05, 1), ncol=1, loc=2, fontsize=14)
#ax.plot(hess_file[:,0], hess_file[:,1])
#ax.set_yscale('log')
#ax.set_xscale('log')
#ax.set_xlim(2e-4, 40)
#ax.set_ylim(4e-14,1e-10)
#ax.set_ylim(5e-14,1e-9)
#ax.set_xlabel('$E$ [TeV]')
#ax.set_ylabel('$E^2 d\Phi/dE$ [erg cm$^{-2}$ s$^{-1}$]')
#plotCrabSpectrum(ax, scale=1./(erg*GeV), fill=True, lwf=0.3, fcolor='#009933', alpha=0.05)
#plotCrabSpectrum(ax, scale=1e-1/(erg*GeV), fill=True, lwf=0.3, fcolor='#009933', alpha=0.05)
#plotCrabSpectrum(ax, scale=1e-2/(erg*GeV), fill=True, lwf=0.3, fcolor='#009933', alpha=0.05)
#plotCrabSpectrum(ax, scale=1e-3/(erg*GeV), fill=True, lwf=0.3, fcolor='#009933', alpha=0.05)
# Global fit nu
gamma=2.5
A0= (2/3.) * 6.7e-18 * fullsky / (erg*GeV)
Enorm = 100 * TeV
gamma=2.3
A0=1.5e-18 * fullsky / (GeV*erg)
Enorm = 100 * TeV
#gamma_wb = 2
#A_wb = 1e-8 * fullsky / GeV*erg
#Enorm_wb = 1 * GeV
#gamma=2.0
#A0=1.5e-18 * fullsky / (erg*GeV)
#Enorm = 100 * TeV
#plotSpectrum(ax, color='#009933',redshift=0, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm, fcolor='#009933', alpha=0.1)
#plotSpectrum(ax, color='#009933',redshift=0.5, scale=1/Ns, gamma=gamma_wb, A=A_wb, Enorm=Enorm_wb, alpha=0.1, fcolor='#009933')
#plotSpectrum(ax, color='#666666',redshift=0.5, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm)
#plotSpectrum(ax, color='#999999',redshift=1, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm)
#plotSpectrum(ax, color='#000000',redshift=0, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm)
#plotSpectrum(ax, color='#333333',redshift=0.1, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm)
#plotSpectrum(ax, color='#666666',redshift=0.5, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm)
#plotSpectrum(ax, color='#999999',redshift=1, scale=1/Ns, gamma=gamma, A=A0, Enorm=Enorm)
myalpha=0.015
myfcolor='blue'
mycolor='grey'
annotE=0.5*GeV
myfontsize=10
myrot=30
if 1:
plotCrabSpectrumBroad(ax,color=mycolor,scale=1/erg,alpha=myalpha,fcolor=myfcolor)
ax.annotate('Crab', xy=(annotE,2*annotE**2*CrabSpectrumBroad(annotE)), xycoords='data',
horizontalalignment='center', verticalalignment='center',fontsize=myfontsize,rotation=myrot)
if 1:
plotCrabSpectrumBroad(ax,color=mycolor,scale=1e-1/erg,alpha=myalpha,fcolor=myfcolor)
ax.annotate('10% Crab', xy=(annotE,2e-1*annotE**2*CrabSpectrumBroad(annotE)), xycoords='data',
horizontalalignment='center', verticalalignment='center',fontsize=myfontsize,rotation=myrot)
if 1:
plotCrabSpectrumBroad(ax,color=mycolor,scale=1e-2/erg,alpha=myalpha,fcolor=myfcolor)
ax.annotate('1% Crab', xy=(annotE,2e-2*annotE**2*CrabSpectrumBroad(annotE)), xycoords='data',
horizontalalignment='center', verticalalignment='center',fontsize=myfontsize,rotation=myrot)
if 1:
plotCrabSpectrumBroad(ax,color=mycolor,scale=1e-3/erg,alpha=myalpha,fcolor=myfcolor)
ax.annotate('0.1% Crab', xy=(annotE,2e-3*annotE**2*CrabSpectrumBroad(annotE)), xycoords='data',
horizontalalignment='center', verticalalignment='center',fontsize=myfontsize,rotation=myrot)
if 1:
hdulist1 = pyfits.open('spectrumdmbboost.fits')
hdulist1.info()
datos = hdulist1[1].data
energ = datos['Energy']
ed_energ = datos['ed_Energy']
eu_energ = datos['eu_Energy']
flux = datos['Flux']
e_flux = datos['e_Flux']
plt.errorbar(energ, flux, xerr=(ed_energ, eu_energ), yerr = e_flux, color = 'red', marker = 'o', label = r'spectrum $b\bar b$', fmt = '', zorder = 0)
hdulist1.close()
hdulist2 = pyfits.open('spectrumdmWboost.fits')
hdulist2.info()
datos = hdulist2[1].data
energ = datos['Energy']
ed_energ = datos['ed_Energy']
eu_energ = datos['eu_Energy']
flux = datos['Flux']
e_flux = datos['e_Flux']
plt.errorbar(energ, flux, xerr=(ed_energ, eu_energ), yerr = e_flux, color = 'green', marker = 'o', label = 'spectrum $W^+ W^-$', fmt = '', zorder = 0)
hdulist2.close()
mylinestyle='--'
#mymass=3
myboost=1
(Edm,Fdm) = getDMspectrum('e2','b',5e3,boost=myboost)
dmplot1 = ax.plot(Edm, Fdm, label=r"$m_\chi$ = "+str(5)+r" TeV ($b\bar b$, B$_f$=1e"+str("{:.1f}".format(np.log10(myboost)))+")", color='red', linewidth=1,linestyle=mylinestyle)
(Edm,Fdm) = getDMspectrum('e2','Tau',5e3,boost=myboost)
dmplot2 = ax.plot(Edm, Fdm, label=r"$m_\chi$ = "+str(5)+r" TeV ($\tau^- \tau^+$, B$_f$=1e"+str("{:.1f}".format(np.log10(myboost)))+")", color='blue', linewidth=1,linestyle=mylinestyle)
(Edm,Fdm) = getDMspectrum('e2','W',5e3,boost=myboost)
dmplot3 = ax.plot(Edm, Fdm, label=r"$m_\chi$ = "+str(5)+r" TeV ($W^+ W^-$, B$_f$=1e"+str("{:.1f}".format(np.log10(myboost)))+")", color='green', linewidth=1,linestyle=mylinestyle)
myboost2 = 2e4
myboost3 = 4e4
(Edm,Fdm) = getDMspectrum('e2','b',5e3,boost=myboost3)
dmplot4 = ax.plot(Edm, Fdm, label=r"$m_\chi$ = "+str(5)+r" TeV ($b\bar b$, B$_f$=1e"+str("{:.1f}".format(np.log10(myboost3)))+")", color='pink', linewidth=1,linestyle=mylinestyle)
(Edm,Fdm) = getDMspectrum('e2','Tau',5e3,boost=myboost2)
dmplot5 = ax.plot(Edm, Fdm, label=r"$m_\chi$ = "+str(5)+r" TeV ($\tau^- \tau^+$, B$_f$=1e"+str("{:.1f}".format(np.log10(myboost2)))+")", color='orange', linewidth=1,linestyle=mylinestyle)
(Edm,Fdm) = getDMspectrum('e2','W',5e3,boost=myboost3)
dmplot6 = ax.plot(Edm, Fdm, label=r"$m_\chi$ = "+str(5)+r" TeV ($W^+ W^-$, B$_f$=1e"+str("{:.1f}".format(np.log10(myboost3)))+")", color='purple', linewidth=1,linestyle=mylinestyle)
plt.legend(loc=4, prop={'size':9})  # my addition
dmplots= []
dmplots.append(dmplot1)
dmplots.append(dmplot2)
dmplots.append(dmplot3)
dmplots.append(dmplot4)
dmplots.append(dmplot5)
dmplots.append(dmplot6)
ax.set_xlim(1e-4, 1e3)
ax.set_ylim(1e-16,1e-10)
ax.set_xlabel('$E$ [TeV]')
ax.set_ylabel('$E^2 dN/dE$ [erg cm$^{-2}$ s$^{-1}$]')
#second_legend = plt.legend(handles=dmplots, ncol=1, loc='upper left',bbox_to_anchor=(1.01, .25), fontsize=12)
#ax.add_artist(second_legend)
#legends.append(second_legend)
#dummy = []
#aux_legend = plt.legend(handles=dummy,bbox_to_anchor=(1.5, .2),frameon=False)
#legends.append(aux_legend)
#fig.savefig(pathforfigs+'ic_sensitivities_cta_dm_'+str(mymass)+'_'+str(myboost)+'.pdf',bbox_extra_artists=legends,bbox_inches='tight')
plt.show()  # my addition
fig.savefig(pathforfigs+'ic_sensitivities_prueba.pdf',bbox_extra_artists=legends,bbox_inches='tight')
| gpl-3.0 |
nmayorov/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat, noisy tail in its singular-value profile.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
ifuding/Kaggle | TCCC/Code/philly/PoolGRU.py | 1 | 10263 | import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
from sklearn import feature_extraction, ensemble, decomposition, pipeline
from sklearn.model_selection import KFold
# from textblob import TextBlob
from nfold_train import nfold_train, models_eval
import time
from time import gmtime, strftime
from tensorflow.python.keras.preprocessing.text import Tokenizer, text_to_word_sequence
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from data_helper import data_helper
import shutil
import os
from contextlib import contextmanager
# from nltk.stem import PorterStemmer
# ps = PorterStemmer()
import gensim
from CNN_Keras import CNN_Model, get_word2vec_embedding
zpolarity = {0:'zero',1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten'}
zsign = {-1:'negative', 0.: 'neutral', 1:'positive'}
flags = tf.app.flags
flags.DEFINE_string('input-training-data-path', "../../Data/", 'data dir override')
flags.DEFINE_string('output-model-path', ".", 'model dir override')
flags.DEFINE_string('model_type', "cnn", 'model type')
flags.DEFINE_integer('vocab_size', 300000, 'vocab size')
flags.DEFINE_integer('max_seq_len', 100, 'max sequence length')
flags.DEFINE_integer('nfold', 10, 'number of folds')
flags.DEFINE_integer('ensemble_nfold', 5, 'number of ensemble models')
flags.DEFINE_integer('emb_dim', 300, 'term embedding dim')
flags.DEFINE_string('rnn_unit', 0, 'RNN Units')
flags.DEFINE_integer('epochs', 1, 'number of Epochs')
flags.DEFINE_integer('batch_size', 128, 'Batch size')
flags.DEFINE_bool("load_wv_model", True, "Whether to load word2vec model")
flags.DEFINE_string('wv_model_type', "fast_text", 'word2vec model type')
flags.DEFINE_string('wv_model_file', "wiki.en.vec.indata", 'word2vec model file')
flags.DEFINE_bool("char_split", False, "Whether to split text into character")
flags.DEFINE_string('filter_size', 100, 'CNN filter size')
flags.DEFINE_bool("fix_wv_model", True, "Whether to fix word2vec model")
flags.DEFINE_integer('batch_interval', 1000, 'batch print interval')
flags.DEFINE_float("emb_dropout", 0, "embedding dropout")
flags.DEFINE_string('full_connect_hn', "64, 32", 'full connect hidden units')
flags.DEFINE_float("full_connect_dropout", 0, "full connect drop out")
flags.DEFINE_string('vdcnn_filters', "64, 128, 256", 'vdcnn filters')
flags.DEFINE_integer('vdcc_top_k', 1, 'vdcc top_k')
flags.DEFINE_bool("separate_label_layer", False, "Whether to separate label layer")
flags.DEFINE_bool("stem", False, "Whether to stem")
flags.DEFINE_bool("resnet_hn", False, "Whether to concatenate hn and rcnn")
flags.DEFINE_integer('letter_num', 3, 'letter number to aggregate')
flags.DEFINE_string('kernel_size_list', "1,2,3,4,5,6,7", 'kernel size list')
flags.DEFINE_float("rnn_input_dropout", 0, "rnn input drop out")
flags.DEFINE_float("rnn_state_dropout", 0, "rnn state drop out")
flags.DEFINE_bool("stacking", False, "Whether to stacking")
flags.DEFINE_bool("uniform_init_emb", False, "Whether to uniform init the embedding")
flags.DEFINE_bool("load_stacking_data", False, "Whether to load stacking data")
FLAGS = flags.FLAGS
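# Example invocation (paths/values are illustrative assumptions, not project defaults):
#   python PoolGRU.py --input-training-data-path ../../Data \
#       --model_type cnn --epochs 2 --batch_size 128 --nfold 10 --ensemble_nfold 5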
def load_data():
train = pd.read_csv(FLAGS.input_training_data_path + '/train.csv') #.iloc[:200]
test = pd.read_csv(FLAGS.input_training_data_path + '/test.csv') #.iloc[:200]
# sub1 = pd.read_csv(data_dir + '/submission_ensemble.csv')
nrow = train.shape[0]
print("Train Size: {0}".format(nrow))
print("Test Size: {0}".format(test.shape[0]))
coly = [c for c in train.columns if c not in ['id','comment_text']]
print("Label columns: {0}".format(coly))
y = train[coly]
tid = test['id'].values
if FLAGS.load_stacking_data:
data_dir = "../../Data/2fold/"
svd_features = np.load(data_dir + 'svd.npy')
svd_train = svd_features[:nrow]
svd_test = svd_features[nrow:]
kf = KFold(n_splits=2, shuffle=False)
for train_index, test_index in kf.split(svd_train):
svd_train_part = svd_train[test_index]
break
train_data = np.load(data_dir + 'stacking_train_data.npy')
print(train_data.shape, svd_train_part.shape)
train_data = np.c_[train_data, svd_train_part]
train_label = np.load(data_dir + 'stacking_train_label.npy')
# train_data = train_data[:100]
# train_label = train_label[:100]
test_data = np.load(data_dir + 'stacking_test_data.npy')
emb_weight = None
else:
df = pd.concat([train['comment_text'], test['comment_text']], axis=0)
df = df.fillna("unknown")
data = df.values
# Text to sequence
@contextmanager
def timer(name):
"""
Taken from Konstantin Lopuhin https://www.kaggle.com/lopuhin
in script named : Mercari Golf: 0.3875 CV in 75 LOC, 1900 s
https://www.kaggle.com/lopuhin/mercari-golf-0-3875-cv-in-75-loc-1900-s
"""
t0 = time.time()
yield
print('[{0}] done in {1} s'.format(name, time.time() - t0))
with timer("Performing stemming"):
if FLAGS.stem:
# stem_sentence = lambda s: " ".join(ps.stem(word) for word in s.strip().split())
data = [gensim.parsing.stem_text(comment) for comment in data]
print('Tokenizer...')
if not FLAGS.char_split:
tokenizer = Tokenizer(num_words = FLAGS.vocab_size)
tokenizer.fit_on_texts(data)
data = tokenizer.texts_to_sequences(data)
data = pad_sequences(data, maxlen = FLAGS.max_seq_len)
if FLAGS.load_wv_model:
emb_weight = get_word2vec_embedding(location = FLAGS.input_training_data_path + FLAGS.wv_model_file, \
tokenizer = tokenizer, nb_words = FLAGS.vocab_size, embed_size = FLAGS.emb_dim, \
model_type = FLAGS.wv_model_type, uniform_init_emb = FLAGS.uniform_init_emb)
else:
if FLAGS.uniform_init_emb:
emb_weight = np.random.uniform(0, 1, (FLAGS.vocab_size, FLAGS.emb_dim))
else:
emb_weight = np.zeros((FLAGS.vocab_size, FLAGS.emb_dim))
else:
tokenizer = None
data_helper = data_helper(sequence_max_length = FLAGS.max_seq_len, \
wv_model_path = FLAGS.input_training_data_path + FLAGS.wv_model_file, \
letter_num = FLAGS.letter_num, emb_dim = FLAGS.emb_dim, load_wv_model = FLAGS.load_wv_model)
data, emb_weight, FLAGS.vocab_size = data_helper.text_to_triletter_sequence(data)
train_data, train_label = data[:nrow], y.values[:nrow]
test_data = data[nrow:]
return train_data, train_label, test_data, coly, tid, emb_weight
def sub(models, stacking_data = None, stacking_label = None, stacking_test_data = None, test = None, \
scores_text = None, coly = None, tid = None, sub_re = None):
tmp_model_dir = "./model_dir/"
if not os.path.isdir(tmp_model_dir):
os.makedirs(tmp_model_dir, exist_ok=True)
if FLAGS.stacking:
np.save(os.path.join(tmp_model_dir, "stacking_train_data.npy"), stacking_data)
np.save(os.path.join(tmp_model_dir, "stacking_train_label.npy"), stacking_label)
np.save(os.path.join(tmp_model_dir, "stacking_test_data.npy"), stacking_test_data)
else:
sub2 = pd.DataFrame(np.zeros((test.shape[0], len(coly))), columns = coly)
if FLAGS.load_stacking_data:
sub2[coly] = sub_re
else:
sub2[coly] = models_eval(models, test)
sub2['id'] = tid
for c in coly:
            sub2[c] = sub2[c].clip(0+1e-12, 1-1e-12)  # keep predictions strictly inside (0, 1)
blend = sub2 #blend[sub2.columns]
time_label = strftime('_%Y_%m_%d_%H_%M_%S', gmtime())
sub_name = tmp_model_dir + "sub" + time_label + ".csv"
blend.to_csv(sub_name, index=False)
scores_text_frame = pd.DataFrame(scores_text, columns = ["score_text"])
score_text_file = tmp_model_dir + "score_text" + time_label + ".csv"
scores_text_frame.to_csv(score_text_file, index=False)
scores = scores_text_frame["score_text"]
for i in range(FLAGS.epochs):
scores_epoch = scores.loc[scores.str.startswith('epoch:{0}'.format(i + 1))].map(lambda s: float(s.split()[1]))
print ("Epoch{0} mean:{1} std:{2} min:{3} max:{4} median:{5}".format(i + 1, \
scores_epoch.mean(), scores_epoch.std(), scores_epoch.min(), scores_epoch.max(), scores_epoch.median()))
if not os.path.isdir(FLAGS.output_model_path):
os.makedirs(FLAGS.output_model_path, exist_ok=True)
for fileName in os.listdir(tmp_model_dir):
dst_file = os.path.join(FLAGS.output_model_path, fileName)
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(os.path.join(tmp_model_dir, fileName), FLAGS.output_model_path)
if __name__ == "__main__":
print("Training------")
scores_text = []
train_data, train_label, test_data, coly, tid, emb_weight = load_data()
sub_re = np.zeros((test_data.shape[0], len(coly)))
if not FLAGS.load_stacking_data:
# for i in range(train_label.shape[1]):
models, stacking_data, stacking_label, stacking_test_data = nfold_train(train_data, train_label, flags = FLAGS, \
model_types = [FLAGS.model_type], scores = scores_text, emb_weight = emb_weight, test_data = test_data)
#, valide_data = train_data, valide_label = train_label)
else:
for i in range(train_label.shape[1]):
models, stacking_data, stacking_label, stacking_test_data = nfold_train(train_data, train_label[:, i], flags = FLAGS, \
model_types = [FLAGS.model_type], scores = scores_text, emb_weight = emb_weight, test_data = test_data \
# , valide_data = train_data[:100], valide_label = train_label[:100, i]
)
sub_re[:, i] = models_eval(models, test_data)
sub(models, stacking_data = stacking_data, stacking_label = stacking_label, stacking_test_data = stacking_test_data, \
test = test_data, scores_text = scores_text, coly = coly, tid = tid, sub_re = sub_re) | apache-2.0 |