repository_name | func_path_in_repository | func_name | language | whole_func_string | split_name | func_code_url

Each record below gives the first four fields on one row, then whole_func_string (the complete Python source, docstring included), and closes with split_name | func_code_url.
annayqho/TheCannon | code/aaomega/aaomega_munge_data.py | make_full_ivar | python

def make_full_ivar():
    """ Take the scatters and skylines and make final ivars """
    # skylines come as an ivar
    # don't use them for now, because I don't really trust them...
    # skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0']
    ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
    ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
    test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
    test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']
    # broadcast the per-star scatter across all pixels: ivar = 1 / scatter**2
    ref_ivar = np.ones(ref_flux.shape) / ref_scat[:,None]**2
    test_ivar = np.ones(test_flux.shape) / test_scat[:,None]**2
    # ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)
    # test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)
    # down-weight unphysical pixels in the normalized flux
    ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
    test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
    SMALL = 1.0 / 1000000000.0
    ref_ivar[ref_bad] = SMALL
    test_ivar[test_bad] = SMALL
    np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar)
    np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar)

""" take the scatters and skylines and make final ivars """
# skylines come as an ivar
# don't use them for now, because I don't really trust them...
# skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0']
ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']
ref_ivar = np.ones(ref_flux.shape) / ref_scat[:,None]**2
test_ivar = np.ones(test_flux.shape) / test_scat[:,None]**2
# ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)
# test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)
ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
SMALL = 1.0 / 1000000000.0
ref_ivar[ref_bad] = SMALL
test_ivar[test_bad] = SMALL
np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar)
np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar) | [
"def",
"make_full_ivar",
"(",
")",
":",
"# skylines come as an ivar",
"# don't use them for now, because I don't really trust them...",
"# skylines = np.load(\"%s/skylines.npz\" %DATA_DIR)['arr_0']",
"ref_flux",
"=",
"np",
".",
"load",
"(",
"\"%s/ref_flux_all.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"ref_scat",
"=",
"np",
".",
"load",
"(",
"\"%s/ref_spec_scat_all.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"test_flux",
"=",
"np",
".",
"load",
"(",
"\"%s/test_flux.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"test_scat",
"=",
"np",
".",
"load",
"(",
"\"%s/test_spec_scat.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"ref_ivar",
"=",
"np",
".",
"ones",
"(",
"ref_flux",
".",
"shape",
")",
"/",
"ref_scat",
"[",
":",
",",
"None",
"]",
"**",
"2",
"test_ivar",
"=",
"np",
".",
"ones",
"(",
"test_flux",
".",
"shape",
")",
"/",
"test_scat",
"[",
":",
",",
"None",
"]",
"**",
"2",
"# ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)",
"# test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)",
"ref_bad",
"=",
"np",
".",
"logical_or",
"(",
"ref_flux",
"<=",
"0",
",",
"ref_flux",
">",
"1.1",
")",
"test_bad",
"=",
"np",
".",
"logical_or",
"(",
"test_flux",
"<=",
"0",
",",
"test_flux",
">",
"1.1",
")",
"SMALL",
"=",
"1.0",
"/",
"1000000000.0",
"ref_ivar",
"[",
"ref_bad",
"]",
"=",
"SMALL",
"test_ivar",
"[",
"test_bad",
"]",
"=",
"SMALL",
"np",
".",
"savez",
"(",
"\"%s/ref_ivar_corr.npz\"",
"%",
"DATA_DIR",
",",
"ref_ivar",
")",
"np",
".",
"savez",
"(",
"\"%s/test_ivar_corr.npz\"",
"%",
"DATA_DIR",
",",
"test_ivar",
")"
] | take the scatters and skylines and make final ivars | [
"take",
"the",
"scatters",
"and",
"skylines",
"and",
"make",
"final",
"ivars"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L160-L183

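The load/save scaffolding above depends on DATA_DIR and the pipeline's .npz files; the core move is broadcasting one scatter value per star into a per-pixel inverse-variance array. A minimal sketch with synthetic arrays (all names and values here are illustrative, not from the original pipeline):

import numpy as np

nstars, npix = 3, 5
flux = np.random.uniform(0.2, 1.0, (nstars, npix))
scat = np.array([0.01, 0.02, 0.05])            # one scatter value per star

ivar = np.ones(flux.shape) / scat[:, None]**2  # broadcast across all pixels
bad = np.logical_or(flux <= 0, flux > 1.1)     # unphysical normalized flux
ivar[bad] = 1e-9                               # effectively zero weight
assert ivar.shape == flux.shape
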
annayqho/TheCannon | TheCannon/normalization.py | _sinusoid | python

def _sinusoid(x, p, L, y):
    """ Return the sinusoid continuum function evaluated at input x.

    Parameters
    ----------
    x: float or np.array
        data, input to function
    p: ndarray
        coefficients of the fitting function, alternating sin and cos terms
    L: float
        width of x data
    y: float or np.array
        output data corresponding to input x (unused here; kept so the
        signature matches the curve_fit wrapper built in _find_cont_fitfunc)

    Returns
    -------
    func: float
        function evaluated for the input x
    """
    N = int(len(p)/2)
    n = np.linspace(0, N, N+1)
    k = n*np.pi/L  # wavenumbers; note k[0] = 0, so the first sin term vanishes
    func = 0
    for n in range(0, N):
        func += p[2*n]*np.sin(k[n]*x) + p[2*n+1]*np.cos(k[n]*x)
    return func

""" Return the sinusoid cont func evaluated at input x for the continuum.
Parameters
----------
x: float or np.array
data, input to function
p: ndarray
coefficients of fitting function
L: float
width of x data
y: float or np.array
output data corresponding to input x
Returns
-------
func: float
function evaluated for the input x
"""
N = int(len(p)/2)
n = np.linspace(0, N, N+1)
k = n*np.pi/L
func = 0
for n in range(0, N):
func += p[2*n]*np.sin(k[n]*x)+p[2*n+1]*np.cos(k[n]*x)
return func | [
"def",
"_sinusoid",
"(",
"x",
",",
"p",
",",
"L",
",",
"y",
")",
":",
"N",
"=",
"int",
"(",
"len",
"(",
"p",
")",
"/",
"2",
")",
"n",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"N",
",",
"N",
"+",
"1",
")",
"k",
"=",
"n",
"*",
"np",
".",
"pi",
"/",
"L",
"func",
"=",
"0",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"N",
")",
":",
"func",
"+=",
"p",
"[",
"2",
"*",
"n",
"]",
"*",
"np",
".",
"sin",
"(",
"k",
"[",
"n",
"]",
"*",
"x",
")",
"+",
"p",
"[",
"2",
"*",
"n",
"+",
"1",
"]",
"*",
"np",
".",
"cos",
"(",
"k",
"[",
"n",
"]",
"*",
"x",
")",
"return",
"func"
] | Return the sinusoid cont func evaluated at input x for the continuum.
Parameters
----------
x: float or np.array
data, input to function
p: ndarray
coefficients of fitting function
L: float
width of x data
y: float or np.array
output data corresponding to input x
Returns
-------
func: float
function evaluated for the input x | [
"Return",
"the",
"sinusoid",
"cont",
"func",
"evaluated",
"at",
"input",
"x",
"for",
"the",
"continuum",
"."
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L33-L58

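Since k[0] = 0, the n = 0 sine term vanishes and p[1] acts as a constant offset. A minimal check of the series _sinusoid evaluates, with made-up coefficients:

import numpy as np

x = np.linspace(0., 10., 100)
p = np.array([0.0, 1.0, 0.1, 0.05])   # alternating (sin, cos) coefficients
L = x.max() - x.min()

N = len(p) // 2                        # number of harmonics, here 2
k = np.linspace(0, N, N + 1) * np.pi / L
model = sum(p[2*n]*np.sin(k[n]*x) + p[2*n+1]*np.cos(k[n]*x)
            for n in range(N))         # same loop as _sinusoid (y is unused)
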
annayqho/TheCannon | TheCannon/normalization.py | _weighted_median | python

def _weighted_median(values, weights, quantile):
    """ Calculate a weighted median for values above a particular quantile cut

    Used in pseudo continuum normalization

    Parameters
    ----------
    values: np ndarray of floats
        the values to take the median of
    weights: np ndarray of floats
        the weights associated with the values
    quantile: float
        the cut applied to the input data

    Returns
    -------
    the weighted median
    """
    sindx = np.argsort(values)
    cvalues = 1. * np.cumsum(weights[sindx])
    if cvalues[-1] == 0:  # means all the weights are 0
        return values[0]
    cvalues = cvalues / cvalues[-1]  # normalize by the total weight
    foo = sindx[cvalues > quantile]
    if len(foo) == 0:
        return values[0]
    indx = foo[0]
    return values[indx]

""" Calculate a weighted median for values above a particular quantile cut
Used in pseudo continuum normalization
Parameters
----------
values: np ndarray of floats
the values to take the median of
weights: np ndarray of floats
the weights associated with the values
quantile: float
the cut applied to the input data
Returns
------
the weighted median
"""
sindx = np.argsort(values)
cvalues = 1. * np.cumsum(weights[sindx])
if cvalues[-1] == 0: # means all the values are 0
return values[0]
cvalues = cvalues / cvalues[-1] # div by largest value
foo = sindx[cvalues > quantile]
if len(foo) == 0:
return values[0]
indx = foo[0]
return values[indx] | [
"def",
"_weighted_median",
"(",
"values",
",",
"weights",
",",
"quantile",
")",
":",
"sindx",
"=",
"np",
".",
"argsort",
"(",
"values",
")",
"cvalues",
"=",
"1.",
"*",
"np",
".",
"cumsum",
"(",
"weights",
"[",
"sindx",
"]",
")",
"if",
"cvalues",
"[",
"-",
"1",
"]",
"==",
"0",
":",
"# means all the values are 0",
"return",
"values",
"[",
"0",
"]",
"cvalues",
"=",
"cvalues",
"/",
"cvalues",
"[",
"-",
"1",
"]",
"# div by largest value",
"foo",
"=",
"sindx",
"[",
"cvalues",
">",
"quantile",
"]",
"if",
"len",
"(",
"foo",
")",
"==",
"0",
":",
"return",
"values",
"[",
"0",
"]",
"indx",
"=",
"foo",
"[",
"0",
"]",
"return",
"values",
"[",
"indx",
"]"
] | Calculate a weighted median for values above a particular quantile cut
Used in pseudo continuum normalization
Parameters
----------
values: np ndarray of floats
the values to take the median of
weights: np ndarray of floats
the weights associated with the values
quantile: float
the cut applied to the input data
Returns
------
the weighted median | [
"Calculate",
"a",
"weighted",
"median",
"for",
"values",
"above",
"a",
"particular",
"quantile",
"cut"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L61-L88

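A small worked example of the quantile logic: the cut walks up the cumulative weight in value-sorted order and returns the first value whose cumulative weight fraction exceeds the quantile (values and weights below are made up):

import numpy as np

values = np.array([1., 2., 3., 4.])
weights = np.array([1., 1., 1., 5.])

sindx = np.argsort(values)
cvalues = np.cumsum(weights[sindx]) / np.sum(weights)  # [0.125, 0.25, 0.375, 1.0]
print(values[sindx[cvalues > 0.5][0]])                 # -> 4.0, the heavily weighted value
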
annayqho/TheCannon | TheCannon/normalization.py | _find_cont_gaussian_smooth | python

def _find_cont_gaussian_smooth(wl, fluxes, ivars, w):
    """ Returns the weighted mean block of spectra

    Parameters
    ----------
    wl: numpy ndarray
        wavelength vector
    fluxes: numpy ndarray
        block of flux values
    ivars: numpy ndarray
        block of ivar values
    w: numpy ndarray
        Gaussian weight matrix, computed from wl with smoothing width L
        (see _cont_norm_gaussian_smooth)

    Returns
    -------
    cont: numpy ndarray
        block of smoothed flux values, the mean spectra
    """
    print("Finding the continuum")
    bot = np.dot(ivars, w.T)          # total weight at each pixel
    top = np.dot(fluxes*ivars, w.T)   # weighted flux sum at each pixel
    bad = bot == 0
    cont = np.zeros(top.shape)
    cont[~bad] = top[~bad] / bot[~bad]
    return cont

""" Returns the weighted mean block of spectra
Parameters
----------
wl: numpy ndarray
wavelength vector
flux: numpy ndarray
block of flux values
ivar: numpy ndarray
block of ivar values
L: float
width of Gaussian used to assign weights
Returns
-------
smoothed_fluxes: numpy ndarray
block of smoothed flux values, mean spectra
"""
print("Finding the continuum")
bot = np.dot(ivars, w.T)
top = np.dot(fluxes*ivars, w.T)
bad = bot == 0
cont = np.zeros(top.shape)
cont[~bad] = top[~bad] / bot[~bad]
return cont | [
"def",
"_find_cont_gaussian_smooth",
"(",
"wl",
",",
"fluxes",
",",
"ivars",
",",
"w",
")",
":",
"print",
"(",
"\"Finding the continuum\"",
")",
"bot",
"=",
"np",
".",
"dot",
"(",
"ivars",
",",
"w",
".",
"T",
")",
"top",
"=",
"np",
".",
"dot",
"(",
"fluxes",
"*",
"ivars",
",",
"w",
".",
"T",
")",
"bad",
"=",
"bot",
"==",
"0",
"cont",
"=",
"np",
".",
"zeros",
"(",
"top",
".",
"shape",
")",
"cont",
"[",
"~",
"bad",
"]",
"=",
"top",
"[",
"~",
"bad",
"]",
"/",
"bot",
"[",
"~",
"bad",
"]",
"return",
"cont"
] | Returns the weighted mean block of spectra
Parameters
----------
wl: numpy ndarray
wavelength vector
flux: numpy ndarray
block of flux values
ivar: numpy ndarray
block of ivar values
L: float
width of Gaussian used to assign weights
Returns
-------
smoothed_fluxes: numpy ndarray
block of smoothed flux values, mean spectra | [
"Returns",
"the",
"weighted",
"mean",
"block",
"of",
"spectra"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L91-L116

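The weight matrix w is built by the caller (see _cont_norm_gaussian_smooth below) from gaussian_weight_matrix(wl, L), a helper not shown in this section; the sketch below assumes the standard form of that kernel, one Gaussian row per pixel, which may differ in detail from the actual helper:

import numpy as np

def gaussian_weight_matrix_sketch(wl, L):
    # assumed kernel: w[i, j] = exp(-0.5 * (wl_i - wl_j)**2 / L**2)
    return np.exp(-0.5 * (wl[None, :] - wl[:, None])**2 / L**2)

wl = np.linspace(15000., 15100., 200)              # hypothetical wavelength grid
fluxes = 1. + 0.01 * np.random.randn(4, wl.size)   # 4 stars
ivars = np.full(fluxes.shape, 1e4)

w = gaussian_weight_matrix_sketch(wl, L=10.)
bot = np.dot(ivars, w.T)            # total weight at each pixel
top = np.dot(fluxes * ivars, w.T)   # weighted flux sum at each pixel
cont = top / bot                    # safe here: ivars > 0 everywhere
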
annayqho/TheCannon | TheCannon/normalization.py | _cont_norm_gaussian_smooth | python

def _cont_norm_gaussian_smooth(dataset, L):
    """ Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum

    Parameters
    ----------
    dataset: Dataset
        the dataset to continuum normalize
    L: float
        the width of the Gaussian used for weighting

    Returns
    -------
    norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar: numpy ndarrays
        the normalized training and test fluxes and inverse variances
    """
    print("Gaussian smoothing the entire dataset...")
    w = gaussian_weight_matrix(dataset.wl, L)
    print("Gaussian smoothing the training set")
    cont = _find_cont_gaussian_smooth(
        dataset.wl, dataset.tr_flux, dataset.tr_ivar, w)
    norm_tr_flux, norm_tr_ivar = _cont_norm(
        dataset.tr_flux, dataset.tr_ivar, cont)
    print("Gaussian smoothing the test set")
    cont = _find_cont_gaussian_smooth(
        dataset.wl, dataset.test_flux, dataset.test_ivar, w)
    norm_test_flux, norm_test_ivar = _cont_norm(
        dataset.test_flux, dataset.test_ivar, cont)
    return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar

""" Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
Returns
-------
dataset: Dataset
updated dataset
"""
print("Gaussian smoothing the entire dataset...")
w = gaussian_weight_matrix(dataset.wl, L)
print("Gaussian smoothing the training set")
cont = _find_cont_gaussian_smooth(
dataset.wl, dataset.tr_flux, dataset.tr_ivar, w)
norm_tr_flux, norm_tr_ivar = _cont_norm(
dataset.tr_flux, dataset.tr_ivar, cont)
print("Gaussian smoothing the test set")
cont = _find_cont_gaussian_smooth(
dataset.wl, dataset.test_flux, dataset.test_ivar, w)
norm_test_flux, norm_test_ivar = _cont_norm(
dataset.test_flux, dataset.test_ivar, cont)
return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar | [
"def",
"_cont_norm_gaussian_smooth",
"(",
"dataset",
",",
"L",
")",
":",
"print",
"(",
"\"Gaussian smoothing the entire dataset...\"",
")",
"w",
"=",
"gaussian_weight_matrix",
"(",
"dataset",
".",
"wl",
",",
"L",
")",
"print",
"(",
"\"Gaussian smoothing the training set\"",
")",
"cont",
"=",
"_find_cont_gaussian_smooth",
"(",
"dataset",
".",
"wl",
",",
"dataset",
".",
"tr_flux",
",",
"dataset",
".",
"tr_ivar",
",",
"w",
")",
"norm_tr_flux",
",",
"norm_tr_ivar",
"=",
"_cont_norm",
"(",
"dataset",
".",
"tr_flux",
",",
"dataset",
".",
"tr_ivar",
",",
"cont",
")",
"print",
"(",
"\"Gaussian smoothing the test set\"",
")",
"cont",
"=",
"_find_cont_gaussian_smooth",
"(",
"dataset",
".",
"wl",
",",
"dataset",
".",
"test_flux",
",",
"dataset",
".",
"test_ivar",
",",
"w",
")",
"norm_test_flux",
",",
"norm_test_ivar",
"=",
"_cont_norm",
"(",
"dataset",
".",
"test_flux",
",",
"dataset",
".",
"test_ivar",
",",
"cont",
")",
"return",
"norm_tr_flux",
",",
"norm_tr_ivar",
",",
"norm_test_flux",
",",
"norm_test_ivar"
] | Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
Returns
-------
dataset: Dataset
updated dataset | [
"Continuum",
"normalize",
"by",
"dividing",
"by",
"a",
"Gaussian",
"-",
"weighted",
"smoothed",
"spectrum"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L119-L147

annayqho/TheCannon | TheCannon/normalization.py | _find_cont_fitfunc | python

def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
    """ Fit a continuum to the continuum pixels in a segment of spectra

    The functional form can be either a sinusoid or a Chebyshev polynomial,
    with the specified degree.

    Parameters
    ----------
    fluxes: numpy ndarray of shape (nstars, npixels)
        training set or test set pixel intensities
    ivars: numpy ndarray of shape (nstars, npixels)
        inverse variances, parallel to fluxes
    contmask: numpy ndarray of length (npixels)
        boolean pixel mask, True indicates that pixel is continuum
    deg: int
        degree of fitting function
    ffunc: str
        type of fitting function, "chebyshev" or "sinusoid"
    n_proc: int
        number of worker processes (1 disables multiprocessing)

    Returns
    -------
    cont: numpy ndarray of shape (nstars, npixels)
        the continuum, parallel to fluxes
    """
    nstars = fluxes.shape[0]
    npixels = fluxes.shape[1]
    cont = np.zeros(fluxes.shape)
    if n_proc == 1:
        for jj in range(nstars):
            flux = fluxes[jj,:]
            ivar = ivars[jj,:]
            pix = np.arange(0, npixels)
            y = flux[contmask]
            x = pix[contmask]
            yivar = ivar[contmask]
            yivar[yivar == 0] = SMALL**2  # SMALL is a module-level constant
            if ffunc == "sinusoid":
                p0 = np.ones(deg*2)  # one for cos, one for sin
                L = max(x) - min(x)
                pcont_func = _partial_func(_sinusoid, L=L, y=flux)
                popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
                                           sigma=1./np.sqrt(yivar))
            elif ffunc == "chebyshev":
                fit = np.polynomial.chebyshev.Chebyshev.fit(x=x, y=y, w=yivar, deg=deg)
            for element in pix:
                if ffunc == "sinusoid":
                    cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
                elif ffunc == "chebyshev":
                    cont[jj,element] = fit(element)
    else:
        # fit each star in its own worker process
        pool = mp.Pool(processes=n_proc)
        mp_results = []
        for i in xrange(nstars):
            mp_results.append(pool.apply_async(
                _find_cont_fitfunc,
                (fluxes[i, :].reshape((1, -1)),
                 ivars[i, :].reshape((1, -1)),
                 contmask[:]),
                {'deg': deg, 'ffunc': ffunc}))
        pool.close()
        pool.join()
        cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)])
    return cont

""" Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes
"""
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
if n_proc == 1:
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npixels)
y = flux[contmask]
x = pix[contmask]
yivar = ivar[contmask]
yivar[yivar == 0] = SMALL**2
if ffunc=="sinusoid":
p0 = np.ones(deg*2) # one for cos, one for sin
L = max(x)-min(x)
pcont_func = _partial_func(_sinusoid, L=L, y=flux)
popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
sigma=1./np.sqrt(yivar))
elif ffunc=="chebyshev":
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg)
for element in pix:
if ffunc=="sinusoid":
cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
elif ffunc=="chebyshev":
cont[jj,element] = fit(element)
else:
# start mp.Pool
pool = mp.Pool(processes=n_proc)
mp_results = []
for i in xrange(nstars):
mp_results.append(pool.apply_async(\
_find_cont_fitfunc,
(fluxes[i, :].reshape((1, -1)),
ivars[i, :].reshape((1, -1)),
contmask[:]),
{'deg':deg, 'ffunc':ffunc}))
# close mp.Pool
pool.close()
pool.join()
cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)])
return cont | [
"def",
"_find_cont_fitfunc",
"(",
"fluxes",
",",
"ivars",
",",
"contmask",
",",
"deg",
",",
"ffunc",
",",
"n_proc",
"=",
"1",
")",
":",
"nstars",
"=",
"fluxes",
".",
"shape",
"[",
"0",
"]",
"npixels",
"=",
"fluxes",
".",
"shape",
"[",
"1",
"]",
"cont",
"=",
"np",
".",
"zeros",
"(",
"fluxes",
".",
"shape",
")",
"if",
"n_proc",
"==",
"1",
":",
"for",
"jj",
"in",
"range",
"(",
"nstars",
")",
":",
"flux",
"=",
"fluxes",
"[",
"jj",
",",
":",
"]",
"ivar",
"=",
"ivars",
"[",
"jj",
",",
":",
"]",
"pix",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"npixels",
")",
"y",
"=",
"flux",
"[",
"contmask",
"]",
"x",
"=",
"pix",
"[",
"contmask",
"]",
"yivar",
"=",
"ivar",
"[",
"contmask",
"]",
"yivar",
"[",
"yivar",
"==",
"0",
"]",
"=",
"SMALL",
"**",
"2",
"if",
"ffunc",
"==",
"\"sinusoid\"",
":",
"p0",
"=",
"np",
".",
"ones",
"(",
"deg",
"*",
"2",
")",
"# one for cos, one for sin",
"L",
"=",
"max",
"(",
"x",
")",
"-",
"min",
"(",
"x",
")",
"pcont_func",
"=",
"_partial_func",
"(",
"_sinusoid",
",",
"L",
"=",
"L",
",",
"y",
"=",
"flux",
")",
"popt",
",",
"pcov",
"=",
"opt",
".",
"curve_fit",
"(",
"pcont_func",
",",
"x",
",",
"y",
",",
"p0",
"=",
"p0",
",",
"sigma",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"yivar",
")",
")",
"elif",
"ffunc",
"==",
"\"chebyshev\"",
":",
"fit",
"=",
"np",
".",
"polynomial",
".",
"chebyshev",
".",
"Chebyshev",
".",
"fit",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"w",
"=",
"yivar",
",",
"deg",
"=",
"deg",
")",
"for",
"element",
"in",
"pix",
":",
"if",
"ffunc",
"==",
"\"sinusoid\"",
":",
"cont",
"[",
"jj",
",",
"element",
"]",
"=",
"_sinusoid",
"(",
"element",
",",
"popt",
",",
"L",
"=",
"L",
",",
"y",
"=",
"flux",
")",
"elif",
"ffunc",
"==",
"\"chebyshev\"",
":",
"cont",
"[",
"jj",
",",
"element",
"]",
"=",
"fit",
"(",
"element",
")",
"else",
":",
"# start mp.Pool",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"processes",
"=",
"n_proc",
")",
"mp_results",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"nstars",
")",
":",
"mp_results",
".",
"append",
"(",
"pool",
".",
"apply_async",
"(",
"_find_cont_fitfunc",
",",
"(",
"fluxes",
"[",
"i",
",",
":",
"]",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"ivars",
"[",
"i",
",",
":",
"]",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"contmask",
"[",
":",
"]",
")",
",",
"{",
"'deg'",
":",
"deg",
",",
"'ffunc'",
":",
"ffunc",
"}",
")",
")",
"# close mp.Pool",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"cont",
"=",
"np",
".",
"array",
"(",
"[",
"mp_results",
"[",
"i",
"]",
".",
"get",
"(",
")",
".",
"flatten",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"nstars",
")",
"]",
")",
"return",
"cont"
] | Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes | [
"Fit",
"a",
"continuum",
"to",
"a",
"continuum",
"pixels",
"in",
"a",
"segment",
"of",
"spectra"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L150-L220

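The Chebyshev branch above fits only the masked continuum pixels and then evaluates the fit at every pixel; the per-pixel loop can equivalently be vectorized, since a Chebyshev object accepts an array. A self-contained sketch with synthetic data:

import numpy as np

npix = 300
pix = np.arange(npix)
flux = 1.0 + 1e-4 * (pix - npix/2) + 0.02 * np.random.randn(npix)
ivar = np.full(npix, 1e3)
contmask = np.zeros(npix, dtype=bool)
contmask[::10] = True                  # hypothetical continuum pixels

x, y, w = pix[contmask], flux[contmask], ivar[contmask]
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x, y=y, w=w, deg=3)
cont = fit(pix)                        # one call instead of the per-pixel loop
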
annayqho/TheCannon | TheCannon/normalization.py | _find_cont_fitfunc_regions | python

def _find_cont_fitfunc_regions(fluxes, ivars, contmask, deg, ranges, ffunc,
                               n_proc=1):
    """ Run the continuum fit, dealing with the spectrum in regions or chunks

    This is useful if a spectrum has gaps.

    Parameters
    ----------
    fluxes: ndarray of shape (nstars, npixels)
        training set or test set pixel intensities
    ivars: numpy ndarray of shape (nstars, npixels)
        inverse variances, parallel to fluxes
    contmask: numpy ndarray of length (npixels)
        boolean pixel mask, True indicates that pixel is continuum
    deg: int
        degree of fitting function
    ranges: list or numpy ndarray
        the (start, stop) pixel index pairs of the chunks
    ffunc: str
        type of fitting function, "chebyshev" or "sinusoid"

    Returns
    -------
    cont: numpy ndarray of shape (nstars, npixels)
        the continuum, parallel to fluxes
    """
    cont = np.zeros(fluxes.shape)
    for chunk in ranges:
        start = chunk[0]
        stop = chunk[1]
        output = _find_cont_fitfunc(fluxes[:,start:stop],
                                    ivars[:,start:stop],
                                    contmask[start:stop],
                                    deg=deg, ffunc=ffunc,
                                    n_proc=n_proc)
        cont[:, start:stop] = output
    return cont

train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L223-L271

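ranges is a sequence of (start, stop) pixel index pairs, one per contiguous chunk. A hypothetical call for a spectrum with a gap, assuming the module-level imports and the SMALL constant of normalization.py are in scope:

import numpy as np

nstars, npix = 2, 400
fluxes = 1. + 0.01 * np.random.randn(nstars, npix)
ivars = np.full(fluxes.shape, 1e4)
contmask = np.zeros(npix, dtype=bool)
contmask[::7] = True

ranges = [[0, 180], [220, 400]]        # two chunks around a gap at pixels 180-220
cont = _find_cont_fitfunc_regions(fluxes, ivars, contmask,
                                  deg=3, ranges=ranges, ffunc="chebyshev")
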
annayqho/TheCannon | TheCannon/normalization.py | _find_cont_running_quantile | python

def _find_cont_running_quantile(wl, fluxes, ivars, q, delta_lambda,
                                verbose=False):
    """ Find the continuum using a running quantile

    Parameters
    ----------
    wl: numpy ndarray
        wavelength vector
    fluxes: numpy ndarray of shape (nstars, npixels)
        pixel intensities
    ivars: numpy ndarray of shape (nstars, npixels)
        inverse variances, parallel to fluxes
    q: float
        the desired quantile cut
    delta_lambda: float
        the half-width of the wavelength window over which the
        weighted median is calculated

    Returns
    -------
    cont: numpy ndarray of shape (nstars, npixels)
        the running-quantile continuum, parallel to fluxes
    """
    cont = np.zeros(fluxes.shape)
    nstars = fluxes.shape[0]
    for jj in range(nstars):
        if verbose:
            print("cont_norm_q(): working on star [%s/%s]..." % (jj+1, nstars))
        flux = fluxes[jj,:]
        ivar = ivars[jj,:]
        for ll, lam in enumerate(wl):
            indx = (np.where(abs(wl-lam) < delta_lambda))[0]
            flux_cut = flux[indx]
            ivar_cut = ivar[indx]
            cont[jj, ll] = _weighted_median(flux_cut, ivar_cut, q)
    return cont

train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L274-L310

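With uniform weights the weighted median reduces to an ordinary quantile, which makes the behavior easy to check: a high quantile rides over absorption features instead of tracking them. A sketch (grid and line position are made up):

import numpy as np

wl = np.linspace(4000., 4100., 500)
flux = np.ones(wl.size)
flux[240:260] = 0.5                      # a hypothetical absorption line
lam, delta_lambda, q = wl[250], 5., 0.90

indx = np.where(np.abs(wl - lam) < delta_lambda)[0]
print(np.quantile(flux[indx], q))        # -> 1.0: the continuum ignores the dip
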
annayqho/TheCannon | TheCannon/normalization.py | _cont_norm_running_quantile_mp | python

def _cont_norm_running_quantile_mp(wl, fluxes, ivars, q, delta_lambda,
                                   n_proc=2, verbose=False):
    """
    The same as _cont_norm_running_quantile() above,
    but using multi-processing.

    Bo Zhang (NAOC)
    """
    nStar = fluxes.shape[0]
    # send each star to the pool as its own task
    mp_results = []
    pool = mp.Pool(processes=n_proc)
    for i in xrange(nStar):
        mp_results.append(pool.apply_async(
            _find_cont_running_quantile,
            (wl, fluxes[i, :].reshape((1, -1)), ivars[i, :].reshape((1, -1)),
             q, delta_lambda), {'verbose': False}))
        if verbose:
            print('@Bo Zhang: continuum normalizing star [%d/%d] ...'
                  % (i + 1, nStar))
    pool.close()
    pool.join()
    # collect the per-star continua, then normalize
    cont = np.zeros_like(fluxes)
    for i in xrange(nStar):
        cont[i, :] = mp_results[i].get()
    norm_fluxes = np.ones(fluxes.shape)
    norm_fluxes[cont != 0] = fluxes[cont != 0] / cont[cont != 0]
    norm_ivars = cont**2 * ivars
    print('@Bo Zhang: continuum normalization finished!')
    return norm_fluxes, norm_ivars

train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L337-L371

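The pattern above — one apply_async task per star, then indexed get() calls — keeps the output rows in input order regardless of completion order. A trivially runnable illustration of just that pattern (the worker is a stand-in, not part of TheCannon); on platforms that spawn rather than fork, pool code must sit under the main guard:

import multiprocessing as mp
import numpy as np

def _square(row):
    return row ** 2                       # stand-in for the per-star worker

if __name__ == "__main__":
    data = np.arange(12.).reshape(4, 3)
    pool = mp.Pool(processes=2)
    results = [pool.apply_async(_square, (data[i, :],)) for i in range(4)]
    pool.close()
    pool.join()
    out = np.array([r.get() for r in results])   # rows come back in input order
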
annayqho/TheCannon | TheCannon/normalization.py | _cont_norm_running_quantile_regions | python

def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda,
                                        ranges, verbose=True):
    """ Perform continuum normalization using a running quantile, for a
    spectrum that comes in chunks
    """
    print("contnorm.py: continuum norm using running quantile")
    print("Taking spectra in %s chunks" % len(ranges))
    norm_fluxes = np.zeros(fluxes.shape)
    norm_ivars = np.zeros(ivars.shape)
    for chunk in ranges:
        start = chunk[0]
        stop = chunk[1]
        output = _cont_norm_running_quantile(
            wl[start:stop], fluxes[:,start:stop],
            ivars[:,start:stop], q, delta_lambda)
        norm_fluxes[:,start:stop] = output[0]
        norm_ivars[:,start:stop] = output[1]
    return norm_fluxes, norm_ivars

train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L380-L398

annayqho/TheCannon | TheCannon/normalization.py | _cont_norm_running_quantile_regions_mp | python

def _cont_norm_running_quantile_regions_mp(wl, fluxes, ivars, q, delta_lambda,
                                           ranges, n_proc=2, verbose=False):
    """
    Perform continuum normalization using a running quantile, for a
    spectrum that comes in chunks.

    The same as _cont_norm_running_quantile_regions(),
    but using multi-processing.

    Bo Zhang (NAOC)
    """
    print("contnorm.py: continuum norm using running quantile")
    print("Taking spectra in %s chunks" % len(ranges))
    nchunks = len(ranges)
    norm_fluxes = np.zeros(fluxes.shape)
    norm_ivars = np.zeros(ivars.shape)
    for i in xrange(nchunks):
        chunk = ranges[i, :]
        start = chunk[0]
        stop = chunk[1]
        if verbose:
            print('@Bo Zhang: Going to normalize Chunk [%d/%d], pixel:[%d, %d] ...'
                  % (i+1, nchunks, start, stop))
        output = _cont_norm_running_quantile_mp(
            wl[start:stop], fluxes[:, start:stop],
            ivars[:, start:stop], q, delta_lambda,
            n_proc=n_proc, verbose=verbose)
        norm_fluxes[:, start:stop] = output[0]
        norm_ivars[:, start:stop] = output[1]
    return norm_fluxes, norm_ivars

train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L401-L431

annayqho/TheCannon | TheCannon/normalization.py | _cont_norm | python

def _cont_norm(fluxes, ivars, cont):
    """ Continuum-normalize a continuous segment of spectra.

    Parameters
    ----------
    fluxes: numpy ndarray
        pixel intensities
    ivars: numpy ndarray
        inverse variances, parallel to fluxes
    cont: numpy ndarray
        the continuum, parallel to fluxes

    Returns
    -------
    norm_fluxes: numpy ndarray
        normalized pixel intensities
    norm_ivars: numpy ndarray
        rescaled inverse variances
    """
    bad = cont == 0.
    norm_fluxes = np.ones(fluxes.shape)
    norm_fluxes[~bad] = fluxes[~bad] / cont[~bad]
    # dividing flux by cont scales sigma by 1/cont, so ivar picks up cont**2
    norm_ivars = cont**2 * ivars
    return norm_fluxes, norm_ivars

""" Continuum-normalize a continuous segment of spectra.
Parameters
----------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
contmask: boolean mask
True indicates that pixel is continuum
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances
"""
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
norm_fluxes = np.ones(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
bad = cont == 0.
norm_fluxes = np.ones(fluxes.shape)
norm_fluxes[~bad] = fluxes[~bad] / cont[~bad]
norm_ivars = cont**2 * ivars
return norm_fluxes, norm_ivars | [
"def",
"_cont_norm",
"(",
"fluxes",
",",
"ivars",
",",
"cont",
")",
":",
"nstars",
"=",
"fluxes",
".",
"shape",
"[",
"0",
"]",
"npixels",
"=",
"fluxes",
".",
"shape",
"[",
"1",
"]",
"norm_fluxes",
"=",
"np",
".",
"ones",
"(",
"fluxes",
".",
"shape",
")",
"norm_ivars",
"=",
"np",
".",
"zeros",
"(",
"ivars",
".",
"shape",
")",
"bad",
"=",
"cont",
"==",
"0.",
"norm_fluxes",
"=",
"np",
".",
"ones",
"(",
"fluxes",
".",
"shape",
")",
"norm_fluxes",
"[",
"~",
"bad",
"]",
"=",
"fluxes",
"[",
"~",
"bad",
"]",
"/",
"cont",
"[",
"~",
"bad",
"]",
"norm_ivars",
"=",
"cont",
"**",
"2",
"*",
"ivars",
"return",
"norm_fluxes",
",",
"norm_ivars"
] | Continuum-normalize a continuous segment of spectra.
Parameters
----------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
contmask: boolean mask
True indicates that pixel is continuum
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances | [
"Continuum",
"-",
"normalize",
"a",
"continuous",
"segment",
"of",
"spectra",
"."
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L434-L461

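The ivar rescaling follows from error propagation: dividing flux by cont scales sigma by 1/cont, so the inverse variance picks up a factor of cont**2. A quick check against _cont_norm as defined just above (assuming numpy is imported as np, as elsewhere in normalization.py):

import numpy as np

flux = np.array([[1.2, 0.9, 0.0]])
ivar = np.array([[100., 400., 0.]])
cont = np.array([[1.1, 1.0, 0.0]])   # a zero marks an undefined continuum

norm_flux, norm_ivar = _cont_norm(flux, ivar, cont)
assert np.allclose(norm_ivar, cont**2 * ivar)
assert norm_flux[0, 2] == 1.0        # bad pixel keeps the placeholder flux of 1
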
annayqho/TheCannon | TheCannon/normalization.py | _cont_norm_regions | python

def _cont_norm_regions(fluxes, ivars, cont, ranges):
    """ Perform continuum normalization for spectra in chunks

    Useful for spectra that have gaps

    Parameters
    ----------
    fluxes: numpy ndarray
        pixel intensities
    ivars: numpy ndarray
        inverse variances, parallel to fluxes
    cont: numpy ndarray
        the continuum
    ranges: list or np ndarray
        the chunks that the spectrum should be split into

    Returns
    -------
    norm_fluxes: numpy ndarray
        normalized pixel intensities
    norm_ivars: numpy ndarray
        rescaled inverse variances
    """
    nstars = fluxes.shape[0]
    norm_fluxes = np.zeros(fluxes.shape)
    norm_ivars = np.zeros(ivars.shape)
    for chunk in ranges:
        start = chunk[0]
        stop = chunk[1]
        output = _cont_norm(fluxes[:,start:stop],
                            ivars[:,start:stop],
                            cont[:,start:stop])
        norm_fluxes[:,start:stop] = output[0]
        norm_ivars[:,start:stop] = output[1]
    for jj in range(nstars):
        # pixels outside every chunk (or with zero ivar) get placeholder flux 1
        bad = (norm_ivars[jj,:] == 0.)
        norm_fluxes[jj,:][bad] = 1.
    return norm_fluxes, norm_ivars

""" Perform continuum normalization for spectra in chunks
Useful for spectra that have gaps
Parameters
---------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
cont: numpy ndarray
the continuum
ranges: list or np ndarray
the chunks that the spectrum should be split into
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances
"""
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm(fluxes[:,start:stop],
ivars[:,start:stop],
cont[:,start:stop])
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
for jj in range(nstars):
bad = (norm_ivars[jj,:] == 0.)
norm_fluxes[jj,:][bad] = 1.
return norm_fluxes, norm_ivars | [
"def",
"_cont_norm_regions",
"(",
"fluxes",
",",
"ivars",
",",
"cont",
",",
"ranges",
")",
":",
"nstars",
"=",
"fluxes",
".",
"shape",
"[",
"0",
"]",
"norm_fluxes",
"=",
"np",
".",
"zeros",
"(",
"fluxes",
".",
"shape",
")",
"norm_ivars",
"=",
"np",
".",
"zeros",
"(",
"ivars",
".",
"shape",
")",
"for",
"chunk",
"in",
"ranges",
":",
"start",
"=",
"chunk",
"[",
"0",
"]",
"stop",
"=",
"chunk",
"[",
"1",
"]",
"output",
"=",
"_cont_norm",
"(",
"fluxes",
"[",
":",
",",
"start",
":",
"stop",
"]",
",",
"ivars",
"[",
":",
",",
"start",
":",
"stop",
"]",
",",
"cont",
"[",
":",
",",
"start",
":",
"stop",
"]",
")",
"norm_fluxes",
"[",
":",
",",
"start",
":",
"stop",
"]",
"=",
"output",
"[",
"0",
"]",
"norm_ivars",
"[",
":",
",",
"start",
":",
"stop",
"]",
"=",
"output",
"[",
"1",
"]",
"for",
"jj",
"in",
"range",
"(",
"nstars",
")",
":",
"bad",
"=",
"(",
"norm_ivars",
"[",
"jj",
",",
":",
"]",
"==",
"0.",
")",
"norm_fluxes",
"[",
"jj",
",",
":",
"]",
"[",
"bad",
"]",
"=",
"1.",
"return",
"norm_fluxes",
",",
"norm_ivars"
] | Perform continuum normalization for spectra in chunks
Useful for spectra that have gaps
Parameters
---------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
cont: numpy ndarray
the continuum
ranges: list or np ndarray
the chunks that the spectrum should be split into
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances | [
"Perform",
"continuum",
"normalization",
"for",
"spectra",
"in",
"chunks"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L464-L501

annayqho/TheCannon | TheCannon/model.py | CannonModel.train | python

def train(self, ds):
    """ Run training step: solve for best-fit spectral model """
    if self.useErrors:
        self.coeffs, self.scatters, self.new_tr_labels, self.chisqs, \
            self.pivots, self.scales = _train_model_new(ds)
    else:
        self.coeffs, self.scatters, self.chisqs, self.pivots, \
            self.scales = _train_model(ds)

""" Run training step: solve for best-fit spectral model """
if self.useErrors:
self.coeffs, self.scatters, self.new_tr_labels, self.chisqs, self.pivots, self.scales = _train_model_new(ds)
else:
self.coeffs, self.scatters, self.chisqs, self.pivots, self.scales = _train_model(ds) | [
"def",
"train",
"(",
"self",
",",
"ds",
")",
":",
"if",
"self",
".",
"useErrors",
":",
"self",
".",
"coeffs",
",",
"self",
".",
"scatters",
",",
"self",
".",
"new_tr_labels",
",",
"self",
".",
"chisqs",
",",
"self",
".",
"pivots",
",",
"self",
".",
"scales",
"=",
"_train_model_new",
"(",
"ds",
")",
"else",
":",
"self",
".",
"coeffs",
",",
"self",
".",
"scatters",
",",
"self",
".",
"chisqs",
",",
"self",
".",
"pivots",
",",
"self",
".",
"scales",
"=",
"_train_model",
"(",
"ds",
")"
] | Run training step: solve for best-fit spectral model | [
"Run",
"training",
"step",
":",
"solve",
"for",
"best",
"-",
"fit",
"spectral",
"model"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/model.py#L36-L41

annayqho/TheCannon | TheCannon/model.py | CannonModel.infer_spectra | python

def infer_spectra(self, ds):
    """
    After inferring labels for the test spectra,
    infer the model spectra and store them in the model's
    model_spectra attribute.

    Parameters
    ----------
    ds: Dataset object
    """
    lvec_all = _get_lvec(ds.test_label_vals, self.pivots, self.scales, derivs=False)
    self.model_spectra = np.dot(lvec_all, self.coeffs.T)

"""
After inferring labels for the test spectra,
infer the model spectra and update the dataset
model_spectra attribute.
Parameters
----------
ds: Dataset object
"""
lvec_all = _get_lvec(ds.test_label_vals, self.pivots, self.scales, derivs=False)
self.model_spectra = np.dot(lvec_all, self.coeffs.T) | [
"def",
"infer_spectra",
"(",
"self",
",",
"ds",
")",
":",
"lvec_all",
"=",
"_get_lvec",
"(",
"ds",
".",
"test_label_vals",
",",
"self",
".",
"pivots",
",",
"self",
".",
"scales",
",",
"derivs",
"=",
"False",
")",
"self",
".",
"model_spectra",
"=",
"np",
".",
"dot",
"(",
"lvec_all",
",",
"self",
".",
"coeffs",
".",
"T",
")"
] | After inferring labels for the test spectra,
infer the model spectra and update the dataset
model_spectra attribute.
Parameters
----------
ds: Dataset object | [
"After",
"inferring",
"labels",
"for",
"the",
"test",
"spectra",
"infer",
"the",
"model",
"spectra",
"and",
"update",
"the",
"dataset",
"model_spectra",
"attribute",
".",
"Parameters",
"----------",
"ds",
":",
"Dataset",
"object"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/model.py#L66-L77

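infer_spectra is a single linear-algebra step: each model spectrum is the dot product of a star's label vector with the per-pixel coefficient vectors. A shape-level sketch (the term count below assumes the standard quadratic Cannon label vector; the exact layout of _get_lvec is internal and not shown here):

import numpy as np

nstars, nlabels, npix = 5, 3, 100
nterms = 1 + nlabels + nlabels * (nlabels + 1) // 2   # constant + linear + quadratic
lvec_all = np.random.randn(nstars, nterms)            # stand-in for _get_lvec output
coeffs = np.random.randn(npix, nterms)                # one coefficient row per pixel

model_spectra = np.dot(lvec_all, coeffs.T)
assert model_spectra.shape == (nstars, npix)
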
annayqho/TheCannon | TheCannon/model.py | CannonModel.plot_contpix | python

def plot_contpix(self, x, y, contpix_x, contpix_y, figname):
    """ Plot the baseline spectrum with continuum pixels overlaid

    Parameters
    ----------
    x: numpy ndarray
        wavelength vector
    y: numpy ndarray
        baseline spectrum (the leading fit coefficients, theta_0)
    contpix_x: numpy ndarray
        wavelengths of the continuum pixels
    contpix_y: numpy ndarray
        baseline values at the continuum pixels
    figname: str
        filename for the saved plot
    """
    fig, axarr = plt.subplots(2, sharex=True)
    plt.xlabel(r"Wavelength $\lambda (\AA)$")
    plt.xlim(min(x), max(x))
    ax = axarr[0]
    ax.step(x, y, where='mid', c='k', linewidth=0.3,
            label=r'$\theta_0$' + "= the leading fit coefficient")
    ax.scatter(contpix_x, contpix_y, s=1, color='r',
               label="continuum pixels")
    ax.legend(loc='lower right',
              prop={'family':'serif', 'size':'small'})
    ax.set_title("Baseline Spectrum with Continuum Pixels")
    ax.set_ylabel(r'$\theta_0$')
    ax = axarr[1]
    ax.step(x, y, where='mid', c='k', linewidth=0.3,
            label=r'$\theta_0$' + "= the leading fit coefficient")
    ax.scatter(contpix_x, contpix_y, s=1, color='r',
               label="continuum pixels")
    ax.set_title("Baseline Spectrum with Continuum Pixels, Zoomed")
    ax.legend(loc='upper right', prop={'family':'serif', 'size':'small'})
    ax.set_ylabel(r'$\theta_0$')
    ax.set_ylim(0.95, 1.05)
    print("Diagnostic plot: fitted 0th order spec w/ cont pix")
    print("Saved as %s.png" % (figname))
    plt.savefig(figname)
    plt.close()

""" Plot baseline spec with continuum pix overlaid
Parameters
----------
"""
fig, axarr = plt.subplots(2, sharex=True)
plt.xlabel(r"Wavelength $\lambda (\AA)$")
plt.xlim(min(x), max(x))
ax = axarr[0]
ax.step(x, y, where='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.legend(loc='lower right',
prop={'family':'serif', 'size':'small'})
ax.set_title("Baseline Spectrum with Continuum Pixels")
ax.set_ylabel(r'$\theta_0$')
ax = axarr[1]
ax.step(x, y, where='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.set_title("Baseline Spectrum with Continuum Pixels, Zoomed")
ax.legend(loc='upper right', prop={'family':'serif',
'size':'small'})
ax.set_ylabel(r'$\theta_0$')
ax.set_ylim(0.95, 1.05)
print("Diagnostic plot: fitted 0th order spec w/ cont pix")
print("Saved as %s.png" % (figname))
plt.savefig(figname)
plt.close() | [
"def",
"plot_contpix",
"(",
"self",
",",
"x",
",",
"y",
",",
"contpix_x",
",",
"contpix_y",
",",
"figname",
")",
":",
"fig",
",",
"axarr",
"=",
"plt",
".",
"subplots",
"(",
"2",
",",
"sharex",
"=",
"True",
")",
"plt",
".",
"xlabel",
"(",
"r\"Wavelength $\\lambda (\\AA)$\"",
")",
"plt",
".",
"xlim",
"(",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
")",
"ax",
"=",
"axarr",
"[",
"0",
"]",
"ax",
".",
"step",
"(",
"x",
",",
"y",
",",
"where",
"=",
"'mid'",
",",
"c",
"=",
"'k'",
",",
"linewidth",
"=",
"0.3",
",",
"label",
"=",
"r'$\\theta_0$'",
"+",
"\"= the leading fit coefficient\"",
")",
"ax",
".",
"scatter",
"(",
"contpix_x",
",",
"contpix_y",
",",
"s",
"=",
"1",
",",
"color",
"=",
"'r'",
",",
"label",
"=",
"\"continuum pixels\"",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'lower right'",
",",
"prop",
"=",
"{",
"'family'",
":",
"'serif'",
",",
"'size'",
":",
"'small'",
"}",
")",
"ax",
".",
"set_title",
"(",
"\"Baseline Spectrum with Continuum Pixels\"",
")",
"ax",
".",
"set_ylabel",
"(",
"r'$\\theta_0$'",
")",
"ax",
"=",
"axarr",
"[",
"1",
"]",
"ax",
".",
"step",
"(",
"x",
",",
"y",
",",
"where",
"=",
"'mid'",
",",
"c",
"=",
"'k'",
",",
"linewidth",
"=",
"0.3",
",",
"label",
"=",
"r'$\\theta_0$'",
"+",
"\"= the leading fit coefficient\"",
")",
"ax",
".",
"scatter",
"(",
"contpix_x",
",",
"contpix_y",
",",
"s",
"=",
"1",
",",
"color",
"=",
"'r'",
",",
"label",
"=",
"\"continuum pixels\"",
")",
"ax",
".",
"set_title",
"(",
"\"Baseline Spectrum with Continuum Pixels, Zoomed\"",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
",",
"prop",
"=",
"{",
"'family'",
":",
"'serif'",
",",
"'size'",
":",
"'small'",
"}",
")",
"ax",
".",
"set_ylabel",
"(",
"r'$\\theta_0$'",
")",
"ax",
".",
"set_ylim",
"(",
"0.95",
",",
"1.05",
")",
"print",
"(",
"\"Diagnostic plot: fitted 0th order spec w/ cont pix\"",
")",
"print",
"(",
"\"Saved as %s.png\"",
"%",
"(",
"figname",
")",
")",
"plt",
".",
"savefig",
"(",
"figname",
")",
"plt",
".",
"close",
"(",
")"
] | Plot baseline spec with continuum pix overlaid
Parameters
---------- | [
"Plot",
"baseline",
"spec",
"with",
"continuum",
"pix",
"overlaid"
train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/model.py#L80-L111

annayqho/TheCannon | TheCannon/model.py | CannonModel.diagnostics_contpix | python

def diagnostics_contpix(self, data, nchunks=10, fig="baseline_spec_with_cont_pix"):
    """ Call plot_contpix once for each nth of the spectrum """
    if data.contmask is None:
        print("No contmask set")
    else:
        coeffs_all = self.coeffs
        wl = data.wl
        baseline_spec = coeffs_all[:,0]
        contmask = data.contmask
        contpix_x = wl[contmask]
        contpix_y = baseline_spec[contmask]
        # trim the remainder so the spectrum splits into equal chunks
        rem = len(wl) % nchunks
        wl_split = np.array(np.split(wl[0:len(wl)-rem], nchunks))
        baseline_spec_split = np.array(
            np.split(baseline_spec[0:len(wl)-rem], nchunks))
        nchunks = wl_split.shape[0]
        for i in range(nchunks):
            fig_chunk = fig + "_%s" % str(i)
            wl_chunk = wl_split[i,:]
            baseline_spec_chunk = baseline_spec_split[i,:]
            take = np.logical_and(
                contpix_x > wl_chunk[0], contpix_x < wl_chunk[-1])
            self.plot_contpix(
                wl_chunk, baseline_spec_chunk,
                contpix_x[take], contpix_y[take], fig_chunk)

""" Call plot_contpix once for each nth of the spectrum """
if data.contmask is None:
print("No contmask set")
else:
coeffs_all = self.coeffs
wl = data.wl
baseline_spec = coeffs_all[:,0]
contmask = data.contmask
contpix_x = wl[contmask]
contpix_y = baseline_spec[contmask]
rem = len(wl)%nchunks
wl_split = np.array(np.split(wl[0:len(wl)-rem],nchunks))
baseline_spec_split = np.array(
np.split(baseline_spec[0:len(wl)-rem],nchunks))
nchunks = wl_split.shape[0]
for i in range(nchunks):
fig_chunk = fig + "_%s" %str(i)
wl_chunk = wl_split[i,:]
baseline_spec_chunk = baseline_spec_split[i,:]
take = np.logical_and(
contpix_x>wl_chunk[0], contpix_x<wl_chunk[-1])
self.plot_contpix(
wl_chunk, baseline_spec_chunk,
contpix_x[take], contpix_y[take], fig_chunk) | [
"def",
"diagnostics_contpix",
"(",
"self",
",",
"data",
",",
"nchunks",
"=",
"10",
",",
"fig",
"=",
"\"baseline_spec_with_cont_pix\"",
")",
":",
"if",
"data",
".",
"contmask",
"is",
"None",
":",
"print",
"(",
"\"No contmask set\"",
")",
"else",
":",
"coeffs_all",
"=",
"self",
".",
"coeffs",
"wl",
"=",
"data",
".",
"wl",
"baseline_spec",
"=",
"coeffs_all",
"[",
":",
",",
"0",
"]",
"contmask",
"=",
"data",
".",
"contmask",
"contpix_x",
"=",
"wl",
"[",
"contmask",
"]",
"contpix_y",
"=",
"baseline_spec",
"[",
"contmask",
"]",
"rem",
"=",
"len",
"(",
"wl",
")",
"%",
"nchunks",
"wl_split",
"=",
"np",
".",
"array",
"(",
"np",
".",
"split",
"(",
"wl",
"[",
"0",
":",
"len",
"(",
"wl",
")",
"-",
"rem",
"]",
",",
"nchunks",
")",
")",
"baseline_spec_split",
"=",
"np",
".",
"array",
"(",
"np",
".",
"split",
"(",
"baseline_spec",
"[",
"0",
":",
"len",
"(",
"wl",
")",
"-",
"rem",
"]",
",",
"nchunks",
")",
")",
"nchunks",
"=",
"wl_split",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"nchunks",
")",
":",
"fig_chunk",
"=",
"fig",
"+",
"\"_%s\"",
"%",
"str",
"(",
"i",
")",
"wl_chunk",
"=",
"wl_split",
"[",
"i",
",",
":",
"]",
"baseline_spec_chunk",
"=",
"baseline_spec_split",
"[",
"i",
",",
":",
"]",
"take",
"=",
"np",
".",
"logical_and",
"(",
"contpix_x",
">",
"wl_chunk",
"[",
"0",
"]",
",",
"contpix_x",
"<",
"wl_chunk",
"[",
"-",
"1",
"]",
")",
"self",
".",
"plot_contpix",
"(",
"wl_chunk",
",",
"baseline_spec_chunk",
",",
"contpix_x",
"[",
"take",
"]",
",",
"contpix_y",
"[",
"take",
"]",
",",
"fig_chunk",
")"
] | Call plot_contpix once for each nth of the spectrum | [
"Call",
"plot_contpix",
"once",
"for",
"each",
"nth",
"of",
"the",
"spectrum"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/model.py#L114-L138 |
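One detail of the chunking in diagnostics_contpix above: np.split only accepts an array whose length divides evenly, which is why the trailing len(wl) % nchunks pixels are dropped first. A minimal sketch of the same arithmetic on an invented grid:

    import numpy as np

    wl = np.linspace(8400.0, 8800.0, 103)   # hypothetical wavelength grid, 103 pixels
    nchunks = 10
    rem = len(wl) % nchunks                 # the 3 trailing pixels are discarded
    chunks = np.split(wl[:len(wl) - rem], nchunks)
    assert all(len(c) == 10 for c in chunks)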
annayqho/TheCannon | TheCannon/model.py | CannonModel.diagnostics_plot_chisq | def diagnostics_plot_chisq(self, ds, figname = "modelfit_chisqs.png"):
""" Produce a set of diagnostic plots for the model
Parameters
----------
(optional) figname: str
Filename of output saved plot
"""
label_names = ds.get_plotting_labels()
lams = ds.wl
pivots = self.pivots
npixels = len(lams)
nlabels = len(pivots)
chisqs = self.chisqs
coeffs = self.coeffs
scatters = self.scatters
# Histogram of the chi squareds of ind. stars
plt.hist(np.sum(chisqs, axis=0), color='lightblue', alpha=0.7,
bins=int(np.sqrt(len(chisqs))))
dof = len(lams) - coeffs.shape[1] # for one star
plt.axvline(x=dof, c='k', linewidth=2, label="DOF")
plt.legend()
plt.title("Distribution of " + r"$\chi^2$" + " of the Model Fit")
plt.ylabel("Count")
plt.xlabel(r"$\chi^2$" + " of Individual Star")
print("Diagnostic plot: histogram of the red chi squareds of the fit")
print("Saved as %s" %figname)
plt.savefig(figname)
plt.close() | python | def diagnostics_plot_chisq(self, ds, figname = "modelfit_chisqs.png"):
""" Produce a set of diagnostic plots for the model
Parameters
----------
(optional) figname: str
Filename of output saved plot
"""
label_names = ds.get_plotting_labels()
lams = ds.wl
pivots = self.pivots
npixels = len(lams)
nlabels = len(pivots)
chisqs = self.chisqs
coeffs = self.coeffs
scatters = self.scatters
# Histogram of the chi squareds of ind. stars
plt.hist(np.sum(chisqs, axis=0), color='lightblue', alpha=0.7,
bins=int(np.sqrt(len(chisqs))))
dof = len(lams) - coeffs.shape[1] # for one star
plt.axvline(x=dof, c='k', linewidth=2, label="DOF")
plt.legend()
plt.title("Distribution of " + r"$\chi^2$" + " of the Model Fit")
plt.ylabel("Count")
plt.xlabel(r"$\chi^2$" + " of Individual Star")
print("Diagnostic plot: histogram of the red chi squareds of the fit")
print("Saved as %s" %figname)
plt.savefig(figname)
plt.close() | [
"def",
"diagnostics_plot_chisq",
"(",
"self",
",",
"ds",
",",
"figname",
"=",
"\"modelfit_chisqs.png\"",
")",
":",
"label_names",
"=",
"ds",
".",
"get_plotting_labels",
"(",
")",
"lams",
"=",
"ds",
".",
"wl",
"pivots",
"=",
"self",
".",
"pivots",
"npixels",
"=",
"len",
"(",
"lams",
")",
"nlabels",
"=",
"len",
"(",
"pivots",
")",
"chisqs",
"=",
"self",
".",
"chisqs",
"coeffs",
"=",
"self",
".",
"coeffs",
"scatters",
"=",
"self",
".",
"scatters",
"# Histogram of the chi squareds of ind. stars",
"plt",
".",
"hist",
"(",
"np",
".",
"sum",
"(",
"chisqs",
",",
"axis",
"=",
"0",
")",
",",
"color",
"=",
"'lightblue'",
",",
"alpha",
"=",
"0.7",
",",
"bins",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"len",
"(",
"chisqs",
")",
")",
")",
")",
"dof",
"=",
"len",
"(",
"lams",
")",
"-",
"coeffs",
".",
"shape",
"[",
"1",
"]",
"# for one star",
"plt",
".",
"axvline",
"(",
"x",
"=",
"dof",
",",
"c",
"=",
"'k'",
",",
"linewidth",
"=",
"2",
",",
"label",
"=",
"\"DOF\"",
")",
"plt",
".",
"legend",
"(",
")",
"plt",
".",
"title",
"(",
"\"Distribution of \"",
"+",
"r\"$\\chi^2$\"",
"+",
"\" of the Model Fit\"",
")",
"plt",
".",
"ylabel",
"(",
"\"Count\"",
")",
"plt",
".",
"xlabel",
"(",
"r\"$\\chi^2$\"",
"+",
"\" of Individual Star\"",
")",
"print",
"(",
"\"Diagnostic plot: histogram of the red chi squareds of the fit\"",
")",
"print",
"(",
"\"Saved as %s\"",
"%",
"figname",
")",
"plt",
".",
"savefig",
"(",
"figname",
")",
"plt",
".",
"close",
"(",
")"
] | Produce a set of diagnostic plots for the model
Parameters
----------
(optional) figname: str
Filename of output saved plot | [
"Produce",
"a",
"set",
"of",
"diagnostic",
"plots",
"for",
"the",
"model"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/model.py#L213-L242 |
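The DOF line drawn in the histogram above is the number of pixels minus the number of fit coefficients, and for a well-calibrated model the per-star summed chi-squared should cluster around it. A hedged sanity check of that expectation, with every array size invented:

    import numpy as np

    npix, ncoeff, nstars = 8000, 15, 200    # hypothetical sizes
    dof = npix - ncoeff
    # draw fake per-star total chi^2 straight from the expected distribution
    chisq_per_star = np.random.chisquare(dof, size=nstars)
    print(np.mean(chisq_per_star) / dof)    # should be close to 1 for a good fit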
annayqho/TheCannon | code/lamost/mass_age/cn/calc_astroseismic_mass.py | calc_mass | def calc_mass(nu_max, delta_nu, teff):
""" asteroseismic scaling relations """
NU_MAX = 3140.0 # microHz
DELTA_NU = 135.03 # microHz
TEFF = 5777.0
return (nu_max/NU_MAX)**3 * (delta_nu/DELTA_NU)**(-4) * (teff/TEFF)**1.5 | python | def calc_mass(nu_max, delta_nu, teff):
""" asteroseismic scaling relations """
NU_MAX = 3140.0 # microHz
DELTA_NU = 135.03 # microHz
TEFF = 5777.0
return (nu_max/NU_MAX)**3 * (delta_nu/DELTA_NU)**(-4) * (teff/TEFF)**1.5 | [
"def",
"calc_mass",
"(",
"nu_max",
",",
"delta_nu",
",",
"teff",
")",
":",
"NU_MAX",
"=",
"3140.0",
"# microHz",
"DELTA_NU",
"=",
"135.03",
"# microHz",
"TEFF",
"=",
"5777.0",
"return",
"(",
"nu_max",
"/",
"NU_MAX",
")",
"**",
"3",
"*",
"(",
"delta_nu",
"/",
"DELTA_NU",
")",
"**",
"(",
"-",
"4",
")",
"*",
"(",
"teff",
"/",
"TEFF",
")",
"**",
"1.5"
] | asteroseismic scaling relations | [
"asteroseismic",
"scaling",
"relations"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/mass_age/cn/calc_astroseismic_mass.py#L3-L8 |
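Written out, calc_mass implements the standard asteroseismic mass scaling relation, with the solar reference values hard-coded in the function:

    \frac{M}{M_\odot} \simeq
      \left(\frac{\nu_\mathrm{max}}{3140\,\mu\mathrm{Hz}}\right)^{3}
      \left(\frac{\Delta\nu}{135.03\,\mu\mathrm{Hz}}\right)^{-4}
      \left(\frac{T_\mathrm{eff}}{5777\,\mathrm{K}}\right)^{3/2}

As a sanity check, calc_mass(3140.0, 135.03, 5777.0) returns exactly 1.0, i.e. one solar mass.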
annayqho/TheCannon | code/lamost/mass_age/mass_age_functions.py | calc_mass_2 | def calc_mass_2(mh,cm,nm,teff,logg):
""" Table A2 in Martig 2016 """
CplusN = calc_sum(mh,cm,nm)
t = teff/4000.
return (95.8689 - 10.4042*mh - 0.7266*mh**2
+ 41.3642*cm - 5.3242*cm*mh - 46.7792*cm**2
+ 15.0508*nm - 0.9342*nm*mh - 30.5159*nm*cm - 1.6083*nm**2
- 67.6093*CplusN + 7.0486*CplusN*mh + 133.5775*CplusN*cm + 38.9439*CplusN*nm - 88.9948*CplusN**2
- 144.1765*t + 5.1180*t*mh - 73.7690*t*cm - 15.2927*t*nm + 101.7482*t*CplusN + 27.7690*t**2
- 9.4246*logg + 1.5159*logg*mh + 16.0412*logg*cm + 1.3549*logg*nm - 18.6527*logg*CplusN + 28.8015*logg*t - 4.0982*logg**2) | python | def calc_mass_2(mh,cm,nm,teff,logg):
""" Table A2 in Martig 2016 """
CplusN = calc_sum(mh,cm,nm)
t = teff/4000.
return (95.8689 - 10.4042*mh - 0.7266*mh**2
+ 41.3642*cm - 5.3242*cm*mh - 46.7792*cm**2
+ 15.0508*nm - 0.9342*nm*mh - 30.5159*nm*cm - 1.6083*nm**2
- 67.6093*CplusN + 7.0486*CplusN*mh + 133.5775*CplusN*cm + 38.9439*CplusN*nm - 88.9948*CplusN**2
- 144.1765*t + 5.1180*t*mh - 73.7690*t*cm - 15.2927*t*nm + 101.7482*t*CplusN + 27.7690*t**2
- 9.4246*logg + 1.5159*logg*mh + 16.0412*logg*cm + 1.3549*logg*nm - 18.6527*logg*CplusN + 28.8015*logg*t - 4.0982*logg**2) | [
"def",
"calc_mass_2",
"(",
"mh",
",",
"cm",
",",
"nm",
",",
"teff",
",",
"logg",
")",
":",
"CplusN",
"=",
"calc_sum",
"(",
"mh",
",",
"cm",
",",
"nm",
")",
"t",
"=",
"teff",
"/",
"4000.",
"return",
"(",
"95.8689",
"-",
"10.4042",
"*",
"mh",
"-",
"0.7266",
"*",
"mh",
"**",
"2",
"+",
"41.3642",
"*",
"cm",
"-",
"5.3242",
"*",
"cm",
"*",
"mh",
"-",
"46.7792",
"*",
"cm",
"**",
"2",
"+",
"15.0508",
"*",
"nm",
"-",
"0.9342",
"*",
"nm",
"*",
"mh",
"-",
"30.5159",
"*",
"nm",
"*",
"cm",
"-",
"1.6083",
"*",
"nm",
"**",
"2",
"-",
"67.6093",
"*",
"CplusN",
"+",
"7.0486",
"*",
"CplusN",
"*",
"mh",
"+",
"133.5775",
"*",
"CplusN",
"*",
"cm",
"+",
"38.9439",
"*",
"CplusN",
"*",
"nm",
"-",
"88.9948",
"*",
"CplusN",
"**",
"2",
"-",
"144.1765",
"*",
"t",
"+",
"5.1180",
"*",
"t",
"*",
"mh",
"-",
"73.7690",
"*",
"t",
"*",
"cm",
"-",
"15.2927",
"*",
"t",
"*",
"nm",
"+",
"101.7482",
"*",
"t",
"*",
"CplusN",
"+",
"27.7690",
"*",
"t",
"**",
"2",
"-",
"9.4246",
"*",
"logg",
"+",
"1.5159",
"*",
"logg",
"*",
"mh",
"+",
"16.0412",
"*",
"logg",
"*",
"cm",
"+",
"1.3549",
"*",
"logg",
"*",
"nm",
"-",
"18.6527",
"*",
"logg",
"*",
"CplusN",
"+",
"28.8015",
"*",
"logg",
"*",
"t",
"-",
"4.0982",
"*",
"logg",
"**",
"2",
")"
] | Table A2 in Martig 2016 | [
"Table",
"A2",
"in",
"Martig",
"2016"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/mass_age/mass_age_functions.py#L39-L48 |
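calc_mass_2 evaluates a quadratic polynomial in [M/H], [C/M], [N/M], the combined [(C+N)/M] term (computed by the calc_sum helper defined elsewhere in the same module), Teff/4000, and log g, and returns a mass in solar units following Martig et al. (2016), Table A2. A hedged usage sketch; the red-giant label values are invented and the result is only meaningful inside the calibration range of that work:

    # hypothetical red-giant labels; output is M/Msun per Martig et al. (2016)
    mass = calc_mass_2(mh=-0.1, cm=-0.05, nm=0.1, teff=4750.0, logg=2.4)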
annayqho/TheCannon | TheCannon/helpers/corner/corner.py | corner | def corner(xs, bins=20, range=None, weights=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis indexes the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
# Also make sure it's a normal list.
range = list(range)
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [float(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=weights,
range=range[i], **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=weights,
range=range[i])
n = gaussian_filter(n, smooth1d)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None and truths[i] is not None:
ax.axvline(truths[i], color=truth_color)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
title = None
if title_fmt is not None:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
weights=weights)
q_m, q_p = q_50-q_16, q_84-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
elif labels is not None:
title = "{0}".format(labels[i])
if title is not None:
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, bins=[bins[j], bins[i]],
**hist2d_kwargs)
if truths is not None:
if truths[i] is not None and truths[j] is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
if truths[j] is not None:
ax.axvline(truths[j], color=truth_color)
if truths[i] is not None:
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
return fig | python | def corner(xs, bins=20, range=None, weights=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis indexes the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
# Also make sure it's a normal list.
range = list(range)
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [float(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=weights,
range=range[i], **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=weights,
range=range[i])
n = gaussian_filter(n, smooth1d)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None and truths[i] is not None:
ax.axvline(truths[i], color=truth_color)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
title = None
if title_fmt is not None:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
weights=weights)
q_m, q_p = q_50-q_16, q_84-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
elif labels is not None:
title = "{0}".format(labels[i])
if title is not None:
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, bins=[bins[j], bins[i]],
**hist2d_kwargs)
if truths is not None:
if truths[i] is not None and truths[j] is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
if truths[j] is not None:
ax.axvline(truths[j], color=truth_color)
if truths[i] is not None:
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
return fig | [
"def",
"corner",
"(",
"xs",
",",
"bins",
"=",
"20",
",",
"range",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"color",
"=",
"\"k\"",
",",
"smooth",
"=",
"None",
",",
"smooth1d",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"label_kwargs",
"=",
"None",
",",
"show_titles",
"=",
"False",
",",
"title_fmt",
"=",
"\".2f\"",
",",
"title_kwargs",
"=",
"None",
",",
"truths",
"=",
"None",
",",
"truth_color",
"=",
"\"#4682b4\"",
",",
"scale_hist",
"=",
"False",
",",
"quantiles",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"fig",
"=",
"None",
",",
"max_n_ticks",
"=",
"5",
",",
"top_ticks",
"=",
"False",
",",
"use_math_text",
"=",
"False",
",",
"hist_kwargs",
"=",
"None",
",",
"*",
"*",
"hist2d_kwargs",
")",
":",
"if",
"quantiles",
"is",
"None",
":",
"quantiles",
"=",
"[",
"]",
"if",
"title_kwargs",
"is",
"None",
":",
"title_kwargs",
"=",
"dict",
"(",
")",
"if",
"label_kwargs",
"is",
"None",
":",
"label_kwargs",
"=",
"dict",
"(",
")",
"# Try filling in labels from pandas.DataFrame columns.",
"if",
"labels",
"is",
"None",
":",
"try",
":",
"labels",
"=",
"xs",
".",
"columns",
"except",
"AttributeError",
":",
"pass",
"# Deal with 1D sample lists.",
"xs",
"=",
"np",
".",
"atleast_1d",
"(",
"xs",
")",
"if",
"len",
"(",
"xs",
".",
"shape",
")",
"==",
"1",
":",
"xs",
"=",
"np",
".",
"atleast_2d",
"(",
"xs",
")",
"else",
":",
"assert",
"len",
"(",
"xs",
".",
"shape",
")",
"==",
"2",
",",
"\"The input sample array must be 1- or 2-D.\"",
"xs",
"=",
"xs",
".",
"T",
"assert",
"xs",
".",
"shape",
"[",
"0",
"]",
"<=",
"xs",
".",
"shape",
"[",
"1",
"]",
",",
"\"I don't believe that you want more \"",
"\"dimensions than samples!\"",
"# Parse the weight array.",
"if",
"weights",
"is",
"not",
"None",
":",
"weights",
"=",
"np",
".",
"asarray",
"(",
"weights",
")",
"if",
"weights",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Weights must be 1-D\"",
")",
"if",
"xs",
".",
"shape",
"[",
"1",
"]",
"!=",
"weights",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"Lengths of weights must match number of samples\"",
")",
"# Parse the parameter ranges.",
"if",
"range",
"is",
"None",
":",
"if",
"\"extents\"",
"in",
"hist2d_kwargs",
":",
"logging",
".",
"warn",
"(",
"\"Deprecated keyword argument 'extents'. \"",
"\"Use 'range' instead.\"",
")",
"range",
"=",
"hist2d_kwargs",
".",
"pop",
"(",
"\"extents\"",
")",
"else",
":",
"range",
"=",
"[",
"[",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
"]",
"for",
"x",
"in",
"xs",
"]",
"# Check for parameters that never change.",
"m",
"=",
"np",
".",
"array",
"(",
"[",
"e",
"[",
"0",
"]",
"==",
"e",
"[",
"1",
"]",
"for",
"e",
"in",
"range",
"]",
",",
"dtype",
"=",
"bool",
")",
"if",
"np",
".",
"any",
"(",
"m",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"It looks like the parameter(s) in \"",
"\"column(s) {0} have no dynamic range. \"",
"\"Please provide a `range` argument.\"",
")",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"map",
"(",
"\"{0}\"",
".",
"format",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"m",
")",
")",
"[",
"m",
"]",
")",
")",
")",
")",
"else",
":",
"# If any of the extents are percentiles, convert them to ranges.",
"# Also make sure it's a normal list.",
"range",
"=",
"list",
"(",
"range",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"range",
")",
":",
"try",
":",
"emin",
",",
"emax",
"=",
"range",
"[",
"i",
"]",
"except",
"TypeError",
":",
"q",
"=",
"[",
"0.5",
"-",
"0.5",
"*",
"range",
"[",
"i",
"]",
",",
"0.5",
"+",
"0.5",
"*",
"range",
"[",
"i",
"]",
"]",
"range",
"[",
"i",
"]",
"=",
"quantile",
"(",
"xs",
"[",
"i",
"]",
",",
"q",
",",
"weights",
"=",
"weights",
")",
"if",
"len",
"(",
"range",
")",
"!=",
"xs",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"Dimension mismatch between samples and range\"",
")",
"# Parse the bin specifications.",
"try",
":",
"bins",
"=",
"[",
"float",
"(",
"bins",
")",
"for",
"_",
"in",
"range",
"]",
"except",
"TypeError",
":",
"if",
"len",
"(",
"bins",
")",
"!=",
"len",
"(",
"range",
")",
":",
"raise",
"ValueError",
"(",
"\"Dimension mismatch between bins and range\"",
")",
"# Some magic numbers for pretty axis layout.",
"K",
"=",
"len",
"(",
"xs",
")",
"factor",
"=",
"2.0",
"# size of one side of one panel",
"lbdim",
"=",
"0.5",
"*",
"factor",
"# size of left/bottom margin",
"trdim",
"=",
"0.2",
"*",
"factor",
"# size of top/right margin",
"whspace",
"=",
"0.05",
"# w/hspace size",
"plotdim",
"=",
"factor",
"*",
"K",
"+",
"factor",
"*",
"(",
"K",
"-",
"1.",
")",
"*",
"whspace",
"dim",
"=",
"lbdim",
"+",
"plotdim",
"+",
"trdim",
"# Create a new figure if one wasn't provided.",
"if",
"fig",
"is",
"None",
":",
"fig",
",",
"axes",
"=",
"pl",
".",
"subplots",
"(",
"K",
",",
"K",
",",
"figsize",
"=",
"(",
"dim",
",",
"dim",
")",
")",
"else",
":",
"try",
":",
"axes",
"=",
"np",
".",
"array",
"(",
"fig",
".",
"axes",
")",
".",
"reshape",
"(",
"(",
"K",
",",
"K",
")",
")",
"except",
":",
"raise",
"ValueError",
"(",
"\"Provided figure has {0} axes, but data has \"",
"\"dimensions K={1}\"",
".",
"format",
"(",
"len",
"(",
"fig",
".",
"axes",
")",
",",
"K",
")",
")",
"# Format the figure.",
"lb",
"=",
"lbdim",
"/",
"dim",
"tr",
"=",
"(",
"lbdim",
"+",
"plotdim",
")",
"/",
"dim",
"fig",
".",
"subplots_adjust",
"(",
"left",
"=",
"lb",
",",
"bottom",
"=",
"lb",
",",
"right",
"=",
"tr",
",",
"top",
"=",
"tr",
",",
"wspace",
"=",
"whspace",
",",
"hspace",
"=",
"whspace",
")",
"# Set up the default histogram keywords.",
"if",
"hist_kwargs",
"is",
"None",
":",
"hist_kwargs",
"=",
"dict",
"(",
")",
"hist_kwargs",
"[",
"\"color\"",
"]",
"=",
"hist_kwargs",
".",
"get",
"(",
"\"color\"",
",",
"color",
")",
"if",
"smooth1d",
"is",
"None",
":",
"hist_kwargs",
"[",
"\"histtype\"",
"]",
"=",
"hist_kwargs",
".",
"get",
"(",
"\"histtype\"",
",",
"\"step\"",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"xs",
")",
":",
"# Deal with masked arrays.",
"if",
"hasattr",
"(",
"x",
",",
"\"compressed\"",
")",
":",
"x",
"=",
"x",
".",
"compressed",
"(",
")",
"if",
"np",
".",
"shape",
"(",
"xs",
")",
"[",
"0",
"]",
"==",
"1",
":",
"ax",
"=",
"axes",
"else",
":",
"ax",
"=",
"axes",
"[",
"i",
",",
"i",
"]",
"# Plot the histograms.",
"if",
"smooth1d",
"is",
"None",
":",
"n",
",",
"_",
",",
"_",
"=",
"ax",
".",
"hist",
"(",
"x",
",",
"bins",
"=",
"bins",
"[",
"i",
"]",
",",
"weights",
"=",
"weights",
",",
"range",
"=",
"range",
"[",
"i",
"]",
",",
"*",
"*",
"hist_kwargs",
")",
"else",
":",
"if",
"gaussian_filter",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"Please install scipy for smoothing\"",
")",
"n",
",",
"b",
"=",
"np",
".",
"histogram",
"(",
"x",
",",
"bins",
"=",
"bins",
"[",
"i",
"]",
",",
"weights",
"=",
"weights",
",",
"range",
"=",
"range",
"[",
"i",
"]",
")",
"n",
"=",
"gaussian_filter",
"(",
"n",
",",
"smooth1d",
")",
"x0",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"b",
"[",
":",
"-",
"1",
"]",
",",
"b",
"[",
"1",
":",
"]",
")",
")",
")",
".",
"flatten",
"(",
")",
"y0",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"n",
",",
"n",
")",
")",
")",
".",
"flatten",
"(",
")",
"ax",
".",
"plot",
"(",
"x0",
",",
"y0",
",",
"*",
"*",
"hist_kwargs",
")",
"if",
"truths",
"is",
"not",
"None",
"and",
"truths",
"[",
"i",
"]",
"is",
"not",
"None",
":",
"ax",
".",
"axvline",
"(",
"truths",
"[",
"i",
"]",
",",
"color",
"=",
"truth_color",
")",
"# Plot quantiles if wanted.",
"if",
"len",
"(",
"quantiles",
")",
">",
"0",
":",
"qvalues",
"=",
"quantile",
"(",
"x",
",",
"quantiles",
",",
"weights",
"=",
"weights",
")",
"for",
"q",
"in",
"qvalues",
":",
"ax",
".",
"axvline",
"(",
"q",
",",
"ls",
"=",
"\"dashed\"",
",",
"color",
"=",
"color",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Quantiles:\"",
")",
"print",
"(",
"[",
"item",
"for",
"item",
"in",
"zip",
"(",
"quantiles",
",",
"qvalues",
")",
"]",
")",
"if",
"show_titles",
":",
"title",
"=",
"None",
"if",
"title_fmt",
"is",
"not",
"None",
":",
"# Compute the quantiles for the title. This might redo",
"# unneeded computation but who cares.",
"q_16",
",",
"q_50",
",",
"q_84",
"=",
"quantile",
"(",
"x",
",",
"[",
"0.16",
",",
"0.5",
",",
"0.84",
"]",
",",
"weights",
"=",
"weights",
")",
"q_m",
",",
"q_p",
"=",
"q_50",
"-",
"q_16",
",",
"q_84",
"-",
"q_50",
"# Format the quantile display.",
"fmt",
"=",
"\"{{0:{0}}}\"",
".",
"format",
"(",
"title_fmt",
")",
".",
"format",
"title",
"=",
"r\"${{{0}}}_{{-{1}}}^{{+{2}}}$\"",
"title",
"=",
"title",
".",
"format",
"(",
"fmt",
"(",
"q_50",
")",
",",
"fmt",
"(",
"q_m",
")",
",",
"fmt",
"(",
"q_p",
")",
")",
"# Add in the column name if it's given.",
"if",
"labels",
"is",
"not",
"None",
":",
"title",
"=",
"\"{0} = {1}\"",
".",
"format",
"(",
"labels",
"[",
"i",
"]",
",",
"title",
")",
"elif",
"labels",
"is",
"not",
"None",
":",
"title",
"=",
"\"{0}\"",
".",
"format",
"(",
"labels",
"[",
"i",
"]",
")",
"if",
"title",
"is",
"not",
"None",
":",
"ax",
".",
"set_title",
"(",
"title",
",",
"*",
"*",
"title_kwargs",
")",
"# Set up the axes.",
"ax",
".",
"set_xlim",
"(",
"range",
"[",
"i",
"]",
")",
"if",
"scale_hist",
":",
"maxn",
"=",
"np",
".",
"max",
"(",
"n",
")",
"ax",
".",
"set_ylim",
"(",
"-",
"0.1",
"*",
"maxn",
",",
"1.1",
"*",
"maxn",
")",
"else",
":",
"ax",
".",
"set_ylim",
"(",
"0",
",",
"1.1",
"*",
"np",
".",
"max",
"(",
"n",
")",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"ax",
".",
"xaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"max_n_ticks",
",",
"prune",
"=",
"\"lower\"",
")",
")",
"if",
"i",
"<",
"K",
"-",
"1",
":",
"if",
"top_ticks",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"\"top\"",
")",
"[",
"l",
".",
"set_rotation",
"(",
"45",
")",
"for",
"l",
"in",
"ax",
".",
"get_xticklabels",
"(",
")",
"]",
"else",
":",
"ax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"else",
":",
"[",
"l",
".",
"set_rotation",
"(",
"45",
")",
"for",
"l",
"in",
"ax",
".",
"get_xticklabels",
"(",
")",
"]",
"if",
"labels",
"is",
"not",
"None",
":",
"ax",
".",
"set_xlabel",
"(",
"labels",
"[",
"i",
"]",
",",
"*",
"*",
"label_kwargs",
")",
"ax",
".",
"xaxis",
".",
"set_label_coords",
"(",
"0.5",
",",
"-",
"0.3",
")",
"# use MathText for axes ticks",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
"useMathText",
"=",
"use_math_text",
")",
")",
"for",
"j",
",",
"y",
"in",
"enumerate",
"(",
"xs",
")",
":",
"if",
"np",
".",
"shape",
"(",
"xs",
")",
"[",
"0",
"]",
"==",
"1",
":",
"ax",
"=",
"axes",
"else",
":",
"ax",
"=",
"axes",
"[",
"i",
",",
"j",
"]",
"if",
"j",
">",
"i",
":",
"ax",
".",
"set_frame_on",
"(",
"False",
")",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"continue",
"elif",
"j",
"==",
"i",
":",
"continue",
"# Deal with masked arrays.",
"if",
"hasattr",
"(",
"y",
",",
"\"compressed\"",
")",
":",
"y",
"=",
"y",
".",
"compressed",
"(",
")",
"hist2d",
"(",
"y",
",",
"x",
",",
"ax",
"=",
"ax",
",",
"range",
"=",
"[",
"range",
"[",
"j",
"]",
",",
"range",
"[",
"i",
"]",
"]",
",",
"weights",
"=",
"weights",
",",
"color",
"=",
"color",
",",
"smooth",
"=",
"smooth",
",",
"bins",
"=",
"[",
"bins",
"[",
"j",
"]",
",",
"bins",
"[",
"i",
"]",
"]",
",",
"*",
"*",
"hist2d_kwargs",
")",
"if",
"truths",
"is",
"not",
"None",
":",
"if",
"truths",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"truths",
"[",
"j",
"]",
"is",
"not",
"None",
":",
"ax",
".",
"plot",
"(",
"truths",
"[",
"j",
"]",
",",
"truths",
"[",
"i",
"]",
",",
"\"s\"",
",",
"color",
"=",
"truth_color",
")",
"if",
"truths",
"[",
"j",
"]",
"is",
"not",
"None",
":",
"ax",
".",
"axvline",
"(",
"truths",
"[",
"j",
"]",
",",
"color",
"=",
"truth_color",
")",
"if",
"truths",
"[",
"i",
"]",
"is",
"not",
"None",
":",
"ax",
".",
"axhline",
"(",
"truths",
"[",
"i",
"]",
",",
"color",
"=",
"truth_color",
")",
"ax",
".",
"xaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"max_n_ticks",
",",
"prune",
"=",
"\"lower\"",
")",
")",
"ax",
".",
"yaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"max_n_ticks",
",",
"prune",
"=",
"\"lower\"",
")",
")",
"if",
"i",
"<",
"K",
"-",
"1",
":",
"ax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"else",
":",
"[",
"l",
".",
"set_rotation",
"(",
"45",
")",
"for",
"l",
"in",
"ax",
".",
"get_xticklabels",
"(",
")",
"]",
"if",
"labels",
"is",
"not",
"None",
":",
"ax",
".",
"set_xlabel",
"(",
"labels",
"[",
"j",
"]",
",",
"*",
"*",
"label_kwargs",
")",
"ax",
".",
"xaxis",
".",
"set_label_coords",
"(",
"0.5",
",",
"-",
"0.3",
")",
"# use MathText for axes ticks",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
"useMathText",
"=",
"use_math_text",
")",
")",
"if",
"j",
">",
"0",
":",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"else",
":",
"[",
"l",
".",
"set_rotation",
"(",
"45",
")",
"for",
"l",
"in",
"ax",
".",
"get_yticklabels",
"(",
")",
"]",
"if",
"labels",
"is",
"not",
"None",
":",
"ax",
".",
"set_ylabel",
"(",
"labels",
"[",
"i",
"]",
",",
"*",
"*",
"label_kwargs",
")",
"ax",
".",
"yaxis",
".",
"set_label_coords",
"(",
"-",
"0.3",
",",
"0.5",
")",
"# use MathText for axes ticks",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
"useMathText",
"=",
"use_math_text",
")",
")",
"return",
"fig"
] | Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis indexes the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots. | [
"Make",
"a",
"*",
"sick",
"*",
"corner",
"plot",
"showing",
"the",
"projections",
"of",
"a",
"data",
"set",
"in",
"a",
"multi",
"-",
"dimensional",
"space",
".",
"kwargs",
"are",
"passed",
"to",
"hist2d",
"()",
"or",
"used",
"for",
"matplotlib",
"styling",
"."
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/helpers/corner/corner.py#L43-L392 |
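A minimal self-contained call of the corner function above on mock chains (the sample array and labels are invented):

    import numpy as np

    samples = np.random.randn(5000, 3)              # (nsamples, ndim) mock posterior draws
    fig = corner(samples, labels=["a", "b", "c"],
                 quantiles=[0.16, 0.5, 0.84], show_titles=True)
    fig.savefig("corner_demo.png")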
annayqho/TheCannon | TheCannon/helpers/corner/corner.py | quantile | def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist() | python | def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist() | [
"def",
"quantile",
"(",
"x",
",",
"q",
",",
"weights",
"=",
"None",
")",
":",
"if",
"weights",
"is",
"None",
":",
"return",
"np",
".",
"percentile",
"(",
"x",
",",
"[",
"100.",
"*",
"qi",
"for",
"qi",
"in",
"q",
"]",
")",
"else",
":",
"idx",
"=",
"np",
".",
"argsort",
"(",
"x",
")",
"xsorted",
"=",
"x",
"[",
"idx",
"]",
"cdf",
"=",
"np",
".",
"add",
".",
"accumulate",
"(",
"weights",
"[",
"idx",
"]",
")",
"cdf",
"/=",
"cdf",
"[",
"-",
"1",
"]",
"return",
"np",
".",
"interp",
"(",
"q",
",",
"cdf",
",",
"xsorted",
")",
".",
"tolist",
"(",
")"
] | Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x | [
"Like",
"numpy",
".",
"percentile",
"but",
":"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/helpers/corner/corner.py#L395-L411 |
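A worked example of the weighted branch above, with invented values: for x = [1, 2, 3] and weights [1, 1, 2], the normalized cumulative weights are [0.25, 0.5, 1.0], so the 0.5 quantile interpolates to exactly 2.0:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0])
    w = np.array([1.0, 1.0, 2.0])
    print(quantile(x, [0.5], weights=w))    # -> [2.0]
    print(quantile(x, [0.75], weights=w))   # -> [2.5], halfway between 2 and 3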
annayqho/TheCannon | TheCannon/helpers/corner/corner.py | hist2d | def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, plot_datapoints=True, plot_density=True,
plot_contours=True, no_fill_contours=False, fill_contours=False,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
**kwargs):
"""
Plot a 2-D histogram of samples.
Parameters
----------
x, y : array_like (nsamples,)
The samples.
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes (optional)
An axes instance on which to add the 2-D histogram.
plot_datapoints : bool (optional)
Draw the individual data points.
plot_density : bool (optional)
Draw the density colormap.
plot_contours : bool (optional)
Draw the contours.
no_fill_contours : bool (optional)
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool (optional)
Fill the contours.
contour_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict (optional)
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points.
"""
if ax is None:
ax = pl.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
logging.warn("Deprecated keyword argument 'extent'. "
"Use 'range' instead.")
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
rgba_color = colorConverter.to_rgba(color)
contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
for i, l in enumerate(levels):
contour_cmap[i][-1] *= float(i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=range, weights=weights)
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
V.sort()
m = np.diff(V) == 0
if np.any(m):
logging.warning("Too few points to create valid contours")
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if (plot_contours or plot_density) and not no_fill_contours:
ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
# Plot the contour edge colors.
if plot_contours:
if contour_kwargs is None:
contour_kwargs = dict()
contour_kwargs["colors"] = contour_kwargs.get("colors", color)
ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
ax.set_xlim(range[0])
ax.set_ylim(range[1]) | python | def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, plot_datapoints=True, plot_density=True,
plot_contours=True, no_fill_contours=False, fill_contours=False,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
**kwargs):
"""
Plot a 2-D histogram of samples.
Parameters
----------
x, y : array_like (nsamples,)
The samples.
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes (optional)
An axes instance on which to add the 2-D histogram.
plot_datapoints : bool (optional)
Draw the individual data points.
plot_density : bool (optional)
Draw the density colormap.
plot_contours : bool (optional)
Draw the contours.
no_fill_contours : bool (optional)
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool (optional)
Fill the contours.
contour_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict (optional)
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points.
"""
if ax is None:
ax = pl.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
logging.warn("Deprecated keyword argument 'extent'. "
"Use 'range' instead.")
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
rgba_color = colorConverter.to_rgba(color)
contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
for i, l in enumerate(levels):
contour_cmap[i][-1] *= float(i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=range, weights=weights)
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
V.sort()
m = np.diff(V) == 0
if np.any(m):
logging.warning("Too few points to create valid contours")
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if (plot_contours or plot_density) and not no_fill_contours:
ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
# Plot the contour edge colors.
if plot_contours:
if contour_kwargs is None:
contour_kwargs = dict()
contour_kwargs["colors"] = contour_kwargs.get("colors", color)
ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
ax.set_xlim(range[0])
ax.set_ylim(range[1]) | [
"def",
"hist2d",
"(",
"x",
",",
"y",
",",
"bins",
"=",
"20",
",",
"range",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"levels",
"=",
"None",
",",
"smooth",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"color",
"=",
"None",
",",
"plot_datapoints",
"=",
"True",
",",
"plot_density",
"=",
"True",
",",
"plot_contours",
"=",
"True",
",",
"no_fill_contours",
"=",
"False",
",",
"fill_contours",
"=",
"False",
",",
"contour_kwargs",
"=",
"None",
",",
"contourf_kwargs",
"=",
"None",
",",
"data_kwargs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"pl",
".",
"gca",
"(",
")",
"# Set the default range based on the data range if not provided.",
"if",
"range",
"is",
"None",
":",
"if",
"\"extent\"",
"in",
"kwargs",
":",
"logging",
".",
"warn",
"(",
"\"Deprecated keyword argument 'extent'. \"",
"\"Use 'range' instead.\"",
")",
"range",
"=",
"kwargs",
"[",
"\"extent\"",
"]",
"else",
":",
"range",
"=",
"[",
"[",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
"]",
",",
"[",
"y",
".",
"min",
"(",
")",
",",
"y",
".",
"max",
"(",
")",
"]",
"]",
"# Set up the default plotting arguments.",
"if",
"color",
"is",
"None",
":",
"color",
"=",
"\"k\"",
"# Choose the default \"sigma\" contour levels.",
"if",
"levels",
"is",
"None",
":",
"levels",
"=",
"1.0",
"-",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"np",
".",
"arange",
"(",
"0.5",
",",
"2.1",
",",
"0.5",
")",
"**",
"2",
")",
"# This is the color map for the density plot, over-plotted to indicate the",
"# density of the points near the center.",
"density_cmap",
"=",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"\"density_cmap\"",
",",
"[",
"color",
",",
"(",
"1",
",",
"1",
",",
"1",
",",
"0",
")",
"]",
")",
"# This color map is used to hide the points at the high density areas.",
"white_cmap",
"=",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"\"white_cmap\"",
",",
"[",
"(",
"1",
",",
"1",
",",
"1",
")",
",",
"(",
"1",
",",
"1",
",",
"1",
")",
"]",
",",
"N",
"=",
"2",
")",
"# This \"color map\" is the list of colors for the contour levels if the",
"# contours are filled.",
"rgba_color",
"=",
"colorConverter",
".",
"to_rgba",
"(",
"color",
")",
"contour_cmap",
"=",
"[",
"list",
"(",
"rgba_color",
")",
"for",
"l",
"in",
"levels",
"]",
"+",
"[",
"rgba_color",
"]",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"levels",
")",
":",
"contour_cmap",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
"*=",
"float",
"(",
"i",
")",
"/",
"(",
"len",
"(",
"levels",
")",
"+",
"1",
")",
"# We'll make the 2D histogram to directly estimate the density.",
"try",
":",
"H",
",",
"X",
",",
"Y",
"=",
"np",
".",
"histogram2d",
"(",
"x",
".",
"flatten",
"(",
")",
",",
"y",
".",
"flatten",
"(",
")",
",",
"bins",
"=",
"bins",
",",
"range",
"=",
"range",
",",
"weights",
"=",
"weights",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"It looks like at least one of your sample columns \"",
"\"have no dynamic range. You could try using the \"",
"\"'range' argument.\"",
")",
"if",
"smooth",
"is",
"not",
"None",
":",
"if",
"gaussian_filter",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"Please install scipy for smoothing\"",
")",
"H",
"=",
"gaussian_filter",
"(",
"H",
",",
"smooth",
")",
"# Compute the density levels.",
"Hflat",
"=",
"H",
".",
"flatten",
"(",
")",
"inds",
"=",
"np",
".",
"argsort",
"(",
"Hflat",
")",
"[",
":",
":",
"-",
"1",
"]",
"Hflat",
"=",
"Hflat",
"[",
"inds",
"]",
"sm",
"=",
"np",
".",
"cumsum",
"(",
"Hflat",
")",
"sm",
"/=",
"sm",
"[",
"-",
"1",
"]",
"V",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"levels",
")",
")",
"for",
"i",
",",
"v0",
"in",
"enumerate",
"(",
"levels",
")",
":",
"try",
":",
"V",
"[",
"i",
"]",
"=",
"Hflat",
"[",
"sm",
"<=",
"v0",
"]",
"[",
"-",
"1",
"]",
"except",
":",
"V",
"[",
"i",
"]",
"=",
"Hflat",
"[",
"0",
"]",
"V",
".",
"sort",
"(",
")",
"m",
"=",
"np",
".",
"diff",
"(",
"V",
")",
"==",
"0",
"if",
"np",
".",
"any",
"(",
"m",
")",
":",
"logging",
".",
"warning",
"(",
"\"Too few points to create valid contours\"",
")",
"while",
"np",
".",
"any",
"(",
"m",
")",
":",
"V",
"[",
"np",
".",
"where",
"(",
"m",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"*=",
"1.0",
"-",
"1e-4",
"m",
"=",
"np",
".",
"diff",
"(",
"V",
")",
"==",
"0",
"V",
".",
"sort",
"(",
")",
"# Compute the bin centers.",
"X1",
",",
"Y1",
"=",
"0.5",
"*",
"(",
"X",
"[",
"1",
":",
"]",
"+",
"X",
"[",
":",
"-",
"1",
"]",
")",
",",
"0.5",
"*",
"(",
"Y",
"[",
"1",
":",
"]",
"+",
"Y",
"[",
":",
"-",
"1",
"]",
")",
"# Extend the array for the sake of the contours at the plot edges.",
"H2",
"=",
"H",
".",
"min",
"(",
")",
"+",
"np",
".",
"zeros",
"(",
"(",
"H",
".",
"shape",
"[",
"0",
"]",
"+",
"4",
",",
"H",
".",
"shape",
"[",
"1",
"]",
"+",
"4",
")",
")",
"H2",
"[",
"2",
":",
"-",
"2",
",",
"2",
":",
"-",
"2",
"]",
"=",
"H",
"H2",
"[",
"2",
":",
"-",
"2",
",",
"1",
"]",
"=",
"H",
"[",
":",
",",
"0",
"]",
"H2",
"[",
"2",
":",
"-",
"2",
",",
"-",
"2",
"]",
"=",
"H",
"[",
":",
",",
"-",
"1",
"]",
"H2",
"[",
"1",
",",
"2",
":",
"-",
"2",
"]",
"=",
"H",
"[",
"0",
"]",
"H2",
"[",
"-",
"2",
",",
"2",
":",
"-",
"2",
"]",
"=",
"H",
"[",
"-",
"1",
"]",
"H2",
"[",
"1",
",",
"1",
"]",
"=",
"H",
"[",
"0",
",",
"0",
"]",
"H2",
"[",
"1",
",",
"-",
"2",
"]",
"=",
"H",
"[",
"0",
",",
"-",
"1",
"]",
"H2",
"[",
"-",
"2",
",",
"1",
"]",
"=",
"H",
"[",
"-",
"1",
",",
"0",
"]",
"H2",
"[",
"-",
"2",
",",
"-",
"2",
"]",
"=",
"H",
"[",
"-",
"1",
",",
"-",
"1",
"]",
"X2",
"=",
"np",
".",
"concatenate",
"(",
"[",
"X1",
"[",
"0",
"]",
"+",
"np",
".",
"array",
"(",
"[",
"-",
"2",
",",
"-",
"1",
"]",
")",
"*",
"np",
".",
"diff",
"(",
"X1",
"[",
":",
"2",
"]",
")",
",",
"X1",
",",
"X1",
"[",
"-",
"1",
"]",
"+",
"np",
".",
"array",
"(",
"[",
"1",
",",
"2",
"]",
")",
"*",
"np",
".",
"diff",
"(",
"X1",
"[",
"-",
"2",
":",
"]",
")",
",",
"]",
")",
"Y2",
"=",
"np",
".",
"concatenate",
"(",
"[",
"Y1",
"[",
"0",
"]",
"+",
"np",
".",
"array",
"(",
"[",
"-",
"2",
",",
"-",
"1",
"]",
")",
"*",
"np",
".",
"diff",
"(",
"Y1",
"[",
":",
"2",
"]",
")",
",",
"Y1",
",",
"Y1",
"[",
"-",
"1",
"]",
"+",
"np",
".",
"array",
"(",
"[",
"1",
",",
"2",
"]",
")",
"*",
"np",
".",
"diff",
"(",
"Y1",
"[",
"-",
"2",
":",
"]",
")",
",",
"]",
")",
"if",
"plot_datapoints",
":",
"if",
"data_kwargs",
"is",
"None",
":",
"data_kwargs",
"=",
"dict",
"(",
")",
"data_kwargs",
"[",
"\"color\"",
"]",
"=",
"data_kwargs",
".",
"get",
"(",
"\"color\"",
",",
"color",
")",
"data_kwargs",
"[",
"\"ms\"",
"]",
"=",
"data_kwargs",
".",
"get",
"(",
"\"ms\"",
",",
"2.0",
")",
"data_kwargs",
"[",
"\"mec\"",
"]",
"=",
"data_kwargs",
".",
"get",
"(",
"\"mec\"",
",",
"\"none\"",
")",
"data_kwargs",
"[",
"\"alpha\"",
"]",
"=",
"data_kwargs",
".",
"get",
"(",
"\"alpha\"",
",",
"0.1",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"\"o\"",
",",
"zorder",
"=",
"-",
"1",
",",
"rasterized",
"=",
"True",
",",
"*",
"*",
"data_kwargs",
")",
"# Plot the base fill to hide the densest data points.",
"if",
"(",
"plot_contours",
"or",
"plot_density",
")",
"and",
"not",
"no_fill_contours",
":",
"ax",
".",
"contourf",
"(",
"X2",
",",
"Y2",
",",
"H2",
".",
"T",
",",
"[",
"V",
".",
"min",
"(",
")",
",",
"H",
".",
"max",
"(",
")",
"]",
",",
"cmap",
"=",
"white_cmap",
",",
"antialiased",
"=",
"False",
")",
"if",
"plot_contours",
"and",
"fill_contours",
":",
"if",
"contourf_kwargs",
"is",
"None",
":",
"contourf_kwargs",
"=",
"dict",
"(",
")",
"contourf_kwargs",
"[",
"\"colors\"",
"]",
"=",
"contourf_kwargs",
".",
"get",
"(",
"\"colors\"",
",",
"contour_cmap",
")",
"contourf_kwargs",
"[",
"\"antialiased\"",
"]",
"=",
"contourf_kwargs",
".",
"get",
"(",
"\"antialiased\"",
",",
"False",
")",
"ax",
".",
"contourf",
"(",
"X2",
",",
"Y2",
",",
"H2",
".",
"T",
",",
"np",
".",
"concatenate",
"(",
"[",
"[",
"0",
"]",
",",
"V",
",",
"[",
"H",
".",
"max",
"(",
")",
"*",
"(",
"1",
"+",
"1e-4",
")",
"]",
"]",
")",
",",
"*",
"*",
"contourf_kwargs",
")",
"# Plot the density map. This can't be plotted at the same time as the",
"# contour fills.",
"elif",
"plot_density",
":",
"ax",
".",
"pcolor",
"(",
"X",
",",
"Y",
",",
"H",
".",
"max",
"(",
")",
"-",
"H",
".",
"T",
",",
"cmap",
"=",
"density_cmap",
")",
"# Plot the contour edge colors.",
"if",
"plot_contours",
":",
"if",
"contour_kwargs",
"is",
"None",
":",
"contour_kwargs",
"=",
"dict",
"(",
")",
"contour_kwargs",
"[",
"\"colors\"",
"]",
"=",
"contour_kwargs",
".",
"get",
"(",
"\"colors\"",
",",
"color",
")",
"ax",
".",
"contour",
"(",
"X2",
",",
"Y2",
",",
"H2",
".",
"T",
",",
"V",
",",
"*",
"*",
"contour_kwargs",
")",
"ax",
".",
"set_xlim",
"(",
"range",
"[",
"0",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"range",
"[",
"1",
"]",
")"
] | Plot a 2-D histogram of samples.
Parameters
----------
x, y : array_like (nsamples,)
The samples.
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes (optional)
An axes instance on which to add the 2-D histogram.
plot_datapoints : bool (optional)
Draw the individual data points.
plot_density : bool (optional)
Draw the density colormap.
plot_contours : bool (optional)
Draw the contours.
no_fill_contours : bool (optional)
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool (optional)
Fill the contours.
contour_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict (optional)
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict (optional)
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points. | [
"Plot",
"a",
"2",
"-",
"D",
"histogram",
"of",
"samples",
"."
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/helpers/corner/corner.py#L414-L592 |
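A minimal usage sketch for the hist2d helper documented in the row above. The import path, the synthetic samples, and the keyword choices are illustrative assumptions, not taken from the dataset row.
import numpy as np
import matplotlib.pyplot as plt
from TheCannon.helpers.corner.corner import hist2d  # assumed import path

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, 5000)             # first coordinate of the samples
y = 0.5 * x + rng.normal(0.0, 0.5, 5000)   # correlated second coordinate

fig, ax = plt.subplots()
# smooth=1.0 requires scipy (gaussian_filter); drop it if scipy is absent
hist2d(x, y, bins=40, ax=ax, color="k", smooth=1.0, fill_contours=True)
ax.set_xlabel("x")
ax.set_ylabel("y")
plt.show()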
annayqho/TheCannon | code/lamost/xcalib_5labels/paper_plots/distance_cut.py | calc_dist | def calc_dist(lamost_point, training_points, coeffs):
""" avg dist from one lamost point to nearest 10 training points """
diff2 = (training_points - lamost_point)**2
dist = np.sqrt(np.sum(diff2*coeffs, axis=1))
return np.mean(dist[dist.argsort()][0:10]) | python | def calc_dist(lamost_point, training_points, coeffs):
""" avg dist from one lamost point to nearest 10 training points """
diff2 = (training_points - lamost_point)**2
dist = np.sqrt(np.sum(diff2*coeffs, axis=1))
return np.mean(dist[dist.argsort()][0:10]) | [
"def",
"calc_dist",
"(",
"lamost_point",
",",
"training_points",
",",
"coeffs",
")",
":",
"diff2",
"=",
"(",
"training_points",
"-",
"lamost_point",
")",
"**",
"2",
"dist",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"diff2",
"*",
"coeffs",
",",
"axis",
"=",
"1",
")",
")",
"return",
"np",
".",
"mean",
"(",
"dist",
"[",
"dist",
".",
"argsort",
"(",
")",
"]",
"[",
"0",
":",
"10",
"]",
")"
] | avg dist from one lamost point to nearest 10 training points | [
"avg",
"dist",
"from",
"one",
"lamost",
"point",
"to",
"nearest",
"10",
"training",
"points"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/xcalib_5labels/paper_plots/distance_cut.py#L12-L16 |
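A hedged sketch of calling calc_dist from the row above, assuming the function is in scope; the label arrays and unit weights are invented stand-ins (the surrounding script presumably supplies real label data and coefficients).
import numpy as np

# Invented stand-ins: 500 training objects in a 3-label space and one
# LAMOST object; uniform per-label weights are an assumption here.
training_points = np.random.uniform(size=(500, 3))
lamost_point = np.array([0.5, 0.5, 0.5])
coeffs = np.ones(3)

print(calc_dist(lamost_point, training_points, coeffs))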
datosgobar/textar | textar/text_classifier.py | TextClassifier.make_classifier | def make_classifier(self, name, ids, labels):
"""Entrenar un clasificador SVM sobre los textos cargados.
Crea un clasificador que se guarda en el objeto bajo el nombre `name`.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
"""
if not all(np.in1d(ids, self.ids)):
raise ValueError("Hay ids de textos que no se encuentran \
almacenados.")
setattr(self, name, SGDClassifier())
classifier = getattr(self, name)
indices = np.searchsorted(self.ids, ids)
classifier.fit(self.tfidf_mat[indices, :], labels) | python | def make_classifier(self, name, ids, labels):
"""Entrenar un clasificador SVM sobre los textos cargados.
Crea un clasificador que se guarda en el objeto bajo el nombre `name`.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
"""
if not all(np.in1d(ids, self.ids)):
raise ValueError("Hay ids de textos que no se encuentran \
almacenados.")
setattr(self, name, SGDClassifier())
classifier = getattr(self, name)
indices = np.searchsorted(self.ids, ids)
classifier.fit(self.tfidf_mat[indices, :], labels) | [
"def",
"make_classifier",
"(",
"self",
",",
"name",
",",
"ids",
",",
"labels",
")",
":",
"if",
"not",
"all",
"(",
"np",
".",
"in1d",
"(",
"ids",
",",
"self",
".",
"ids",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Hay ids de textos que no se encuentran \\\n almacenados.\"",
")",
"setattr",
"(",
"self",
",",
"name",
",",
"SGDClassifier",
"(",
")",
")",
"classifier",
"=",
"getattr",
"(",
"self",
",",
"name",
")",
"indices",
"=",
"np",
".",
"searchsorted",
"(",
"self",
".",
"ids",
",",
"ids",
")",
"classifier",
".",
"fit",
"(",
"self",
".",
"tfidf_mat",
"[",
"indices",
",",
":",
"]",
",",
"labels",
")"
] | Entrenar un clasificador SVM sobre los textos cargados.
Crea un clasificador que se guarda en el objeto bajo el nombre `name`.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_ | [
"Entrenar",
"un",
"clasificador",
"SVM",
"sobre",
"los",
"textos",
"cargados",
"."
] | train | https://github.com/datosgobar/textar/blob/44fb5b537561facae0cdfe4fe1d108dfa26cfc6b/textar/text_classifier.py#L63-L83 |
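A sketch of training a classifier with make_classifier; the TextClassifier(texts=..., ids=...) constructor signature and the toy corpus are assumptions for illustration.
from textar.text_classifier import TextClassifier

texts = ["el gato come pescado", "el perro juega en el parque",
         "la bolsa de valores subio", "los mercados cayeron hoy"]
ids = ["t1", "t2", "t3", "t4"]

tc = TextClassifier(texts=texts, ids=ids)   # assumed constructor signature
tc.make_classifier(name="tema", ids=ids,
                   labels=["animales", "animales", "finanzas", "finanzas"])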
datosgobar/textar | textar/text_classifier.py | TextClassifier.retrain | def retrain(self, name, ids, labels):
"""Reentrenar parcialmente un clasificador SVM.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
"""
if not all(np.in1d(ids, self.ids)):
raise ValueError("Hay ids de textos que no se encuentran \
almacenados.")
try:
classifier = getattr(self, name)
except AttributeError:
raise AttributeError("No hay ningun clasificador con ese nombre.")
indices = np.in1d(self.ids, ids)
if isinstance(labels, str):
labels = [labels]
classifier.partial_fit(self.tfidf_mat[indices, :], labels) | python | def retrain(self, name, ids, labels):
"""Reentrenar parcialmente un clasificador SVM.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
"""
if not all(np.in1d(ids, self.ids)):
raise ValueError("Hay ids de textos que no se encuentran \
almacenados.")
try:
classifier = getattr(self, name)
except AttributeError:
raise AttributeError("No hay ningun clasificador con ese nombre.")
indices = np.in1d(self.ids, ids)
if isinstance(labels, str):
labels = [labels]
classifier.partial_fit(self.tfidf_mat[indices, :], labels) | [
"def",
"retrain",
"(",
"self",
",",
"name",
",",
"ids",
",",
"labels",
")",
":",
"if",
"not",
"all",
"(",
"np",
".",
"in1d",
"(",
"ids",
",",
"self",
".",
"ids",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Hay ids de textos que no se encuentran \\\n almacenados.\"",
")",
"try",
":",
"classifier",
"=",
"getattr",
"(",
"self",
",",
"name",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"No hay ningun clasificador con ese nombre.\"",
")",
"indices",
"=",
"np",
".",
"in1d",
"(",
"self",
".",
"ids",
",",
"ids",
")",
"if",
"isinstance",
"(",
"labels",
",",
"str",
")",
":",
"labels",
"=",
"[",
"labels",
"]",
"classifier",
".",
"partial_fit",
"(",
"self",
".",
"tfidf_mat",
"[",
"indices",
",",
":",
"]",
",",
"labels",
")"
] | Reentrenar parcialmente un clasificador SVM.
Args:
name (str): Nombre para el clasificador.
ids (list): Se espera una lista de N ids de textos ya almacenados
en el TextClassifier.
labels (list): Se espera una lista de N etiquetas. Una por cada id
de texto presente en ids.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_ | [
"Reentrenar",
"parcialmente",
"un",
"clasificador",
"SVM",
"."
] | train | https://github.com/datosgobar/textar/blob/44fb5b537561facae0cdfe4fe1d108dfa26cfc6b/textar/text_classifier.py#L85-L107 |
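Continuing the hypothetical tc object from the previous sketch, a partial update via retrain; note the method body wraps a bare string label into a list before calling partial_fit.
# One new labelled example for the already-trained "tema" classifier;
# the id must already be stored in tc.
tc.retrain("tema", ids=["t4"], labels="finanzas")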
datosgobar/textar | textar/text_classifier.py | TextClassifier.classify | def classify(self, classifier_name, examples, max_labels=None,
goodness_of_fit=False):
"""Usar un clasificador SVM para etiquetar textos nuevos.
Args:
classifier_name (str): Nombre del clasificador a usar.
examples (list or str): Se espera un ejemplo o una lista de
ejemplos a clasificar en texto plano o en ids.
max_labels (int, optional): Cantidad de etiquetas a devolver para
cada ejemplo. Si se devuelve mas de una el orden corresponde a
la plausibilidad de cada etiqueta. Si es None devuelve todas
las etiquetas posibles.
goodness_of_fit (bool, optional): Indica si devuelve o no una
medida de cuan buenas son las etiquetas.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
Returns:
tuple (array, array): (labels_considerados, puntajes)
labels_considerados: Las etiquetas que se consideraron para
clasificar.
puntajes: Cuanto más alto el puntaje, más probable es que la
etiqueta considerada sea la adecuada.
"""
classifier = getattr(self, classifier_name)
texts_vectors = self._make_text_vectors(examples)
return classifier.classes_, classifier.decision_function(texts_vectors) | python | def classify(self, classifier_name, examples, max_labels=None,
goodness_of_fit=False):
"""Usar un clasificador SVM para etiquetar textos nuevos.
Args:
classifier_name (str): Nombre del clasificador a usar.
examples (list or str): Se espera un ejemplo o una lista de
ejemplos a clasificar en texto plano o en ids.
max_labels (int, optional): Cantidad de etiquetas a devolver para
cada ejemplo. Si se devuelve mas de una el orden corresponde a
la plausibilidad de cada etiqueta. Si es None devuelve todas
las etiquetas posibles.
goodness_of_fit (bool, optional): Indica si devuelve o no una
medida de cuan buenas son las etiquetas.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
Returns:
tuple (array, array): (labels_considerados, puntajes)
labels_considerados: Las etiquetas que se consideraron para
clasificar.
puntajes: Cuanto más alto el puntaje, más probable es que la
etiqueta considerada sea la adecuada.
"""
classifier = getattr(self, classifier_name)
texts_vectors = self._make_text_vectors(examples)
return classifier.classes_, classifier.decision_function(texts_vectors) | [
"def",
"classify",
"(",
"self",
",",
"classifier_name",
",",
"examples",
",",
"max_labels",
"=",
"None",
",",
"goodness_of_fit",
"=",
"False",
")",
":",
"classifier",
"=",
"getattr",
"(",
"self",
",",
"classifier_name",
")",
"texts_vectors",
"=",
"self",
".",
"_make_text_vectors",
"(",
"examples",
")",
"return",
"classifier",
".",
"classes_",
",",
"classifier",
".",
"decision_function",
"(",
"texts_vectors",
")"
] | Usar un clasificador SVM para etiquetar textos nuevos.
Args:
classifier_name (str): Nombre del clasificador a usar.
examples (list or str): Se espera un ejemplo o una lista de
ejemplos a clasificar en texto plano o en ids.
max_labels (int, optional): Cantidad de etiquetas a devolver para
cada ejemplo. Si se devuelve mas de una el orden corresponde a
la plausibilidad de cada etiqueta. Si es None devuelve todas
las etiquetas posibles.
goodness_of_fit (bool, optional): Indica si devuelve o no una
medida de cuan buenas son las etiquetas.
Nota:
Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
Returns:
tuple (array, array): (labels_considerados, puntajes)
labels_considerados: Las etiquetas que se consideraron para
clasificar.
puntajes: Cuanto más alto el puntaje, más probable es que la
etiqueta considerada sea la adecuada. | [
"Usar",
"un",
"clasificador",
"SVM",
"para",
"etiquetar",
"textos",
"nuevos",
"."
] | train | https://github.com/datosgobar/textar/blob/44fb5b537561facae0cdfe4fe1d108dfa26cfc6b/textar/text_classifier.py#L109-L135 |
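A sketch of scoring a new text with classify on the hypothetical tc. The body shown above returns the considered classes plus raw decision-function scores (and accepts max_labels / goodness_of_fit without using them).
labels, scores = tc.classify("tema", ["el caballo galopa en el campo"])
print(labels)   # the classes the classifier considered
print(scores)   # raw decision-function scores; shape depends on n_classes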
datosgobar/textar | textar/text_classifier.py | TextClassifier._make_text_vectors | def _make_text_vectors(self, examples):
"""Funcion para generar los vectores tf-idf de una lista de textos.
Args:
examples (list or str): Se espera un ejemplo o una lista de:
o bien ids, o bien textos.
Returns:
textvec (sparse matrix): Devuelve una matriz sparse que contiene
los vectores TF-IDF para los ejemplos que se pasan de entrada.
El tamaño de la matriz es de (N, T) donde N es la cantidad de
ejemplos y T es la cantidad de términos en el vocabulario.
"""
if isinstance(examples, str):
if examples in self.ids:
textvec = self.tfidf_mat[self.ids == examples, :]
else:
textvec = self.vectorizer.transform([examples])
textvec = self.transformer.transform(textvec)
elif type(examples) is list:
if all(np.in1d(examples, self.ids)):
textvec = self.tfidf_mat[np.in1d(self.ids, examples)]
elif not any(np.in1d(examples, self.ids)):
textvec = self.vectorizer.transform(examples)
textvec = self.transformer.transform(textvec)
else:
raise ValueError("Las listas de ejemplos deben ser todos ids\
de textos almacenados o todos textos planos")
else:
raise TypeError("Los ejemplos no son del tipo de dato adecuado.")
return textvec | python | def _make_text_vectors(self, examples):
"""Funcion para generar los vectores tf-idf de una lista de textos.
Args:
examples (list or str): Se espera un ejemplo o una lista de:
o bien ids, o bien textos.
Returns:
textvec (sparse matrix): Devuelve una matriz sparse que contiene
los vectores TF-IDF para los ejemplos que se pasan de entrada.
El tamaño de la matriz es de (N, T) donde N es la cantidad de
ejemplos y T es la cantidad de términos en el vocabulario.
"""
if isinstance(examples, str):
if examples in self.ids:
textvec = self.tfidf_mat[self.ids == examples, :]
else:
textvec = self.vectorizer.transform([examples])
textvec = self.transformer.transform(textvec)
elif type(examples) is list:
if all(np.in1d(examples, self.ids)):
textvec = self.tfidf_mat[np.in1d(self.ids, examples)]
elif not any(np.in1d(examples, self.ids)):
textvec = self.vectorizer.transform(examples)
textvec = self.transformer.transform(textvec)
else:
raise ValueError("Las listas de ejemplos deben ser todos ids\
de textos almacenados o todos textos planos")
else:
raise TypeError("Los ejemplos no son del tipo de dato adecuado.")
return textvec | [
"def",
"_make_text_vectors",
"(",
"self",
",",
"examples",
")",
":",
"if",
"isinstance",
"(",
"examples",
",",
"str",
")",
":",
"if",
"examples",
"in",
"self",
".",
"ids",
":",
"textvec",
"=",
"self",
".",
"tfidf_mat",
"[",
"self",
".",
"ids",
"==",
"examples",
",",
":",
"]",
"else",
":",
"textvec",
"=",
"self",
".",
"vectorizer",
".",
"transform",
"(",
"[",
"examples",
"]",
")",
"textvec",
"=",
"self",
".",
"transformer",
".",
"transform",
"(",
"textvec",
")",
"elif",
"type",
"(",
"examples",
")",
"is",
"list",
":",
"if",
"all",
"(",
"np",
".",
"in1d",
"(",
"examples",
",",
"self",
".",
"ids",
")",
")",
":",
"textvec",
"=",
"self",
".",
"tfidf_mat",
"[",
"np",
".",
"in1d",
"(",
"self",
".",
"ids",
",",
"examples",
")",
"]",
"elif",
"not",
"any",
"(",
"np",
".",
"in1d",
"(",
"examples",
",",
"self",
".",
"ids",
")",
")",
":",
"textvec",
"=",
"self",
".",
"vectorizer",
".",
"transform",
"(",
"examples",
")",
"textvec",
"=",
"self",
".",
"transformer",
".",
"transform",
"(",
"textvec",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Las listas de ejemplos deben ser todos ids\\\n de textos almacenados o todos textos planos\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Los ejemplos no son del tipo de dato adecuado.\"",
")",
"return",
"textvec"
] | Funcion para generar los vectores tf-idf de una lista de textos.
Args:
examples (list or str): Se espera un ejemplo o una lista de:
o bien ids, o bien textos.
Returns:
textvec (sparse matrix): Devuelve una matriz sparse que contiene
los vectores TF-IDF para los ejemplos que se pasan de entrada.
El tamaño de la matriz es de (N, T) donde N es la cantidad de
ejemplos y T es la cantidad de términos en el vocabulario. | [
"Funcion",
"para",
"generar",
"los",
"vectores",
"tf",
"-",
"idf",
"de",
"una",
"lista",
"de",
"textos",
"."
] | train | https://github.com/datosgobar/textar/blob/44fb5b537561facae0cdfe4fe1d108dfa26cfc6b/textar/text_classifier.py#L137-L167 |
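A sketch of the three dispatch paths of _make_text_vectors on the hypothetical tc: a stored id reuses the cached tf-idf row, a plain string is vectorized on the fly, and a list must be homogeneous (all stored ids or all plain texts).
v1 = tc._make_text_vectors("t1")              # stored id -> cached tf-idf row
v2 = tc._make_text_vectors("un texto nuevo")  # unseen text -> count + tf-idf
v3 = tc._make_text_vectors(["t1", "t2"])      # all-ids (or all-plain) list
print(v3.shape)                               # (N, T) sparse matrix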
datosgobar/textar | textar/text_classifier.py | TextClassifier.get_similar | def get_similar(self, example, max_similars=3, similarity_cutoff=None,
term_diff_max_rank=10, filter_list=None,
term_diff_cutoff=None):
"""Devuelve textos similares al ejemplo dentro de los textos entrenados.
Nota:
Usa la distancia de coseno del vector de features TF-IDF
Args:
example (str): Se espera un id de texto o un texto a partir del
cual se buscaran otros textos similares.
max_similars (int, optional): Cantidad de textos similares a
devolver.
similarity_cutoff (float, optional): Valor umbral de similaridad
para definir que dos textos son similares entre si.
term_diff_max_rank (int, optional): Este valor sirve para controlar
el umbral con el que los terminos son considerados importantes
a la hora de recuperar textos (no afecta el funcionamiento de
que textos se consideran cercanos, solo la cantidad de terminos
que se devuelven en best_words).
filter_list (list): Lista de ids de textos en la cual buscar textos
similares.
term_diff_cutoff (float): Deprecado. Se quitara en el futuro.
Returns:
tuple (list, list, list): (text_ids, sorted_dist, best_words)
text_ids (list of str): Devuelve los ids de los textos
sugeridos.
sorted_dist (list of float): Devuelve la distancia entre las
opciones sugeridas y el ejemplo dado como entrada.
best_words (list of list): Para cada sugerencia devuelve las
palabras mas relevantes que se usaron para seleccionar esa
sugerencia.
"""
if term_diff_cutoff:
warnings.warn('Deprecado. Quedo sin uso. Se quitara en el futuro.',
DeprecationWarning)
if filter_list:
if max_similars > len(filter_list):
raise ValueError("No se pueden pedir mas sugerencias que la \
cantidad de textos en `filter_list`.")
else:
filt_idx = np.in1d(self.ids, filter_list)
elif max_similars > self.term_mat.shape[0]:
raise ValueError("No se pueden pedir mas sugerencias que la \
cantidad de textos que hay almacenados.")
else:
filt_idx = np.ones(len(self.ids), dtype=bool)
# Saco los textos compuestos solo por stop_words
good_ids = np.array(np.sum(self.term_mat, 1) > 0).squeeze()
filt_idx = filt_idx & good_ids
filt_idx_to_general_idx = np.flatnonzero(filt_idx)
if example in self.ids:
index = self.ids == example
exmpl_vec = self.tfidf_mat[index, :]
distances = np.squeeze(pairwise_distances(self.tfidf_mat[filt_idx],
exmpl_vec))
# Pongo la distancia a si mismo como inf, para que no se devuelva a
# si mismo como una opcion
if filter_list and example in filter_list:
distances[filter_list.index(example)] = np.inf
elif not filter_list:
idx_example = np.searchsorted(self.ids, example)
filt_idx_example = np.searchsorted(np.flatnonzero(filt_idx),
idx_example)
distances[filt_idx_example] = np.inf
else:
exmpl_vec = self.vectorizer.transform([example]) # contar terminos
exmpl_vec = self.transformer.transform(exmpl_vec) # calcular tfidf
distances = np.squeeze(pairwise_distances(self.tfidf_mat[filt_idx],
exmpl_vec))
if np.sum(exmpl_vec) == 0:
return [], [], []
sorted_indices = np.argsort(distances)
closest_n = sorted_indices[:max_similars]
sorted_dist = distances[closest_n]
if similarity_cutoff:
closest_n = closest_n[sorted_dist < similarity_cutoff]
sorted_dist = sorted_dist[sorted_dist < similarity_cutoff]
best_words = []
# Calculo palabras relevantes para cada sugerencia
best_example = np.squeeze(exmpl_vec.toarray())
sorted_example_weights = np.flipud(np.argsort(best_example))
truncated_max_rank = min(term_diff_max_rank, np.sum(best_example > 0))
best_example_words = sorted_example_weights[:truncated_max_rank]
for suggested in closest_n:
suggested_idx = filt_idx_to_general_idx[suggested]
test_vec = np.squeeze(self.tfidf_mat[suggested_idx, :].toarray())
sorted_test_weights = np.flipud(np.argsort(test_vec))
truncated_max_rank = min(term_diff_max_rank,
np.sum(test_vec > 0))
best_test = sorted_test_weights[:truncated_max_rank]
best_words_ids = np.intersect1d(best_example_words, best_test)
best_words.append([k for k, v in
self.vectorizer.vocabulary_.items()
if v in best_words_ids])
# Filtro dentro de las buscadas
if filter_list:
text_ids = self.ids[filt_idx_to_general_idx[closest_n]]
else:
text_ids = self.ids[closest_n]
return list(text_ids), list(sorted_dist), best_words | python | def get_similar(self, example, max_similars=3, similarity_cutoff=None,
term_diff_max_rank=10, filter_list=None,
term_diff_cutoff=None):
"""Devuelve textos similares al ejemplo dentro de los textos entrenados.
Nota:
Usa la distancia de coseno del vector de features TF-IDF
Args:
example (str): Se espera un id de texto o un texto a partir del
cual se buscaran otros textos similares.
max_similars (int, optional): Cantidad de textos similares a
devolver.
similarity_cutoff (float, optional): Valor umbral de similaridad
para definir que dos textos son similares entre si.
term_diff_max_rank (int, optional): Este valor sirve para controlar
el umbral con el que los terminos son considerados importantes
a la hora de recuperar textos (no afecta el funcionamiento de
que textos se consideran cercanos, solo la cantidad de terminos
que se devuelven en best_words).
filter_list (list): Lista de ids de textos en la cual buscar textos
similares.
term_diff_cutoff (float): Deprecado. Se quitara en el futuro.
Returns:
tuple (list, list, list): (text_ids, sorted_dist, best_words)
text_ids (list of str): Devuelve los ids de los textos
sugeridos.
sorted_dist (list of float): Devuelve la distancia entre las
opciones sugeridas y el ejemplo dado como entrada.
best_words (list of list): Para cada sugerencia devuelve las
palabras mas relevantes que se usaron para seleccionar esa
sugerencia.
"""
if term_diff_cutoff:
warnings.warn('Deprecado. Quedo sin uso. Se quitara en el futuro.',
DeprecationWarning)
if filter_list:
if max_similars > len(filter_list):
raise ValueError("No se pueden pedir mas sugerencias que la \
cantidad de textos en `filter_list`.")
else:
filt_idx = np.in1d(self.ids, filter_list)
elif max_similars > self.term_mat.shape[0]:
raise ValueError("No se pueden pedir mas sugerencias que la \
cantidad de textos que hay almacenados.")
else:
filt_idx = np.ones(len(self.ids), dtype=bool)
# Saco los textos compuestos solo por stop_words
good_ids = np.array(np.sum(self.term_mat, 1) > 0).squeeze()
filt_idx = filt_idx & good_ids
filt_idx_to_general_idx = np.flatnonzero(filt_idx)
if example in self.ids:
index = self.ids == example
exmpl_vec = self.tfidf_mat[index, :]
distances = np.squeeze(pairwise_distances(self.tfidf_mat[filt_idx],
exmpl_vec))
# Pongo la distancia a si mismo como inf, para que no se devuelva a
# si mismo como una opcion
if filter_list and example in filter_list:
distances[filter_list.index(example)] = np.inf
elif not filter_list:
idx_example = np.searchsorted(self.ids, example)
filt_idx_example = np.searchsorted(np.flatnonzero(filt_idx),
idx_example)
distances[filt_idx_example] = np.inf
else:
exmpl_vec = self.vectorizer.transform([example]) # contar terminos
exmpl_vec = self.transformer.transform(exmpl_vec) # calcular tfidf
distances = np.squeeze(pairwise_distances(self.tfidf_mat[filt_idx],
exmpl_vec))
if np.sum(exmpl_vec) == 0:
return [], [], []
sorted_indices = np.argsort(distances)
closest_n = sorted_indices[:max_similars]
sorted_dist = distances[closest_n]
if similarity_cutoff:
closest_n = closest_n[sorted_dist < similarity_cutoff]
sorted_dist = sorted_dist[sorted_dist < similarity_cutoff]
best_words = []
# Calculo palabras relevantes para cada sugerencia
best_example = np.squeeze(exmpl_vec.toarray())
sorted_example_weights = np.flipud(np.argsort(best_example))
truncated_max_rank = min(term_diff_max_rank, np.sum(best_example > 0))
best_example_words = sorted_example_weights[:truncated_max_rank]
for suggested in closest_n:
suggested_idx = filt_idx_to_general_idx[suggested]
test_vec = np.squeeze(self.tfidf_mat[suggested_idx, :].toarray())
sorted_test_weights = np.flipud(np.argsort(test_vec))
truncated_max_rank = min(term_diff_max_rank,
np.sum(test_vec > 0))
best_test = sorted_test_weights[:truncated_max_rank]
best_words_ids = np.intersect1d(best_example_words, best_test)
best_words.append([k for k, v in
self.vectorizer.vocabulary_.items()
if v in best_words_ids])
# Filtro dentro de las buscadas
if filter_list:
text_ids = self.ids[filt_idx_to_general_idx[closest_n]]
else:
text_ids = self.ids[closest_n]
return list(text_ids), list(sorted_dist), best_words | [
"def",
"get_similar",
"(",
"self",
",",
"example",
",",
"max_similars",
"=",
"3",
",",
"similarity_cutoff",
"=",
"None",
",",
"term_diff_max_rank",
"=",
"10",
",",
"filter_list",
"=",
"None",
",",
"term_diff_cutoff",
"=",
"None",
")",
":",
"if",
"term_diff_cutoff",
":",
"warnings",
".",
"warn",
"(",
"'Deprecado. Quedo sin uso. Se quitara en el futuro.'",
",",
"DeprecationWarning",
")",
"if",
"filter_list",
":",
"if",
"max_similars",
">",
"len",
"(",
"filter_list",
")",
":",
"raise",
"ValueError",
"(",
"\"No se pueden pedir mas sugerencias que la \\\n cantidad de textos en `filter_list`.\"",
")",
"else",
":",
"filt_idx",
"=",
"np",
".",
"in1d",
"(",
"self",
".",
"ids",
",",
"filter_list",
")",
"elif",
"max_similars",
">",
"self",
".",
"term_mat",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"No se pueden pedir mas sugerencias que la \\\n cantidad de textos que hay almacenados.\"",
")",
"else",
":",
"filt_idx",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"self",
".",
"ids",
")",
",",
"dtype",
"=",
"bool",
")",
"# Saco los textos compuestos solo por stop_words",
"good_ids",
"=",
"np",
".",
"array",
"(",
"np",
".",
"sum",
"(",
"self",
".",
"term_mat",
",",
"1",
")",
">",
"0",
")",
".",
"squeeze",
"(",
")",
"filt_idx",
"=",
"filt_idx",
"&",
"good_ids",
"filt_idx_to_general_idx",
"=",
"np",
".",
"flatnonzero",
"(",
"filt_idx",
")",
"if",
"example",
"in",
"self",
".",
"ids",
":",
"index",
"=",
"self",
".",
"ids",
"==",
"example",
"exmpl_vec",
"=",
"self",
".",
"tfidf_mat",
"[",
"index",
",",
":",
"]",
"distances",
"=",
"np",
".",
"squeeze",
"(",
"pairwise_distances",
"(",
"self",
".",
"tfidf_mat",
"[",
"filt_idx",
"]",
",",
"exmpl_vec",
")",
")",
"# Pongo la distancia a si mismo como inf, par que no se devuelva a",
"# si mismo como una opcion",
"if",
"filter_list",
"and",
"example",
"in",
"filter_list",
":",
"distances",
"[",
"filter_list",
".",
"index",
"(",
"example",
")",
"]",
"=",
"np",
".",
"inf",
"elif",
"not",
"filter_list",
":",
"idx_example",
"=",
"np",
".",
"searchsorted",
"(",
"self",
".",
"ids",
",",
"example",
")",
"filt_idx_example",
"=",
"np",
".",
"searchsorted",
"(",
"np",
".",
"flatnonzero",
"(",
"filt_idx",
")",
",",
"idx_example",
")",
"distances",
"[",
"filt_idx_example",
"]",
"=",
"np",
".",
"inf",
"else",
":",
"exmpl_vec",
"=",
"self",
".",
"vectorizer",
".",
"transform",
"(",
"[",
"example",
"]",
")",
"# contar terminos",
"exmpl_vec",
"=",
"self",
".",
"transformer",
".",
"transform",
"(",
"exmpl_vec",
")",
"# calcular tfidf",
"distances",
"=",
"np",
".",
"squeeze",
"(",
"pairwise_distances",
"(",
"self",
".",
"tfidf_mat",
"[",
"filt_idx",
"]",
",",
"exmpl_vec",
")",
")",
"if",
"np",
".",
"sum",
"(",
"exmpl_vec",
")",
"==",
"0",
":",
"return",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"sorted_indices",
"=",
"np",
".",
"argsort",
"(",
"distances",
")",
"closest_n",
"=",
"sorted_indices",
"[",
":",
"max_similars",
"]",
"sorted_dist",
"=",
"distances",
"[",
"closest_n",
"]",
"if",
"similarity_cutoff",
":",
"closest_n",
"=",
"closest_n",
"[",
"sorted_dist",
"<",
"similarity_cutoff",
"]",
"sorted_dist",
"=",
"sorted_dist",
"[",
"sorted_dist",
"<",
"similarity_cutoff",
"]",
"best_words",
"=",
"[",
"]",
"# Calculo palabras relevantes para cada sugerencia",
"best_example",
"=",
"np",
".",
"squeeze",
"(",
"exmpl_vec",
".",
"toarray",
"(",
")",
")",
"sorted_example_weights",
"=",
"np",
".",
"flipud",
"(",
"np",
".",
"argsort",
"(",
"best_example",
")",
")",
"truncated_max_rank",
"=",
"min",
"(",
"term_diff_max_rank",
",",
"np",
".",
"sum",
"(",
"best_example",
">",
"0",
")",
")",
"best_example_words",
"=",
"sorted_example_weights",
"[",
":",
"truncated_max_rank",
"]",
"for",
"suggested",
"in",
"closest_n",
":",
"suggested_idx",
"=",
"filt_idx_to_general_idx",
"[",
"suggested",
"]",
"test_vec",
"=",
"np",
".",
"squeeze",
"(",
"self",
".",
"tfidf_mat",
"[",
"suggested_idx",
",",
":",
"]",
".",
"toarray",
"(",
")",
")",
"sorted_test_weights",
"=",
"np",
".",
"flipud",
"(",
"np",
".",
"argsort",
"(",
"test_vec",
")",
")",
"truncated_max_rank",
"=",
"min",
"(",
"term_diff_max_rank",
",",
"np",
".",
"sum",
"(",
"test_vec",
">",
"0",
")",
")",
"best_test",
"=",
"sorted_test_weights",
"[",
":",
"truncated_max_rank",
"]",
"best_words_ids",
"=",
"np",
".",
"intersect1d",
"(",
"best_example_words",
",",
"best_test",
")",
"best_words",
".",
"append",
"(",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"vectorizer",
".",
"vocabulary_",
".",
"items",
"(",
")",
"if",
"v",
"in",
"best_words_ids",
"]",
")",
"# Filtro dentro de las buscadas",
"if",
"filter_list",
":",
"text_ids",
"=",
"self",
".",
"ids",
"[",
"filt_idx_to_general_idx",
"[",
"closest_n",
"]",
"]",
"else",
":",
"text_ids",
"=",
"self",
".",
"ids",
"[",
"closest_n",
"]",
"return",
"list",
"(",
"text_ids",
")",
",",
"list",
"(",
"sorted_dist",
")",
",",
"best_words"
] | Devuelve textos similares al ejemplo dentro de los textos entrenados.
Nota:
Usa la distancia de coseno del vector de features TF-IDF
Args:
example (str): Se espera un id de texto o un texto a partir del
cual se buscaran otros textos similares.
max_similars (int, optional): Cantidad de textos similares a
devolver.
similarity_cutoff (float, optional): Valor umbral de similaridad
para definir que dos textos son similares entre si.
term_diff_max_rank (int, optional): Este valor sirve para controlar
el umbral con el que los terminos son considerados importantes
a la hora de recuperar textos (no afecta el funcionamiento de
que textos se consideran cercanos, solo la cantidad de terminos
que se devuelven en best_words).
filter_list (list): Lista de ids de textos en la cual buscar textos
similares.
term_diff_cutoff (float): Deprecado. Se quitara en el futuro.
Returns:
tuple (list, list, list): (text_ids, sorted_dist, best_words)
text_ids (list of str): Devuelve los ids de los textos
sugeridos.
sorted_dist (list of float): Devuelve la distancia entre las
opciones sugeridas y el ejemplo dado como entrada.
best_words (list of list): Para cada sugerencia devuelve las
palabras mas relevantes que se usaron para seleccionar esa
sugerencia. | [
"Devuelve",
"textos",
"similares",
"al",
"ejemplo",
"dentro",
"de",
"los",
"textos",
"entrenados",
"."
] | train | https://github.com/datosgobar/textar/blob/44fb5b537561facae0cdfe4fe1d108dfa26cfc6b/textar/text_classifier.py#L169-L274 |
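A retrieval sketch on the hypothetical tc: cosine distance over tf-idf vectors ranks stored texts against a free-text query, and best_words lists high-weight terms shared with each suggestion.
ids_out, dists, words = tc.get_similar("animales que juegan", max_similars=2)
for i, d, w in zip(ids_out, dists, words):
    print(i, round(d, 3), w)   # text id, cosine distance, shared key terms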
datosgobar/textar | textar/text_classifier.py | TextClassifier.reload_texts | def reload_texts(self, texts, ids, vocabulary=None):
"""Calcula los vectores de terminos de textos y los almacena.
A diferencia de :func:`~TextClassifier.TextClassifier.store_text` esta
funcion borra cualquier informacion almacenada y comienza el conteo
desde cero. Se usa para redefinir el vocabulario sobre el que se
construyen los vectores.
Args:
texts (list): Una lista de N textos a incorporar.
ids (list): Una lista de N ids alfanumericos para los textos.
"""
self._check_id_length(ids)
self.ids = np.array(sorted(ids))
if vocabulary:
self.vectorizer.vocabulary = vocabulary
sorted_texts = [x for (y, x) in sorted(zip(ids, texts))]
self.term_mat = self.vectorizer.fit_transform(sorted_texts)
self._update_tfidf() | python | def reload_texts(self, texts, ids, vocabulary=None):
"""Calcula los vectores de terminos de textos y los almacena.
A diferencia de :func:`~TextClassifier.TextClassifier.store_text` esta
funcion borra cualquier informacion almacenada y comienza el conteo
desde cero. Se usa para redefinir el vocabulario sobre el que se
construyen los vectores.
Args:
texts (list): Una lista de N textos a incorporar.
ids (list): Una lista de N ids alfanumericos para los textos.
"""
self._check_id_length(ids)
self.ids = np.array(sorted(ids))
if vocabulary:
self.vectorizer.vocabulary = vocabulary
sorted_texts = [x for (y, x) in sorted(zip(ids, texts))]
self.term_mat = self.vectorizer.fit_transform(sorted_texts)
self._update_tfidf() | [
"def",
"reload_texts",
"(",
"self",
",",
"texts",
",",
"ids",
",",
"vocabulary",
"=",
"None",
")",
":",
"self",
".",
"_check_id_length",
"(",
"ids",
")",
"self",
".",
"ids",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"ids",
")",
")",
"if",
"vocabulary",
":",
"self",
".",
"vectorizer",
".",
"vocabulary",
"=",
"vocabulary",
"sorted_texts",
"=",
"[",
"x",
"for",
"(",
"y",
",",
"x",
")",
"in",
"sorted",
"(",
"zip",
"(",
"ids",
",",
"texts",
")",
")",
"]",
"self",
".",
"term_mat",
"=",
"self",
".",
"vectorizer",
".",
"fit_transform",
"(",
"sorted_texts",
")",
"self",
".",
"_update_tfidf",
"(",
")"
] | Calcula los vectores de terminos de textos y los almacena.
A diferencia de :func:`~TextClassifier.TextClassifier.store_text` esta
funcion borra cualquier informacion almacenada y comienza el conteo
desde cero. Se usa para redefinir el vocabulario sobre el que se
construyen los vectores.
Args:
texts (list): Una lista de N textos a incorporar.
ids (list): Una lista de N ids alfanumericos para los textos. | [
"Calcula",
"los",
"vectores",
"de",
"terminos",
"de",
"textos",
"y",
"los",
"almacena",
"."
] | train | https://github.com/datosgobar/textar/blob/44fb5b537561facae0cdfe4fe1d108dfa26cfc6b/textar/text_classifier.py#L276-L294 |
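A sketch of reload_texts on the hypothetical tc; unlike an incremental store, it discards all existing counts and refits the vectorizer, optionally pinning a vocabulary (assumed to be a term-to-column mapping).
tc.reload_texts(texts=["texto nuevo uno", "texto nuevo dos"], ids=["n1", "n2"])
# Optionally fix the vocabulary (term -> column index; assumed format):
tc.reload_texts(texts=["texto nuevo uno"], ids=["n1"],
                vocabulary={"texto": 0, "nuevo": 1, "uno": 2})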
sckott/pygbif | pygbif/species/name_suggest.py | name_suggest | def name_suggest(q=None, datasetKey=None, rank=None, limit=100, offset=None, **kwargs):
'''
A quick and simple autocomplete service that returns up to 20 name usages by
doing prefix matching against the scientific name. Results are ordered by relevance.
:param q: [str] Simple search parameter. The value for this parameter can be a
simple word or a phrase. Wildcards can be added to the simple word parameters only,
e.g. ``q=*puma*`` (Required)
:param datasetKey: [str] Filters by the checklist dataset key (a uuid, see examples)
:param rank: [str] A taxonomic rank. One of ``class``, ``cultivar``, ``cultivar_group``, ``domain``, ``family``,
``form``, ``genus``, ``informal``, ``infrageneric_name``, ``infraorder``, ``infraspecific_name``,
``infrasubspecific_name``, ``kingdom``, ``order``, ``phylum``, ``section``, ``series``, ``species``, ``strain``, ``subclass``,
``subfamily``, ``subform``, ``subgenus``, ``subkingdom``, ``suborder``, ``subphylum``, ``subsection``, ``subseries``,
``subspecies``, ``subtribe``, ``subvariety``, ``superclass``, ``superfamily``, ``superorder``, ``superphylum``,
``suprageneric_name``, ``tribe``, ``unranked``, or ``variety``.
:param limit: [fixnum] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
:return: A dictionary
References: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_suggest(q='Puma concolor')
x = species.name_suggest(q='Puma')
species.name_suggest(q='Puma', rank="genus")
species.name_suggest(q='Puma', rank="subspecies")
species.name_suggest(q='Puma', rank="species")
species.name_suggest(q='Puma', rank="infraspecific_name")
species.name_suggest(q='Puma', limit=2)
'''
url = gbif_baseurl + 'species/suggest'
args = {'q':q, 'rank':rank, 'offset':offset, 'limit':limit}
return gbif_GET(url, args, **kwargs) | python | def name_suggest(q=None, datasetKey=None, rank=None, limit=100, offset=None, **kwargs):
'''
A quick and simple autocomplete service that returns up to 20 name usages by
doing prefix matching against the scientific name. Results are ordered by relevance.
:param q: [str] Simple search parameter. The value for this parameter can be a
simple word or a phrase. Wildcards can be added to the simple word parameters only,
e.g. ``q=*puma*`` (Required)
:param datasetKey: [str] Filters by the checklist dataset key (a uuid, see examples)
:param rank: [str] A taxonomic rank. One of ``class``, ``cultivar``, ``cultivar_group``, ``domain``, ``family``,
``form``, ``genus``, ``informal``, ``infrageneric_name``, ``infraorder``, ``infraspecific_name``,
``infrasubspecific_name``, ``kingdom``, ``order``, ``phylum``, ``section``, ``series``, ``species``, ``strain``, ``subclass``,
``subfamily``, ``subform``, ``subgenus``, ``subkingdom``, ``suborder``, ``subphylum``, ``subsection``, ``subseries``,
``subspecies``, ``subtribe``, ``subvariety``, ``superclass``, ``superfamily``, ``superorder``, ``superphylum``,
``suprageneric_name``, ``tribe``, ``unranked``, or ``variety``.
:param limit: [fixnum] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
:return: A dictionary
References: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_suggest(q='Puma concolor')
x = species.name_suggest(q='Puma')
species.name_suggest(q='Puma', rank="genus")
species.name_suggest(q='Puma', rank="subspecies")
species.name_suggest(q='Puma', rank="species")
species.name_suggest(q='Puma', rank="infraspecific_name")
species.name_suggest(q='Puma', limit=2)
'''
url = gbif_baseurl + 'species/suggest'
args = {'q':q, 'rank':rank, 'offset':offset, 'limit':limit}
return gbif_GET(url, args, **kwargs) | [
"def",
"name_suggest",
"(",
"q",
"=",
"None",
",",
"datasetKey",
"=",
"None",
",",
"rank",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'species/suggest'",
"args",
"=",
"{",
"'q'",
":",
"q",
",",
"'rank'",
":",
"rank",
",",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
"}",
"return",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] | A quick and simple autocomplete service that returns up to 20 name usages by
doing prefix matching against the scientific name. Results are ordered by relevance.
:param q: [str] Simple search parameter. The value for this parameter can be a
simple word or a phrase. Wildcards can be added to the simple word parameters only,
e.g. ``q=*puma*`` (Required)
:param datasetKey: [str] Filters by the checklist dataset key (a uuid, see examples)
:param rank: [str] A taxonomic rank. One of ``class``, ``cultivar``, ``cultivar_group``, ``domain``, ``family``,
``form``, ``genus``, ``informal``, ``infrageneric_name``, ``infraorder``, ``infraspecific_name``,
``infrasubspecific_name``, ``kingdom``, ``order``, ``phylum``, ``section``, ``series``, ``species``, ``strain``, ``subclass``,
``subfamily``, ``subform``, ``subgenus``, ``subkingdom``, ``suborder``, ``subphylum``, ``subsection``, ``subseries``,
``subspecies``, ``subtribe``, ``subvariety``, ``superclass``, ``superfamily``, ``superorder``, ``superphylum``,
``suprageneric_name``, ``tribe``, ``unranked``, or ``variety``.
:param limit: [fixnum] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
:return: A dictionary
References: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_suggest(q='Puma concolor')
x = species.name_suggest(q='Puma')
species.name_suggest(q='Puma', rank="genus")
species.name_suggest(q='Puma', rank="subspecies")
species.name_suggest(q='Puma', rank="species")
species.name_suggest(q='Puma', rank="infraspecific_name")
species.name_suggest(q='Puma', limit=2) | [
"A",
"quick",
"and",
"simple",
"autocomplete",
"service",
"that",
"returns",
"up",
"to",
"20",
"name",
"usages",
"by",
"doing",
"prefix",
"matching",
"against",
"the",
"scientific",
"name",
".",
"Results",
"are",
"ordered",
"by",
"relevance",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/species/name_suggest.py#L3-L39 |
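A sketch of iterating over name_suggest output; the GBIF suggest endpoint is expected to return a list of name-usage dicts, and the key / scientificName fields are assumed from typical GBIF payloads.
from pygbif import species

for rec in species.name_suggest(q="Puma", rank="species", limit=5):
    print(rec.get("key"), rec.get("scientificName"))   # field names assumed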
sckott/pygbif | pygbif/registry/datasets.py | dataset_metrics | def dataset_metrics(uuid, **kwargs):
'''
Get details on a GBIF dataset.
:param uuid: [str] One or more dataset UUIDs. See examples.
References: http://www.gbif.org/developer/registry#datasetMetrics
Usage::
from pygbif import registry
registry.dataset_metrics(uuid='3f8a1297-3259-4700-91fc-acc4170b27ce')
registry.dataset_metrics(uuid='66dd0960-2d7d-46ee-a491-87b9adcfe7b1')
registry.dataset_metrics(uuid=['3f8a1297-3259-4700-91fc-acc4170b27ce', '66dd0960-2d7d-46ee-a491-87b9adcfe7b1'])
'''
def getdata(x, **kwargs):
url = gbif_baseurl + 'dataset/' + x + '/metrics'
return gbif_GET(url, {}, **kwargs)
if len2(uuid) == 1:
return getdata(uuid)
else:
return [getdata(x) for x in uuid] | python | def dataset_metrics(uuid, **kwargs):
'''
Get details on a GBIF dataset.
:param uuid: [str] One or more dataset UUIDs. See examples.
References: http://www.gbif.org/developer/registry#datasetMetrics
Usage::
from pygbif import registry
registry.dataset_metrics(uuid='3f8a1297-3259-4700-91fc-acc4170b27ce')
registry.dataset_metrics(uuid='66dd0960-2d7d-46ee-a491-87b9adcfe7b1')
registry.dataset_metrics(uuid=['3f8a1297-3259-4700-91fc-acc4170b27ce', '66dd0960-2d7d-46ee-a491-87b9adcfe7b1'])
'''
def getdata(x, **kwargs):
url = gbif_baseurl + 'dataset/' + x + '/metrics'
return gbif_GET(url, {}, **kwargs)
if len2(uuid) == 1:
return getdata(uuid)
else:
return [getdata(x) for x in uuid] | [
"def",
"dataset_metrics",
"(",
"uuid",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"getdata",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'dataset/'",
"+",
"x",
"+",
"'/metrics'",
"return",
"gbif_GET",
"(",
"url",
",",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
"if",
"len2",
"(",
"uuid",
")",
"==",
"1",
":",
"return",
"getdata",
"(",
"uuid",
")",
"else",
":",
"return",
"[",
"getdata",
"(",
"x",
")",
"for",
"x",
"in",
"uuid",
"]"
] | Get details on a GBIF dataset.
:param uuid: [str] One or more dataset UUIDs. See examples.
References: http://www.gbif.org/developer/registry#datasetMetrics
Usage::
from pygbif import registry
registry.dataset_metrics(uuid='3f8a1297-3259-4700-91fc-acc4170b27ce')
registry.dataset_metrics(uuid='66dd0960-2d7d-46ee-a491-87b9adcfe7b1')
registry.dataset_metrics(uuid=['3f8a1297-3259-4700-91fc-acc4170b27ce', '66dd0960-2d7d-46ee-a491-87b9adcfe7b1']) | [
"Get",
"details",
"on",
"a",
"GBIF",
"dataset",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/registry/datasets.py#L4-L26 |
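A sketch of the list-of-UUIDs branch of dataset_metrics: passing several UUIDs yields one metrics dict per dataset, mirroring the single-UUID call; the printed field names are assumptions.
from pygbif import registry

uuids = ["3f8a1297-3259-4700-91fc-acc4170b27ce",
         "66dd0960-2d7d-46ee-a491-87b9adcfe7b1"]
for m in registry.dataset_metrics(uuid=uuids):
    print(m.get("datasetKey"), m.get("usagesCount"))   # field names assumed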
sckott/pygbif | pygbif/registry/datasets.py | datasets | def datasets(data = 'all', type = None, uuid = None, query = None, id = None,
limit = 100, offset = None, **kwargs):
'''
Search for datasets and dataset metadata.
:param data: [str] The type of data to get. Default: ``all``
:param type: [str] Type of dataset, options include ``OCCURRENCE``, etc.
:param uuid: [str] UUID of the data node provider. This must be specified if data
is anything other than ``all``.
:param query: [str] Query term(s). Only used when ``data = 'all'``
:param id: [int] A metadata document id.
References: http://www.gbif.org/developer/registry#datasets
Usage::
from pygbif import registry
registry.datasets(limit=5)
registry.datasets(type="OCCURRENCE")
registry.datasets(uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='contact', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657", id=598)
registry.datasets(data=['deleted','duplicate'])
registry.datasets(data=['deleted','duplicate'], limit=1)
'''
args = {'q': query, 'type': type, 'limit': limit, 'offset': offset}
data_choices = ['all', 'organization', 'contact', 'endpoint',
'identifier', 'tag', 'machinetag', 'comment',
'constituents', 'document', 'metadata', 'deleted',
'duplicate', 'subDataset', 'withNoEndpoint']
check_data(data, data_choices)
if len2(data) ==1:
return datasets_fetch(data, uuid, args, **kwargs)
else:
return [datasets_fetch(x, uuid, args, **kwargs) for x in data] | python | def datasets(data = 'all', type = None, uuid = None, query = None, id = None,
limit = 100, offset = None, **kwargs):
'''
Search for datasets and dataset metadata.
:param data: [str] The type of data to get. Default: ``all``
:param type: [str] Type of dataset, options include ``OCCURRENCE``, etc.
:param uuid: [str] UUID of the data node provider. This must be specified if data
is anything other than ``all``.
:param query: [str] Query term(s). Only used when ``data = 'all'``
:param id: [int] A metadata document id.
References: http://www.gbif.org/developer/registry#datasets
Usage::
from pygbif import registry
registry.datasets(limit=5)
registry.datasets(type="OCCURRENCE")
registry.datasets(uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='contact', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657", id=598)
registry.datasets(data=['deleted','duplicate'])
registry.datasets(data=['deleted','duplicate'], limit=1)
'''
args = {'q': query, 'type': type, 'limit': limit, 'offset': offset}
data_choices = ['all', 'organization', 'contact', 'endpoint',
'identifier', 'tag', 'machinetag', 'comment',
'constituents', 'document', 'metadata', 'deleted',
'duplicate', 'subDataset', 'withNoEndpoint']
check_data(data, data_choices)
if len2(data) ==1:
return datasets_fetch(data, uuid, args, **kwargs)
else:
return [datasets_fetch(x, uuid, args, **kwargs) for x in data] | [
"def",
"datasets",
"(",
"data",
"=",
"'all'",
",",
"type",
"=",
"None",
",",
"uuid",
"=",
"None",
",",
"query",
"=",
"None",
",",
"id",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"'q'",
":",
"query",
",",
"'type'",
":",
"type",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
"}",
"data_choices",
"=",
"[",
"'all'",
",",
"'organization'",
",",
"'contact'",
",",
"'endpoint'",
",",
"'identifier'",
",",
"'tag'",
",",
"'machinetag'",
",",
"'comment'",
",",
"'constituents'",
",",
"'document'",
",",
"'metadata'",
",",
"'deleted'",
",",
"'duplicate'",
",",
"'subDataset'",
",",
"'withNoEndpoint'",
"]",
"check_data",
"(",
"data",
",",
"data_choices",
")",
"if",
"len2",
"(",
"data",
")",
"==",
"1",
":",
"return",
"datasets_fetch",
"(",
"data",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"[",
"datasets_fetch",
"(",
"x",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"x",
"in",
"data",
"]"
] | Search for datasets and dataset metadata.
:param data: [str] The type of data to get. Default: ``all``
:param type: [str] Type of dataset, options include ``OCCURRENCE``, etc.
:param uuid: [str] UUID of the data node provider. This must be specified if data
is anything other than ``all``.
:param query: [str] Query term(s). Only used when ``data = 'all'``
:param id: [int] A metadata document id.
References http://www.gbif.org/developer/registry#datasets
Usage::
from pygbif import registry
registry.datasets(limit=5)
registry.datasets(type="OCCURRENCE")
registry.datasets(uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='contact', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657", id=598)
registry.datasets(data=['deleted','duplicate'])
registry.datasets(data=['deleted','duplicate'], limit=1) | [
"Search",
"for",
"datasets",
"and",
"dataset",
"metadata",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/registry/datasets.py#L28-L63 |
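The ``datasets`` function above dispatches on ``len2(data)``, a helper that is not included in the record. Judging from the usage examples (a bare string such as ``'all'`` versus a list such as ``['deleted','duplicate']``), it presumably counts a string as one choice rather than by its characters. A minimal hypothetical sketch of that helper and the dispatch it enables — not the library's actual source:

# Hypothetical sketch of the len2 helper assumed by datasets();
# the real pygbif implementation may differ.
def len2(x):
    # A bare string is a single choice, not a sequence of characters.
    if isinstance(x, str):
        return 1
    return len(x)

assert len2('all') == 1                      # -> single datasets_fetch call
assert len2(['deleted', 'duplicate']) == 2   # -> one datasets_fetch call per element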
sckott/pygbif | pygbif/registry/datasets.py | dataset_suggest | def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None,
publishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None,
limit = 100, offset = None, **kwargs):
'''
Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30)
'''
url = gbif_baseurl + 'dataset/suggest'
args = {'q': q, 'type': type, 'keyword': keyword,
'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,
'owningOrg': owningOrg, 'decade': decade,
'publishingCountry': publishingCountry,
'limit': limit, 'offset': offset}
out = gbif_GET(url, args, **kwargs)
return out | python | def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None,
publishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None,
limit = 100, offset = None, **kwargs):
'''
Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30)
'''
url = gbif_baseurl + 'dataset/suggest'
args = {'q': q, 'type': type, 'keyword': keyword,
'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,
'owningOrg': owningOrg, 'decade': decade,
'publishingCountry': publishingCountry,
'limit': limit, 'offset': offset}
out = gbif_GET(url, args, **kwargs)
return out | [
"def",
"dataset_suggest",
"(",
"q",
"=",
"None",
",",
"type",
"=",
"None",
",",
"keyword",
"=",
"None",
",",
"owningOrg",
"=",
"None",
",",
"publishingOrg",
"=",
"None",
",",
"hostingOrg",
"=",
"None",
",",
"publishingCountry",
"=",
"None",
",",
"decade",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'dataset/suggest'",
"args",
"=",
"{",
"'q'",
":",
"q",
",",
"'type'",
":",
"type",
",",
"'keyword'",
":",
"keyword",
",",
"'publishingOrg'",
":",
"publishingOrg",
",",
"'hostingOrg'",
":",
"hostingOrg",
",",
"'owningOrg'",
":",
"owningOrg",
",",
"'decade'",
":",
"decade",
",",
"'publishingCountry'",
":",
"publishingCountry",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
"}",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30) | [
"Search",
"that",
"returns",
"up",
"to",
"20",
"matching",
"datasets",
".",
"Results",
"are",
"ordered",
"by",
"relevance",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/registry/datasets.py#L88-L143 |
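``dataset_suggest`` builds its ``args`` dict with every unset parameter left as ``None``, so the shared ``gbif_GET`` helper (not shown in these records) presumably drops ``None`` values before issuing the HTTP request. A sketch of that pattern under those assumptions, using the requests library and a hypothetical helper name:

import requests

def gbif_GET_sketch(url, args, **kwargs):
    # Drop unset (None) parameters so they never reach the query string.
    params = {k: v for k, v in args.items() if v is not None}
    resp = requests.get(url, params=params, **kwargs)
    resp.raise_for_status()
    return resp.json()

# Usage, mirroring registry.dataset_suggest(q='Amazon'):
# gbif_GET_sketch('https://api.gbif.org/v1/dataset/suggest', {'q': 'Amazon', 'type': None})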
sckott/pygbif | pygbif/registry/datasets.py | dataset_search | def dataset_search(q=None, type=None, keyword=None,
owningOrg=None, publishingOrg=None, hostingOrg=None, decade=None,
publishingCountry = None, facet = None, facetMincount=None,
facetMultiselect = None, hl = False, limit = 100, offset = None,
**kwargs):
'''
Full text search across all datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter
can be a simple word or a phrase. Wildcards can be added to the simple word
parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which
you can search on. The search is done on the merged collection of tags, the
dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING
ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage
broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000,
etc, and will return datasets wholly contained in the decade as well as those
that cover the entire decade or more. Facet by decade to get the break down,
e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade,
and publishingCountry. Additionally subtype and country are legal values but not
yet implemented, so data will not yet be returned for them.
:param facetMincount: [str] Used in combination with the facet parameter. Set
facetMincount={#} to exclude facets with a count less than {#}, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000
only shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have
counts less than 10000.
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
facetMultiselect=True to still return counts for values that are not currently
filtered, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true
still shows type values 'OCCURRENCE' and 'METADATA' even though type is being
filtered by type=CHECKLIST
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class 'gbifH1' e.g.
http://api.gbif.org/v1/dataset/search?q=plant&hl=true
Fulltext search fields include: title, keyword, country, publishing country,
publishing organization title, hosting organization title, and description. One
additional full text field is searched which includes information from metadata
documents, but the text of this field is not returned in the response.
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:note: Note that you can pass in additional faceting parameters on a per field basis.
For example, if you want to limit the number of facets returned from a field ``foo`` to
3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters,
but does allow some. See also examples.
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
# Gets all datasets of type "OCCURRENCE".
registry.dataset_search(type="OCCURRENCE", limit = 10)
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_search(q="amsterdam", limit = 10)
# Limited search
registry.dataset_search(type="OCCURRENCE", limit=2)
registry.dataset_search(type="OCCURRENCE", limit=2, offset=10)
# Search by decade
registry.dataset_search(decade=1980, limit = 10)
# Faceting
## just facets
registry.dataset_search(facet="decade", facetMincount=10, limit=0)
## data and facets
registry.dataset_search(facet="decade", facetMincount=10, limit=2)
## many facet variables
registry.dataset_search(facet=["decade", "type"], facetMincount=10, limit=0)
## facet vars
### per variable paging
x = registry.dataset_search(
facet = ["decade", "type"],
decade_facetLimit = 3,
type_facetLimit = 3,
limit = 0
)
## highlight
x = registry.dataset_search(q="plant", hl=True, limit = 10)
[ z['description'] for z in x['results'] ]
'''
url = gbif_baseurl + 'dataset/search'
args = {'q': q, 'type': type, 'keyword': keyword,
'owningOrg': owningOrg, 'publishingOrg': publishingOrg,
'hostingOrg': hostingOrg, 'decade': decade,
'publishingCountry': publishingCountry, 'facet': facet,
'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': hl, 'limit': limit, 'offset': offset}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out | python | def dataset_search(q=None, type=None, keyword=None,
owningOrg=None, publishingOrg=None, hostingOrg=None, decade=None,
publishingCountry = None, facet = None, facetMincount=None,
facetMultiselect = None, hl = False, limit = 100, offset = None,
**kwargs):
'''
Full text search across all datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter
can be a simple word or a phrase. Wildcards can be added to the simple word
parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which
you can search on. The search is done on the merged collection of tags, the
dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING
ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage
broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000,
etc, and will return datasets wholly contained in the decade as well as those
that cover the entire decade or more. Facet by decade to get the break down,
e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade,
and publishingCountry. Additionally subtype and country are legal values but not
yet implemented, so data will not yet be returned for them.
:param facetMincount: [str] Used in combination with the facet parameter. Set
facetMincount={#} to exclude facets with a count less than {#}, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000
only shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have
counts less than 10000.
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
facetMultiselect=True to still return counts for values that are not currently
filtered, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true
still shows type values 'OCCURRENCE' and 'METADATA' even though type is being
filtered by type=CHECKLIST
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class 'gbifH1' e.g.
http://api.gbif.org/v1/dataset/search?q=plant&hl=true
Fulltext search fields include: title, keyword, country, publishing country,
publishing organization title, hosting organization title, and description. One
additional full text field is searched which includes information from metadata
documents, but the text of this field is not returned in the response.
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:note: Note that you can pass in additional faceting parameters on a per field basis.
For example, if you want to limit the number of facets returned from a field ``foo`` to
3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters,
but does allow some. See also examples.
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
# Gets all datasets of type "OCCURRENCE".
registry.dataset_search(type="OCCURRENCE", limit = 10)
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_search(q="amsterdam", limit = 10)
# Limited search
registry.dataset_search(type="OCCURRENCE", limit=2)
registry.dataset_search(type="OCCURRENCE", limit=2, offset=10)
# Search by decade
registry.dataset_search(decade=1980, limit = 10)
# Faceting
## just facets
registry.dataset_search(facet="decade", facetMincount=10, limit=0)
## data and facets
registry.dataset_search(facet="decade", facetMincount=10, limit=2)
## many facet variables
registry.dataset_search(facet=["decade", "type"], facetMincount=10, limit=0)
## facet vars
### per variable paging
x = registry.dataset_search(
facet = ["decade", "type"],
decade_facetLimit = 3,
type_facetLimit = 3,
limit = 0
)
## highlight
x = registry.dataset_search(q="plant", hl=True, limit = 10)
[ z['description'] for z in x['results'] ]
'''
url = gbif_baseurl + 'dataset/search'
args = {'q': q, 'type': type, 'keyword': keyword,
'owningOrg': owningOrg, 'publishingOrg': publishingOrg,
'hostingOrg': hostingOrg, 'decade': decade,
'publishingCountry': publishingCountry, 'facet': facet,
'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': hl, 'limit': limit, 'offset': offset}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out | [
"def",
"dataset_search",
"(",
"q",
"=",
"None",
",",
"type",
"=",
"None",
",",
"keyword",
"=",
"None",
",",
"owningOrg",
"=",
"None",
",",
"publishingOrg",
"=",
"None",
",",
"hostingOrg",
"=",
"None",
",",
"decade",
"=",
"None",
",",
"publishingCountry",
"=",
"None",
",",
"facet",
"=",
"None",
",",
"facetMincount",
"=",
"None",
",",
"facetMultiselect",
"=",
"None",
",",
"hl",
"=",
"False",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'dataset/search'",
"args",
"=",
"{",
"'q'",
":",
"q",
",",
"'type'",
":",
"type",
",",
"'keyword'",
":",
"keyword",
",",
"'owningOrg'",
":",
"owningOrg",
",",
"'publishingOrg'",
":",
"publishingOrg",
",",
"'hostingOrg'",
":",
"hostingOrg",
",",
"'decade'",
":",
"decade",
",",
"'publishingCountry'",
":",
"publishingCountry",
",",
"'facet'",
":",
"facet",
",",
"'facetMincount'",
":",
"facetMincount",
",",
"'facetMultiselect'",
":",
"facetMultiselect",
",",
"'hl'",
":",
"hl",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
"}",
"gbif_kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"not",
"in",
"requests_argset",
"}",
"if",
"gbif_kwargs",
"is",
"not",
"None",
":",
"xx",
"=",
"dict",
"(",
"zip",
"(",
"[",
"re",
".",
"sub",
"(",
"'_'",
",",
"'.'",
",",
"x",
")",
"for",
"x",
"in",
"gbif_kwargs",
".",
"keys",
"(",
")",
"]",
",",
"gbif_kwargs",
".",
"values",
"(",
")",
")",
")",
"args",
".",
"update",
"(",
"xx",
")",
"kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"in",
"requests_argset",
"}",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Full text search across all datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter
can be a simple word or a phrase. Wildcards can be added to the simple word
parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which
you can search on. The search is done on the merged collection of tags, the
dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING
ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage
broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000,
etc, and will return datasets wholly contained in the decade as well as those
that cover the entire decade or more. Facet by decade to get the break down,
e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade,
and publishingCountry. Additionally subtype and country are legal values but not
yet implemented, so data will not yet be returned for them.
:param facetMincount: [str] Used in combination with the facet parameter. Set
facetMincount={#} to exclude facets with a count less than {#}, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000
only shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have
counts less than 10000.
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
facetMultiselect=True to still return counts for values that are not currently
filtered, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true
still shows type values 'OCCURRENCE' and 'METADATA' even though type is being
filtered by type=CHECKLIST
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class 'gbifH1' e.g.
http://api.gbif.org/v1/dataset/search?q=plant&hl=true
Fulltext search fields include: title, keyword, country, publishing country,
publishing organization title, hosting organization title, and description. One
additional full text field is searched which includes information from metadata
documents, but the text of this field is not returned in the response.
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:note: Note that you can pass in additional faceting parameters on a per field basis.
For example, if you want to limit the number of facets returned from a field ``foo`` to
3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters,
but does allow some. See also examples.
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
# Gets all datasets of type "OCCURRENCE".
registry.dataset_search(type="OCCURRENCE", limit = 10)
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_search(q="amsterdam", limit = 10)
# Limited search
registry.dataset_search(type="OCCURRENCE", limit=2)
registry.dataset_search(type="OCCURRENCE", limit=2, offset=10)
# Search by decade
registry.dataset_search(decade=1980, limit = 10)
# Faceting
## just facets
registry.dataset_search(facet="decade", facetMincount=10, limit=0)
## data and facets
registry.dataset_search(facet="decade", facetMincount=10, limit=2)
## many facet variables
registry.dataset_search(facet=["decade", "type"], facetMincount=10, limit=0)
## facet vars
### per variable paging
x = registry.dataset_search(
facet = ["decade", "type"],
decade_facetLimit = 3,
type_facetLimit = 3,
limit = 0
)
## highlight
x = registry.dataset_search(q="plant", hl=True, limit = 10)
[ z['description'] for z in x['results'] ] | [
"Full",
"text",
"search",
"across",
"all",
"datasets",
".",
"Results",
"are",
"ordered",
"by",
"relevance",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/registry/datasets.py#L145-L257 |
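The per-field faceting kwargs that ``dataset_search`` accepts (e.g. ``decade_facetLimit = 3``) are renamed from underscore to dot form with ``re.sub('_', '.', x)`` before being merged into the query parameters. A standalone demonstration of that mapping, mirroring the record's own code:

import re

gbif_kwargs = {'decade_facetLimit': 3, 'type_facetLimit': 3}
renamed = dict(zip(
    [re.sub('_', '.', k) for k in gbif_kwargs.keys()],
    gbif_kwargs.values()
))
print(renamed)  # {'decade.facetLimit': 3, 'type.facetLimit': 3}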
sckott/pygbif | pygbif/utils/wkt_rewind.py | wkt_rewind | def wkt_rewind(x, digits = None):
'''
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
'''
z = wkt.loads(x)
if digits is None:
coords = z['coordinates']
nums = __flatten(coords)
dec_n = [ decimal.Decimal(str(w)).as_tuple().exponent for w in nums ]
digits = abs(statistics.mean(dec_n))
else:
if not isinstance(digits, int):
raise TypeError("'digits' must be an int")
wound = rewind(z)
back_to_wkt = wkt.dumps(wound, decimals = digits)
return back_to_wkt | python | def wkt_rewind(x, digits = None):
'''
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
'''
z = wkt.loads(x)
if digits is None:
coords = z['coordinates']
nums = __flatten(coords)
dec_n = [ decimal.Decimal(str(w)).as_tuple().exponent for w in nums ]
digits = abs(statistics.mean(dec_n))
else:
if not isinstance(digits, int):
raise TypeError("'digits' must be an int")
wound = rewind(z)
back_to_wkt = wkt.dumps(wound, decimals = digits)
return back_to_wkt | [
"def",
"wkt_rewind",
"(",
"x",
",",
"digits",
"=",
"None",
")",
":",
"z",
"=",
"wkt",
".",
"loads",
"(",
"x",
")",
"if",
"digits",
"is",
"None",
":",
"coords",
"=",
"z",
"[",
"'coordinates'",
"]",
"nums",
"=",
"__flatten",
"(",
"coords",
")",
"dec_n",
"=",
"[",
"decimal",
".",
"Decimal",
"(",
"str",
"(",
"w",
")",
")",
".",
"as_tuple",
"(",
")",
".",
"exponent",
"for",
"w",
"in",
"nums",
"]",
"digits",
"=",
"abs",
"(",
"statistics",
".",
"mean",
"(",
"dec_n",
")",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"digits",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"'digits' must be an int\"",
")",
"wound",
"=",
"rewind",
"(",
"z",
")",
"back_to_wkt",
"=",
"wkt",
".",
"dumps",
"(",
"wound",
",",
"decimals",
"=",
"digits",
")",
"return",
"back_to_wkt"
] | reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7) | [
"reverse",
"WKT",
"winding",
"order"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/utils/wkt_rewind.py#L6-L36 |
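When ``digits`` is ``None``, ``wkt_rewind`` derives it from the decimal exponents of every coordinate. A worked example of that calculation for the polygon in the docstring, with the coordinates listed manually in place of the record's ``__flatten`` helper:

import decimal
import statistics

# Coordinates of 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
nums = [144.6, 13.2, 144.6, 13.6, 144.9, 13.6, 144.9, 13.2, 144.6, 13.2]
dec_n = [decimal.Decimal(str(w)).as_tuple().exponent for w in nums]
print(dec_n)                        # every exponent is -1: one digit after the decimal point
print(abs(statistics.mean(dec_n)))  # 1, which becomes wkt.dumps(..., decimals=1)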
sckott/pygbif | pygbif/gbifissues.py | occ_issues_lookup | def occ_issues_lookup(issue=None, code=None):
'''
Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g., CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage::
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv')
'''
if code is None:
bb = [trymatch(issue, x) for x in gbifissues['issue'] ]
tmp = filter(None, bb)
else:
bb = [trymatch(code, x) for x in gbifissues['code'] ]
tmp = filter(None, bb)
return tmp | python | def occ_issues_lookup(issue=None, code=None):
'''
Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g., CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage::
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv')
'''
if code is None:
bb = [trymatch(issue, x) for x in gbifissues['issue'] ]
tmp = filter(None, bb)
else:
bb = [trymatch(code, x) for x in gbifissues['code'] ]
tmp = filter(None, bb)
return tmp | [
"def",
"occ_issues_lookup",
"(",
"issue",
"=",
"None",
",",
"code",
"=",
"None",
")",
":",
"if",
"code",
"is",
"None",
":",
"bb",
"=",
"[",
"trymatch",
"(",
"issue",
",",
"x",
")",
"for",
"x",
"in",
"gbifissues",
"[",
"'issue'",
"]",
"]",
"tmp",
"=",
"filter",
"(",
"None",
",",
"bb",
")",
"else",
":",
"bb",
"=",
"[",
"trymatch",
"(",
"code",
",",
"x",
")",
"for",
"x",
"in",
"gbifissues",
"[",
"'code'",
"]",
"]",
"tmp",
"=",
"filter",
"(",
"None",
",",
"bb",
")",
"return",
"tmp"
] | Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g., CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage::
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv') | [
"Lookup",
"occurrence",
"issue",
"definitions",
"and",
"short",
"codes"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/gbifissues.py#L3-L22 |
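``occ_issues_lookup`` returns the result of ``filter(None, bb)``, which on Python 3 is a lazy iterator, so callers typically wrap it in ``list()``. The ``trymatch`` helper and the module-level ``gbifissues`` table are not part of the record; a minimal hypothetical sketch of how the filtering behaves, with stand-in data adapted from the docstring examples:

import re

# Hypothetical stand-ins for the module-level gbifissues table and trymatch helper.
gbifissues = {
    'code': ['ccm', 'cdiv'],
    'issue': ['CONTINENT_COUNTRY_MISMATCH', 'COORDINATE_INVALID'],
}

def trymatch(pattern, x):
    # Return the candidate when the pattern matches it, else None.
    return x if re.search(pattern, x) else None

bb = [trymatch('cdiv', x) for x in gbifissues['code']]  # [None, 'cdiv']
print(list(filter(None, bb)))                           # ['cdiv']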
sckott/pygbif | pygbif/occurrences/search.py | search | def search(taxonKey=None, repatriated=None,
kingdomKey=None, phylumKey=None, classKey=None, orderKey=None,
familyKey=None, genusKey=None, subgenusKey=None, scientificName=None,
country=None, publishingCountry=None, hasCoordinate=None, typeStatus=None,
recordNumber=None, lastInterpreted=None, continent=None,
geometry=None, recordedBy=None, basisOfRecord=None, datasetKey=None,
eventDate=None, catalogNumber=None, year=None, month=None,
decimalLatitude=None, decimalLongitude=None, elevation=None,
depth=None, institutionCode=None, collectionCode=None,
hasGeospatialIssue=None, issue=None, q=None, spellCheck=None, mediatype=None,
limit=300, offset=0, establishmentMeans=None,
facet=None, facetMincount=None, facetMultiselect=None, **kwargs):
'''
Search GBIF occurrences
:param taxonKey: [int] A GBIF taxon key
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
:param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``q`` parameter.
IMPORTANT: This only checks the input to the ``q`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not be unique, but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the context of an institution.
:param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
owning organization's country, i.e. the country that published the record.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING,
POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=True``
returns only those records with spatial issues while ``hasGeospatialIssue=False`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
:param hasCoordinate: [bool] Return only occurrence records with lat/long data (``true``) or
all records (``false``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See the typestatus dataset for possible values
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
number. See http://rs.tdwg.org/dwc/terms/#recordNumber for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
(North America includes the Caribbean and reaches down and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
``fields = ['name','latitude','elevation']``.
:param mediatype: [str] Media type. Default is ``None``, so no filtering on mediatype. Options:
``None``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:param facet: [str] a single facet name, or a list of facet names
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``true`` to still return counts for values that are not currently
filtered. See examples. Default: ``false``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return 2 results, this is the default by the way
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
# you are getting synonyms too. The results for using `scientificName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
## spell check - only works with the `search` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
# Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Detail for parameters that support range queries
### this is a range depth, with lower/upper limits in character string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## Look for possible values of the typeStatus parameter by looking at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
# Show all records in the Arizona State Lichen Collection that can't be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout
x = occurrences.search(timeout = 1)
'''
url = gbif_baseurl + 'occurrence/search'
args = {'taxonKey': taxonKey, 'repatriated': repatriated,
'kingdomKey': kingdomKey, 'phylumKey': phylumKey, 'classKey': classKey,
'orderKey': orderKey, 'familyKey': familyKey, 'genusKey': genusKey,
'subgenusKey': subgenusKey, 'scientificName': scientificName,
'country': country, 'publishingCountry': publishingCountry,
'hasCoordinate': hasCoordinate, 'typeStatus': typeStatus,
'recordNumber': recordNumber, 'lastInterpreted': lastInterpreted,
'continent': continent, 'geometry': geometry, 'recordedBy': recordedBy,
'basisOfRecord': basisOfRecord, 'datasetKey': datasetKey, 'eventDate': eventDate,
'catalogNumber': catalogNumber, 'year': year, 'month': month,
'decimalLatitude': decimalLatitude, 'decimalLongitude': decimalLongitude,
'elevation': elevation, 'depth': depth, 'institutionCode': institutionCode,
'collectionCode': collectionCode, 'hasGeospatialIssue': hasGeospatialIssue,
'issue': issue, 'q': q, 'spellCheck': spellCheck, 'mediatype': mediatype,
'limit': limit, 'offset': offset, 'establishmentMeans': establishmentMeans,
'facetMincount': facetMincount, 'facet': facet,
'facetMultiselect': facetMultiselect}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out | python | def search(taxonKey=None, repatriated=None,
kingdomKey=None, phylumKey=None, classKey=None, orderKey=None,
familyKey=None, genusKey=None, subgenusKey=None, scientificName=None,
country=None, publishingCountry=None, hasCoordinate=None, typeStatus=None,
recordNumber=None, lastInterpreted=None, continent=None,
geometry=None, recordedBy=None, basisOfRecord=None, datasetKey=None,
eventDate=None, catalogNumber=None, year=None, month=None,
decimalLatitude=None, decimalLongitude=None, elevation=None,
depth=None, institutionCode=None, collectionCode=None,
hasGeospatialIssue=None, issue=None, q=None, spellCheck=None, mediatype=None,
limit=300, offset=0, establishmentMeans=None,
facet=None, facetMincount=None, facetMultiselect=None, **kwargs):
'''
Search GBIF occurrences
:param taxonKey: [int] A GBIF taxon key
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
:param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``q`` parameter.
IMPORTANT: This only checks the input to the ``q`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not be unique, but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the context of an institution.
:param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
owning organization's country, i.e. the country that published the record.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING,
POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=True``
returns only those records with spatial issues while ``hasGeospatialIssue=False`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
:param hasCoordinate: [bool] Return only occurrence records with lat/long data (``true``) or
all records (``false``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
number. See http://rs.tdwg.org/dwc/terms/#recordNumber for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
(North America includes the Caribbean and reaches down to and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
``fields = ['name','latitude','elevation']``.
:param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:
``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:param facet: [str] a string or list of strings (one or more facet names)
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``true`` to still return counts for values that are not currently
filtered. See examples. Default: ``false``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return just 2 results (the default limit is 300)
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
# you are getting synonyms too. The results for using `scientificName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
## spell check - only works with the `q` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
# Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Details for parameters that support range queries
### this is a depth range, with lower/upper limits given as a string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## For possible values of the typeStatus parameter, look at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
# Show all records in the Arizona State Lichen Collection that can't be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout
x = occurrences.search(timeout = 1)
'''
url = gbif_baseurl + 'occurrence/search'
args = {'taxonKey': taxonKey, 'repatriated': repatriated,
'kingdomKey': kingdomKey, 'phylumKey': phylumKey, 'classKey': classKey,
'orderKey': orderKey, 'familyKey': familyKey, 'genusKey': genusKey,
'subgenusKey': subgenusKey, 'scientificName': scientificName,
'country': country, 'publishingCountry': publishingCountry,
'hasCoordinate': hasCoordinate, 'typeStatus': typeStatus,
'recordNumber': recordNumber, 'lastInterpreted': lastInterpreted,
'continent': continent, 'geometry': geometry, 'recordedBy': recordedBy,
'basisOfRecord': basisOfRecord, 'datasetKey': datasetKey, 'eventDate': eventDate,
'catalogNumber': catalogNumber, 'year': year, 'month': month,
'decimalLatitude': decimalLatitude, 'decimalLongitude': decimalLongitude,
'elevation': elevation, 'depth': depth, 'institutionCode': institutionCode,
'collectionCode': collectionCode, 'hasGeospatialIssue': hasGeospatialIssue,
'issue': issue, 'q': q, 'spellCheck': spellCheck, 'mediatype': mediatype,
'limit': limit, 'offset': offset, 'establishmentMeans': establishmentMeans,
'facetMincount': facetMincount, 'facet': facet,
'facetMultiselect': facetMultiselect}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out | [
"def",
"search",
"(",
"taxonKey",
"=",
"None",
",",
"repatriated",
"=",
"None",
",",
"kingdomKey",
"=",
"None",
",",
"phylumKey",
"=",
"None",
",",
"classKey",
"=",
"None",
",",
"orderKey",
"=",
"None",
",",
"familyKey",
"=",
"None",
",",
"genusKey",
"=",
"None",
",",
"subgenusKey",
"=",
"None",
",",
"scientificName",
"=",
"None",
",",
"country",
"=",
"None",
",",
"publishingCountry",
"=",
"None",
",",
"hasCoordinate",
"=",
"None",
",",
"typeStatus",
"=",
"None",
",",
"recordNumber",
"=",
"None",
",",
"lastInterpreted",
"=",
"None",
",",
"continent",
"=",
"None",
",",
"geometry",
"=",
"None",
",",
"recordedBy",
"=",
"None",
",",
"basisOfRecord",
"=",
"None",
",",
"datasetKey",
"=",
"None",
",",
"eventDate",
"=",
"None",
",",
"catalogNumber",
"=",
"None",
",",
"year",
"=",
"None",
",",
"month",
"=",
"None",
",",
"decimalLatitude",
"=",
"None",
",",
"decimalLongitude",
"=",
"None",
",",
"elevation",
"=",
"None",
",",
"depth",
"=",
"None",
",",
"institutionCode",
"=",
"None",
",",
"collectionCode",
"=",
"None",
",",
"hasGeospatialIssue",
"=",
"None",
",",
"issue",
"=",
"None",
",",
"q",
"=",
"None",
",",
"spellCheck",
"=",
"None",
",",
"mediatype",
"=",
"None",
",",
"limit",
"=",
"300",
",",
"offset",
"=",
"0",
",",
"establishmentMeans",
"=",
"None",
",",
"facet",
"=",
"None",
",",
"facetMincount",
"=",
"None",
",",
"facetMultiselect",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/search'",
"args",
"=",
"{",
"'taxonKey'",
":",
"taxonKey",
",",
"'repatriated'",
":",
"repatriated",
",",
"'kingdomKey'",
":",
"kingdomKey",
",",
"'phylumKey'",
":",
"phylumKey",
",",
"'classKey'",
":",
"classKey",
",",
"'orderKey'",
":",
"orderKey",
",",
"'familyKey'",
":",
"familyKey",
",",
"'genusKey'",
":",
"genusKey",
",",
"'subgenusKey'",
":",
"subgenusKey",
",",
"'scientificName'",
":",
"scientificName",
",",
"'country'",
":",
"country",
",",
"'publishingCountry'",
":",
"publishingCountry",
",",
"'hasCoordinate'",
":",
"hasCoordinate",
",",
"'typeStatus'",
":",
"typeStatus",
",",
"'recordNumber'",
":",
"recordNumber",
",",
"'lastInterpreted'",
":",
"lastInterpreted",
",",
"'continent'",
":",
"continent",
",",
"'geometry'",
":",
"geometry",
",",
"'recordedBy'",
":",
"recordedBy",
",",
"'basisOfRecord'",
":",
"basisOfRecord",
",",
"'datasetKey'",
":",
"datasetKey",
",",
"'eventDate'",
":",
"eventDate",
",",
"'catalogNumber'",
":",
"catalogNumber",
",",
"'year'",
":",
"year",
",",
"'month'",
":",
"month",
",",
"'decimalLatitude'",
":",
"decimalLatitude",
",",
"'decimalLongitude'",
":",
"decimalLongitude",
",",
"'elevation'",
":",
"elevation",
",",
"'depth'",
":",
"depth",
",",
"'institutionCode'",
":",
"institutionCode",
",",
"'collectionCode'",
":",
"collectionCode",
",",
"'hasGeospatialIssue'",
":",
"hasGeospatialIssue",
",",
"'issue'",
":",
"issue",
",",
"'q'",
":",
"q",
",",
"'spellCheck'",
":",
"spellCheck",
",",
"'mediatype'",
":",
"mediatype",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
",",
"'establishmentMeans'",
":",
"establishmentMeans",
",",
"'facetMincount'",
":",
"facetMincount",
",",
"'facet'",
":",
"facet",
",",
"'facetMultiselect'",
":",
"facetMultiselect",
"}",
"gbif_kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"not",
"in",
"requests_argset",
"}",
"if",
"gbif_kwargs",
"is",
"not",
"None",
":",
"xx",
"=",
"dict",
"(",
"zip",
"(",
"[",
"re",
".",
"sub",
"(",
"'_'",
",",
"'.'",
",",
"x",
")",
"for",
"x",
"in",
"gbif_kwargs",
".",
"keys",
"(",
")",
"]",
",",
"gbif_kwargs",
".",
"values",
"(",
")",
")",
")",
"args",
".",
"update",
"(",
"xx",
")",
"kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"in",
"requests_argset",
"}",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Search GBIF occurrences
:param taxonKey: [int] A GBIF taxon key from the GBIF backbone
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
:param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``q`` parameter.
IMPORTANT: This only checks the input to the ``q`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different from the country in which the record was recorded
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record, which may not be unique but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution.
:param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param q: [str] Query terms. The value for this parameter can be a simple word or a phrase.
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
country in which the occurrence was recorded.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING,
POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=True``
returns only those records with spatial issues while ``hasGeospatialIssue=False`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
:param hasCoordinate: [bool] Return only occurrence records with lat/long data (``true``) or
all records (``false``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
number. See http://rs.tdwg.org/dwc/terms/#recordNumber for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
(North America includes the Caribbean and reaches down to and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
``fields = ['name','latitude','elevation']``.
:param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:
``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:param facet: [str] a string or list of strings (one or more facet names)
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``true`` to still return counts for values that are not currently
filtered. See examples. Default: ``false``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return just 2 results (the default limit is 300)
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
# you are getting synonyms too. The results for using `scientificName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
## spell check - only works with the `q` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
# Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Details for parameters that support range queries
### this is a depth range, with lower/upper limits given as a string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## For possible values of the typeStatus parameter, look at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
# Show all records in the Arizona State Lichen Collection that can't be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout
x = occurrences.search(timeout = 1) | [
"Search",
"GBIF",
"occurrences"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/search.py#L4-L334 |
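The search record above pages its results with ``limit`` and ``offset``. A minimal paging sketch follows; the helper name ``fetch_all`` and the record cap are ours, not part of pygbif, and it assumes the paged response carries GBIF's usual ``endOfRecords`` flag alongside ``results``:

from pygbif import occurrences

def fetch_all(taxon_key, page_size=300, max_records=1500):
    # Walk pages by advancing `offset` until GBIF signals the end of the
    # result set; `max_records` keeps this sketch bounded.
    records = []
    offset = 0
    while offset < max_records:
        resp = occurrences.search(taxonKey=taxon_key,
                                  limit=page_size, offset=offset)
        records.extend(resp['results'])
        if resp.get('endOfRecords') or not resp['results']:
            break
        offset += page_size
    return records

recs = fetch_all(3329049, page_size=50, max_records=150)
print(len(recs))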
sckott/pygbif | pygbif/registry/networks.py | networks | def networks(data = 'all', uuid = None, q = None, identifier = None,
identifierType = None, limit = 100, offset = None, **kwargs):
'''
Networks metadata.
Note: there's only 1 network now, so there's not a lot you can do with this method.
:param data: [str] The type of data to get. Default: ``all``
:param uuid: [str] UUID of the data network provider. This must be specified if data
is anything other than ``all``.
:param q: [str] Query networks. Only used when ``data = 'all'``. Ignored otherwise.
:param identifier: [fixnum] The value for this parameter can be a simple string or integer,
e.g. identifier=120
:param identifierType: [str] Used in combination with the identifier parameter to filter
identifiers by identifier type: ``DOI``, ``FTP``, ``GBIF_NODE``, ``GBIF_PARTICIPANT``,
``GBIF_PORTAL``, ``HANDLER``, ``LSID``, ``UNKNOWN``, ``URI``, ``URL``, ``UUID``
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#networks
Usage::
from pygbif import registry
registry.networks(limit=1)
registry.networks(uuid='2b7c7b4f-4d4f-40d3-94de-c28b6fa054a6')
'''
args = {'q': q, 'limit': limit, 'offset': offset, 'identifier': identifier,
'identifierType': identifierType}
data_choices = ['all', 'contact', 'endpoint', 'identifier',
'tag', 'machineTag', 'comment', 'constituents']
check_data(data, data_choices)
def getdata(x, uuid, args, **kwargs):
if x != 'all' and uuid is None:
stop('You must specify a uuid if data does not equal "all"')
if uuid is None:
url = gbif_baseurl + 'network'
else:
if x == 'all':
url = gbif_baseurl + 'network/' + uuid
else:
url = gbif_baseurl + 'network/' + uuid + '/' + x
res = gbif_GET(url, args, **kwargs)
return {'meta': get_meta(res), 'data': parse_results(res, uuid)}
if len2(data) == 1:
return getdata(data, uuid, args, **kwargs)
else:
return [getdata(x, uuid, args, **kwargs) for x in data] | python | def networks(data = 'all', uuid = None, q = None, identifier = None,
identifierType = None, limit = 100, offset = None, **kwargs):
'''
Networks metadata.
Note: there's only 1 network now, so there's not a lot you can do with this method.
:param data: [str] The type of data to get. Default: ``all``
:param uuid: [str] UUID of the data network provider. This must be specified if data
is anything other than ``all``.
:param q: [str] Query networks. Only used when ``data = 'all'``. Ignored otherwise.
:param identifier: [fixnum] The value for this parameter can be a simple string or integer,
e.g. identifier=120
:param identifierType: [str] Used in combination with the identifier parameter to filter
identifiers by identifier type: ``DOI``, ``FTP``, ``GBIF_NODE``, ``GBIF_PARTICIPANT``,
``GBIF_PORTAL``, ``HANDLER``, ``LSID``, ``UNKNOWN``, ``URI``, ``URL``, ``UUID``
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#networks
Usage::
from pygbif import registry
registry.networks(limit=1)
registry.networks(uuid='2b7c7b4f-4d4f-40d3-94de-c28b6fa054a6')
'''
args = {'q': q, 'limit': limit, 'offset': offset, 'identifier': identifier,
'identifierType': identifierType}
data_choices = ['all', 'contact', 'endpoint', 'identifier',
'tag', 'machineTag', 'comment', 'constituents']
check_data(data, data_choices)
def getdata(x, uuid, args, **kwargs):
if x != 'all' and uuid is None:
stop('You must specify a uuid if data does not equal "all"')
if uuid is None:
url = gbif_baseurl + 'network'
else:
if x == 'all':
url = gbif_baseurl + 'network/' + uuid
else:
url = gbif_baseurl + 'network/' + uuid + '/' + x
res = gbif_GET(url, args, **kwargs)
return {'meta': get_meta(res), 'data': parse_results(res, uuid)}
if len2(data) == 1:
return getdata(data, uuid, args, **kwargs)
else:
return [getdata(x, uuid, args, **kwargs) for x in data] | [
"def",
"networks",
"(",
"data",
"=",
"'all'",
",",
"uuid",
"=",
"None",
",",
"q",
"=",
"None",
",",
"identifier",
"=",
"None",
",",
"identifierType",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"'q'",
":",
"q",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
",",
"'identifier'",
":",
"identifier",
",",
"'identifierType'",
":",
"identifierType",
"}",
"data_choices",
"=",
"[",
"'all'",
",",
"'contact'",
",",
"'endpoint'",
",",
"'identifier'",
",",
"'tag'",
",",
"'machineTag'",
",",
"'comment'",
",",
"'constituents'",
"]",
"check_data",
"(",
"data",
",",
"data_choices",
")",
"def",
"getdata",
"(",
"x",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"x",
"is",
"not",
"'all'",
"and",
"uuid",
"is",
"None",
":",
"stop",
"(",
"'You must specify a uuid if data does not equal \"all\"'",
")",
"if",
"uuid",
"is",
"None",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'network'",
"else",
":",
"if",
"x",
"is",
"'all'",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'network/'",
"+",
"uuid",
"else",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'network/'",
"+",
"uuid",
"+",
"'/'",
"+",
"x",
"res",
"=",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"{",
"'meta'",
":",
"get_meta",
"(",
"res",
")",
",",
"'data'",
":",
"parse_results",
"(",
"res",
",",
"uuid",
")",
"}",
"if",
"len2",
"(",
"data",
")",
"==",
"1",
":",
"return",
"getdata",
"(",
"data",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"[",
"getdata",
"(",
"x",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"x",
"in",
"data",
"]"
] | Networks metadata.
Note: there's only 1 network now, so there's not a lot you can do with this method.
:param data: [str] The type of data to get. Default: ``all``
:param uuid: [str] UUID of the data network provider. This must be specified if data
is anything other than ``all``.
:param q: [str] Query networks. Only used when ``data = 'all'``. Ignored otherwise.
:param identifier: [fixnum] The value for this parameter can be a simple string or integer,
e.g. identifier=120
:param identifierType: [str] Used in combination with the identifier parameter to filter
identifiers by identifier type: ``DOI``, ``FTP``, ``GBIF_NODE``, ``GBIF_PARTICIPANT``,
``GBIF_PORTAL``, ``HANDLER``, ``LSID``, ``UNKNOWN``, ``URI``, ``URL``, ``UUID``
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#networks
Usage::
from pygbif import registry
registry.networks(limit=1)
registry.networks(uuid='2b7c7b4f-4d4f-40d3-94de-c28b6fa054a6') | [
"Networks",
"metadata",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/registry/networks.py#L3-L56 |
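The networks function above dispatches on its ``data`` argument with string comparisons, which is why the checks read ``x != 'all'`` and ``x == 'all'`` rather than the original ``is not``/``is``: ``is`` tests object identity, not value, and equal strings built at runtime need not be the same object. A small standalone sketch of the difference:

# `==` compares values; `is` compares identity. CPython 3.8+ even emits a
# SyntaxWarning for literal identity checks such as `x is 'all'`.
x = ''.join(['al', 'l'])   # the value 'all', constructed at runtime
print(x == 'all')          # True  -- value comparison, always reliable
print(x is 'all')          # typically False -- identity, interning-dependent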
sckott/pygbif | pygbif/maps/map.py | map | def map(source = 'density', z = 0, x = 0, y = 0, format = '@1x.png',
srs='EPSG:4326', bin=None, hexPerTile=None, style='classic.point',
taxonKey=None, country=None, publishingCountry=None, publisher=None,
datasetKey=None, year=None, basisOfRecord=None, **kwargs):
'''
GBIF maps API
:param source: [str] Either ``density`` for fast, precalculated tiles,
or ``adhoc`` for any search
:param z: [str] zoom level
:param x: [str] tile column (the x index of the tile, not a longitude)
:param y: [str] tile row (the y index of the tile, not a latitude)
:param format: [str] format of returned data. One of:
- ``.mvt`` - vector tile
- ``@Hx.png`` - 256px raster tile (for legacy clients)
- ``@1x.png`` - 512px raster tile, @2x.png for a 1024px raster tile
- ``@1x.png`` - 512px raster tile
- ``@3x.png`` - 2048px raster tile
- ``@4x.png`` - 4096px raster tile
:param srs: [str] Spatial reference system. One of:
- ``EPSG:3857`` (Web Mercator)
- ``EPSG:4326`` (WGS84 plate carrée)
- ``EPSG:3575`` (Arctic LAEA)
- ``EPSG:3031`` (Antarctic stereographic)
:param bin: [str] square or hex to aggregate occurrence counts into
squares or hexagons. Points by default.
:param hexPerTile: [str] sets the size of the hexagons (the number
horizontally across a tile)
:param squareSize: [str] sets the size of the squares. Choose a factor
of 4096 so they tessellate correctly: probably from 8, 16, 32, 64,
128, 256, 512.
:param style: [str] for raster tiles, choose from the available styles.
Defaults to classic.point.
:param taxonKey: [int] A GBIF taxon key from the GBIF backbone
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param country: [str] The 2-letter country code (as per ISO-3166-1) of
the country in which the occurrence was recorded. See here
http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum
http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html
Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param year: [int] The 4 digit year. A year of 98 will be interpreted as
AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``,
whereas ``1991,1990`` wouldn't work)
:param publishingCountry: [str] The 2-letter country code (as per
ISO-3166-1) of the country in which the occurrence was recorded.
:return: An object of class GbifMap
For mvt format, see https://github.com/tilezen/mapbox-vector-tile to
decode, and example below
Usage::
from pygbif import maps
out = maps.map(taxonKey = 2435098)
out.response
out.path
out.img
out.plot()
out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1))
out.response
out.path
out.img
out.plot()
# srs
maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857")
# bin
maps.map(taxonKey = 212, year = 1998, bin = "hex",
hexPerTile = 30, style = "classic-noborder.poly")
# style
maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot()
# basisOfRecord
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = "HUMAN_OBSERVATION", bin = "hex",
hexPerTile = 500).plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
hexPerTile = 500, bin = "hex").plot()
# map vector tiles, gives back raw bytes
from pygbif import maps
x = maps.map(taxonKey = 2480498, year = 2010,
format = ".mvt")
x.response
x.path
x.img # None
import mapbox_vector_tile
mapbox_vector_tile.decode(x.response.content)
'''
if format not in ['.mvt', '@Hx.png', '@1x.png', '@2x.png', '@3x.png', '@4x.png']:
raise ValueError("'format' not in allowed set, see docs")
if source not in ['density', 'adhoc']:
raise ValueError("'source' not in allowed set, see docs")
if srs not in ['EPSG:3857', 'EPSG:4326', 'EPSG:3575', 'EPSG:3031']:
raise ValueError("'srs' not in allowed set, see docs")
if bin is not None:
if bin not in ['square', 'hex']:
raise ValueError("'bin' not in allowed set, see docs")
if style is not None:
if style not in map_styles:
raise ValueError("'style' not in allowed set, see docs")
maps_baseurl = 'https://api.gbif.org'
url = maps_baseurl + '/v2/map/occurrence/%s/%s/%s/%s%s'
url = url % ( source, z, x, y, format )
year = __handle_year(year)
basisOfRecord = __handle_bor(basisOfRecord)
args = {'srs': srs, 'bin': bin, 'hexPerTile': hexPerTile, 'style': style,
'taxonKey': taxonKey, 'country': country,
'publishingCountry': publishingCountry, 'publisher': publisher,
'datasetKey': datasetKey, 'year': year,
'basisOfRecord': basisOfRecord}
kw = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if kw is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in kw.keys() ], kw.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
ctype = 'image/png' if has(format, "png") else 'application/x-protobuf'
out = gbif_GET_map(url, args, ctype, **kwargs)
# return out
return GbifMap(out) | python | def map(source = 'density', z = 0, x = 0, y = 0, format = '@1x.png',
srs='EPSG:4326', bin=None, hexPerTile=None, style='classic.point',
taxonKey=None, country=None, publishingCountry=None, publisher=None,
datasetKey=None, year=None, basisOfRecord=None, **kwargs):
'''
GBIF maps API
:param source: [str] Either ``density`` for fast, precalculated tiles,
or ``adhoc`` for any search
:param z: [str] zoom level
:param x: [str] tile column (the x index of the tile, not a longitude)
:param y: [str] tile row (the y index of the tile, not a latitude)
:param format: [str] format of returned data. One of:
- ``.mvt`` - vector tile
- ``@Hx.png`` - 256px raster tile (for legacy clients)
- ``@1x.png`` - 512px raster tile
- ``@2x.png`` - 1024px raster tile
- ``@3x.png`` - 2048px raster tile
- ``@4x.png`` - 4096px raster tile
:param srs: [str] Spatial reference system. One of:
- ``EPSG:3857`` (Web Mercator)
- ``EPSG:4326`` (WGS84 plate carrée)
- ``EPSG:3575`` (Arctic LAEA)
- ``EPSG:3031`` (Antarctic stereographic)
:param bin: [str] square or hex to aggregate occurrence counts into
squares or hexagons. Points by default.
:param hexPerTile: [str] sets the size of the hexagons (the number
horizontally across a tile)
:param squareSize: [str] sets the size of the squares. Choose a factor
of 4096 so they tessellate correctly: probably from 8, 16, 32, 64,
128, 256, 512.
:param style: [str] for raster tiles, choose from the available styles.
Defaults to classic.point.
:param taxonKey: [int] A GBIF taxon key from the GBIF backbone
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param country: [str] The 2-letter country code (as per ISO-3166-1) of
the country in which the occurrence was recorded. See here
http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum
http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html
Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param year: [int] The 4 digit year. A year of 98 will be interpreted as
AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``,
whereas ``1991,1990`` wouldn't work)
:param publishingCountry: [str] The 2-letter country code (as per
ISO-3166-1) of the country in which the occurrence was recorded.
:return: An object of class GbifMap
For mvt format, see https://github.com/tilezen/mapbox-vector-tile to
decode, and example below
Usage::
from pygbif import maps
out = maps.map(taxonKey = 2435098)
out.response
out.path
out.img
out.plot()
out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1))
out.response
out.path
out.img
out.plot()
# srs
maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857")
# bin
maps.map(taxonKey = 212, year = 1998, bin = "hex",
hexPerTile = 30, style = "classic-noborder.poly")
# style
maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot()
# basisOfRecord
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = "HUMAN_OBSERVATION", bin = "hex",
hexPerTile = 500).plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
hexPerTile = 500, bin = "hex").plot()
# map vector tiles, gives back raw bytes
from pygbif import maps
x = maps.map(taxonKey = 2480498, year = 2010,
format = ".mvt")
x.response
x.path
x.img # None
import mapbox_vector_tile
mapbox_vector_tile.decode(x.response.content)
'''
if format not in ['.mvt', '@Hx.png', '@1x.png', '@2x.png', '@3x.png', '@4x.png']:
raise ValueError("'format' not in allowed set, see docs")
if source not in ['density', 'adhoc']:
raise ValueError("'source' not in allowed set, see docs")
if srs not in ['EPSG:3857', 'EPSG:4326', 'EPSG:3575', 'EPSG:3031']:
raise ValueError("'srs' not in allowed set, see docs")
if bin is not None:
if bin not in ['square', 'hex']:
raise ValueError("'bin' not in allowed set, see docs")
if style is not None:
if style not in map_styles:
raise ValueError("'style' not in allowed set, see docs")
maps_baseurl = 'https://api.gbif.org'
url = maps_baseurl + '/v2/map/occurrence/%s/%s/%s/%s%s'
url = url % ( source, z, x, y, format )
year = __handle_year(year)
basisOfRecord = __handle_bor(basisOfRecord)
args = {'srs': srs, 'bin': bin, 'hexPerTile': hexPerTile, 'style': style,
'taxonKey': taxonKey, 'country': country,
'publishingCountry': publishingCountry, 'publisher': publisher,
'datasetKey': datasetKey, 'year': year,
'basisOfRecord': basisOfRecord}
kw = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if kw is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in kw.keys() ], kw.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
ctype = 'image/png' if has(format, "png") else 'application/x-protobuf'
out = gbif_GET_map(url, args, ctype, **kwargs)
# return out
return GbifMap(out) | [
"def",
"map",
"(",
"source",
"=",
"'density'",
",",
"z",
"=",
"0",
",",
"x",
"=",
"0",
",",
"y",
"=",
"0",
",",
"format",
"=",
"'@1x.png'",
",",
"srs",
"=",
"'EPSG:4326'",
",",
"bin",
"=",
"None",
",",
"hexPerTile",
"=",
"None",
",",
"style",
"=",
"'classic.point'",
",",
"taxonKey",
"=",
"None",
",",
"country",
"=",
"None",
",",
"publishingCountry",
"=",
"None",
",",
"publisher",
"=",
"None",
",",
"datasetKey",
"=",
"None",
",",
"year",
"=",
"None",
",",
"basisOfRecord",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"format",
"not",
"in",
"[",
"'.mvt'",
",",
"'@Hx.png'",
",",
"'@1x.png'",
",",
"'@2x.png'",
",",
"'@3x.png'",
",",
"'@4x.png'",
"]",
":",
"raise",
"ValueError",
"(",
"\"'format' not in allowed set, see docs\"",
")",
"if",
"source",
"not",
"in",
"[",
"'density'",
",",
"'adhoc'",
"]",
":",
"raise",
"ValueError",
"(",
"\"'source' not in allowed set, see docs\"",
")",
"if",
"srs",
"not",
"in",
"[",
"'EPSG:3857'",
",",
"'EPSG:4326'",
",",
"'EPSG:3575'",
",",
"'EPSG:3031'",
"]",
":",
"raise",
"ValueError",
"(",
"\"'srs' not in allowed set, see docs\"",
")",
"if",
"bin",
"is",
"not",
"None",
":",
"if",
"bin",
"not",
"in",
"[",
"'square'",
",",
"'hex'",
"]",
":",
"raise",
"ValueError",
"(",
"\"'bin' not in allowed set, see docs\"",
")",
"if",
"style",
"is",
"not",
"None",
":",
"if",
"style",
"not",
"in",
"map_styles",
":",
"raise",
"ValueError",
"(",
"\"'style' not in allowed set, see docs\"",
")",
"maps_baseurl",
"=",
"'https://api.gbif.org'",
"url",
"=",
"maps_baseurl",
"+",
"'/v2/map/occurrence/%s/%s/%s/%s%s'",
"url",
"=",
"url",
"%",
"(",
"source",
",",
"z",
",",
"x",
",",
"y",
",",
"format",
")",
"year",
"=",
"__handle_year",
"(",
"year",
")",
"basisOfRecord",
"=",
"__handle_bor",
"(",
"basisOfRecord",
")",
"args",
"=",
"{",
"'srs'",
":",
"srs",
",",
"'bin'",
":",
"bin",
",",
"'hexPerTile'",
":",
"hexPerTile",
",",
"'style'",
":",
"style",
",",
"'taxonKey'",
":",
"taxonKey",
",",
"'country'",
":",
"country",
",",
"'publishingCountry'",
":",
"publishingCountry",
",",
"'publisher'",
":",
"publisher",
",",
"'datasetKey'",
":",
"datasetKey",
",",
"'year'",
":",
"year",
",",
"'basisOfRecord'",
":",
"basisOfRecord",
"}",
"kw",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"not",
"in",
"requests_argset",
"}",
"if",
"kw",
"is",
"not",
"None",
":",
"xx",
"=",
"dict",
"(",
"zip",
"(",
"[",
"re",
".",
"sub",
"(",
"'_'",
",",
"'.'",
",",
"x",
")",
"for",
"x",
"in",
"kw",
".",
"keys",
"(",
")",
"]",
",",
"kw",
".",
"values",
"(",
")",
")",
")",
"args",
".",
"update",
"(",
"xx",
")",
"kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"in",
"requests_argset",
"}",
"ctype",
"=",
"'image/png'",
"if",
"has",
"(",
"format",
",",
"\"png\"",
")",
"else",
"'application/x-protobuf'",
"out",
"=",
"gbif_GET_map",
"(",
"url",
",",
"args",
",",
"ctype",
",",
"*",
"*",
"kwargs",
")",
"# return out",
"return",
"GbifMap",
"(",
"out",
")"
] | GBIF maps API
:param source: [str] Either ``density`` for fast, precalculated tiles,
or ``adhoc`` for any search
:param z: [str] zoom level
:param x: [str] tile column (the x index of the tile, not a longitude)
:param y: [str] tile row (the y index of the tile, not a latitude)
:param format: [str] format of returned data. One of:
- ``.mvt`` - vector tile
- ``@Hx.png`` - 256px raster tile (for legacy clients)
- ``@1x.png`` - 512px raster tile
- ``@2x.png`` - 1024px raster tile
- ``@3x.png`` - 2048px raster tile
- ``@4x.png`` - 4096px raster tile
:param srs: [str] Spatial reference system. One of:
- ``EPSG:3857`` (Web Mercator)
- ``EPSG:4326`` (WGS84 plate carrée)
- ``EPSG:3575`` (Arctic LAEA)
- ``EPSG:3031`` (Antarctic stereographic)
:param bin: [str] square or hex to aggregate occurrence counts into
squares or hexagons. Points by default.
:param hexPerTile: [str] sets the size of the hexagons (the number
horizontally across a tile)
:param squareSize: [str] sets the size of the squares. Choose a factor
of 4096 so they tessellate correctly: probably from 8, 16, 32, 64,
128, 256, 512.
:param style: [str] for raster tiles, choose from the available styles.
Defaults to classic.point.
:param taxonKey: [int] A GBIF taxon key from the GBIF backbone
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param country: [str] The 2-letter country code (as per ISO-3166-1) of
the country in which the occurrence was recorded. See here
http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum
http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html
Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param year: [int] The 4 digit year. A year of 98 will be interpreted as
AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``,
whereas ``1991,1990`` wouldn't work)
:param publishingCountry: [str] The 2-letter country code (as per
ISO-3166-1) of the country in which the occurrence was recorded.
:return: An object of class GbifMap
For mvt format, see https://github.com/tilezen/mapbox-vector-tile to
decode, and example below
Usage::
from pygbif import maps
out = maps.map(taxonKey = 2435098)
out.response
out.path
out.img
out.plot()
out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1))
out.response
out.path
out.img
out.plot()
# srs
maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857")
# bin
maps.map(taxonKey = 212, year = 1998, bin = "hex",
hexPerTile = 30, style = "classic-noborder.poly")
# style
maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot()
# basisOfRecord
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = "HUMAN_OBSERVATION", bin = "hex",
hexPerTile = 500).plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
hexPerTile = 500, bin = "hex").plot()
# map vector tiles, gives back raw bytes
from pygbif import maps
x = maps.map(taxonKey = 2480498, year = 2010,
format = ".mvt")
x.response
x.path
x.img # None
import mapbox_vector_tile
mapbox_vector_tile.decode(x.response.content) | [
"GBIF",
"maps",
"API"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/maps/map.py#L8-L145 |
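maps.map above addresses tiles by ``z``/``x``/``y``. A sketch of how a longitude/latitude point maps to the tile indices the API expects: the helper ``lonlat_to_tile`` is ours (standard slippy-map tile math for the Web Mercator scheme, not part of pygbif), and it assumes pygbif is installed:

import math
from pygbif import maps

def lonlat_to_tile(lon, lat, z):
    # Column from longitude; row from the Mercator-projected latitude.
    n = 2 ** z
    xtile = int((lon + 180.0) / 360.0 * n)
    lat_rad = math.radians(lat)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
    return xtile, ytile

x, y = lonlat_to_tile(10.0, 50.0, 3)   # tile covering central Europe at z=3
out = maps.map(taxonKey=2435098, z=3, x=x, y=y, srs='EPSG:3857')
out.plot()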
sckott/pygbif | pygbif/species/name_usage.py | name_usage | def name_usage(key = None, name = None, data = 'all', language = None,
datasetKey = None, uuid = None, sourceId = None, rank = None, shortname = None,
limit = 100, offset = None, **kwargs):
'''
Lookup details for specific names in all taxonomies in GBIF.
:param key: [fixnum] A GBIF key for a taxon
:param name: [str] Filters by a case insensitive, canonical namestring,
e.g. 'Puma concolor'
:param data: [str] The type of data to get. Default: ``all``. Options: ``all``,
``verbatim``, ``name``, ``parents``, ``children``,
``related``, ``synonyms``, ``descriptions``, ``distributions``, ``media``,
``references``, ``speciesProfiles``, ``vernacularNames``, ``typeSpecimens``,
``root``
:param language: [str] Language, default is English
:param datasetKey: [str] Filters by the dataset's key (a uuid)
:param uuid: [str] A uuid for a dataset. Should give exact same results as datasetKey.
:param sourceId: [fixnum] Filters by the source identifier.
:param rank: [str] Taxonomic rank. Filters by taxonomic rank as one of:
``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``, ``FORM``, ``GENUS``, ``INFORMAL``,
``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``, ``INFRASUBSPECIFIC_NAME``,
``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``, ``SUBFAMILY``,
``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``,
``SUPERPHYLUM``, ``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY``
:param shortname: [str] A short name (the upstream API docs give no further detail)
:param limit: [fixnum] Number of records to return. Default: ``100``. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
References: http://www.gbif.org/developer/species#nameUsages
Usage::
from pygbif import species
species.name_usage(key=1)
# Name usage for a taxonomic name
species.name_usage(name='Puma', rank="GENUS")
# All name usages
species.name_usage()
# References for a name usage
species.name_usage(key=2435099, data='references')
# Species profiles, descriptions
species.name_usage(key=3119195, data='speciesProfiles')
species.name_usage(key=3119195, data='descriptions')
species.name_usage(key=2435099, data='children')
# Vernacular names for a name usage
species.name_usage(key=3119195, data='vernacularNames')
# Limit number of results returned
species.name_usage(key=3119195, data='vernacularNames', limit=3)
# Search for names by dataset with datasetKey parameter
species.name_usage(datasetKey="d7dddbf4-2cf0-4f39-9b2a-bb099caae36c")
# Search for a particular language
species.name_usage(key=3119195, language="FRENCH", data='vernacularNames')
'''
args = {'language': language, 'name': name, 'datasetKey': datasetKey,
'rank': rank, 'sourceId': sourceId, 'limit': limit, 'offset': offset}
data_choices = ['all', 'verbatim', 'name', 'parents', 'children',
'related', 'synonyms', 'descriptions',
'distributions', 'media', 'references', 'speciesProfiles',
'vernacularNames', 'typeSpecimens', 'root']
check_data(data, data_choices)
if len2(data) == 1:
return name_usage_fetch(data, key, shortname, uuid, args, **kwargs)
else:
return [name_usage_fetch(x, key, shortname, uuid, args, **kwargs) for x in data] | python | def name_usage(key = None, name = None, data = 'all', language = None,
datasetKey = None, uuid = None, sourceId = None, rank = None, shortname = None,
limit = 100, offset = None, **kwargs):
'''
Lookup details for specific names in all taxonomies in GBIF.
:param key: [fixnum] A GBIF key for a taxon
:param name: [str] Filters by a case insensitive, canonical namestring,
e.g. 'Puma concolor'
:param data: [str] The type of data to get. Default: ``all``. Options: ``all``,
``verbatim``, ``name``, ``parents``, ``children``,
``related``, ``synonyms``, ``descriptions``, ``distributions``, ``media``,
``references``, ``speciesProfiles``, ``vernacularNames``, ``typeSpecimens``,
``root``
:param language: [str] Language, default is English
:param datasetKey: [str] Filters by the dataset's key (a uuid)
:param uuid: [str] A uuid for a dataset. Should give exact same results as datasetKey.
:param sourceId: [fixnum] Filters by the source identifier.
:param rank: [str] Taxonomic rank. Filters by taxonomic rank as one of:
``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``, ``FORM``, ``GENUS``, ``INFORMAL``,
``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``, ``INFRASUBSPECIFIC_NAME``,
``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``, ``SUBFAMILY``,
``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``,
``SUPERPHYLUM``, ``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY``
:param shortname: [str] A short name (the upstream API docs give no further detail)
:param limit: [fixnum] Number of records to return. Default: ``100``. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
References: http://www.gbif.org/developer/species#nameUsages
Usage::
from pygbif import species
species.name_usage(key=1)
# Name usage for a taxonomic name
species.name_usage(name='Puma', rank="GENUS")
# All name usages
species.name_usage()
# References for a name usage
species.name_usage(key=2435099, data='references')
# Species profiles, descriptions
species.name_usage(key=3119195, data='speciesProfiles')
species.name_usage(key=3119195, data='descriptions')
species.name_usage(key=2435099, data='children')
# Vernacular names for a name usage
species.name_usage(key=3119195, data='vernacularNames')
# Limit number of results returned
species.name_usage(key=3119195, data='vernacularNames', limit=3)
# Search for names by dataset with datasetKey parameter
species.name_usage(datasetKey="d7dddbf4-2cf0-4f39-9b2a-bb099caae36c")
# Search for a particular language
species.name_usage(key=3119195, language="FRENCH", data='vernacularNames')
'''
args = {'language': language, 'name': name, 'datasetKey': datasetKey,
'rank': rank, 'sourceId': sourceId, 'limit': limit, 'offset': offset}
data_choices = ['all', 'verbatim', 'name', 'parents', 'children',
'related', 'synonyms', 'descriptions',
'distributions', 'media', 'references', 'speciesProfiles',
'vernacularNames', 'typeSpecimens', 'root']
check_data(data, data_choices)
if len2(data) == 1:
return name_usage_fetch(data, key, shortname, uuid, args, **kwargs)
else:
return [name_usage_fetch(x, key, shortname, uuid, args, **kwargs) for x in data] | [
"def",
"name_usage",
"(",
"key",
"=",
"None",
",",
"name",
"=",
"None",
",",
"data",
"=",
"'all'",
",",
"language",
"=",
"None",
",",
"datasetKey",
"=",
"None",
",",
"uuid",
"=",
"None",
",",
"sourceId",
"=",
"None",
",",
"rank",
"=",
"None",
",",
"shortname",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"'language'",
":",
"language",
",",
"'name'",
":",
"name",
",",
"'datasetKey'",
":",
"datasetKey",
",",
"'rank'",
":",
"rank",
",",
"'sourceId'",
":",
"sourceId",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
"}",
"data_choices",
"=",
"[",
"'all'",
",",
"'verbatim'",
",",
"'name'",
",",
"'parents'",
",",
"'children'",
",",
"'related'",
",",
"'synonyms'",
",",
"'descriptions'",
",",
"'distributions'",
",",
"'media'",
",",
"'references'",
",",
"'speciesProfiles'",
",",
"'vernacularNames'",
",",
"'typeSpecimens'",
",",
"'root'",
"]",
"check_data",
"(",
"data",
",",
"data_choices",
")",
"if",
"len2",
"(",
"data",
")",
"==",
"1",
":",
"return",
"name_usage_fetch",
"(",
"data",
",",
"key",
",",
"shortname",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"[",
"name_usage_fetch",
"(",
"x",
",",
"key",
",",
"shortname",
",",
"uuid",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"x",
"in",
"data",
"]"
] | Lookup details for specific names in all taxonomies in GBIF.
:param key: [fixnum] A GBIF key for a taxon
:param name: [str] Filters by a case insensitive, canonical namestring,
e.g. 'Puma concolor'
:param data: [str] The type of data to get. Default: ``all``. Options: ``all``,
``verbatim``, ``name``, ``parents``, ``children``,
``related``, ``synonyms``, ``descriptions``, ``distributions``, ``media``,
``references``, ``speciesProfiles``, ``vernacularNames``, ``typeSpecimens``,
``root``
:param language: [str] Language, default is English
:param datasetKey: [str] Filters by the dataset's key (a uuid)
:param uuid: [str] A uuid for a dataset. Should give exact same results as datasetKey.
:param sourceId: [fixnum] Filters by the source identifier.
:param rank: [str] Taxonomic rank. Filters by taxonomic rank as one of:
``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``, ``FORM``, ``GENUS``, ``INFORMAL``,
``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``, ``INFRASUBSPECIFIC_NAME``,
``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``, ``SUBFAMILY``,
``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``,
``SUPERPHYLUM``, ``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY``
:param shortname: [str] A short name (the upstream API docs give no further detail)
:param limit: [fixnum] Number of records to return. Default: ``100``. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
References: http://www.gbif.org/developer/species#nameUsages
Usage::
from pygbif import species
species.name_usage(key=1)
# Name usage for a taxonomic name
species.name_usage(name='Puma', rank="GENUS")
# All name usages
species.name_usage()
# References for a name usage
species.name_usage(key=2435099, data='references')
# Species profiles, descriptions
species.name_usage(key=3119195, data='speciesProfiles')
species.name_usage(key=3119195, data='descriptions')
species.name_usage(key=2435099, data='children')
# Vernacular names for a name usage
species.name_usage(key=3119195, data='vernacularNames')
# Limit number of results returned
species.name_usage(key=3119195, data='vernacularNames', limit=3)
# Search for names by dataset with datasetKey parameter
species.name_usage(datasetKey="d7dddbf4-2cf0-4f39-9b2a-bb099caae36c")
# Search for a particular language
species.name_usage(key=3119195, language="FRENCH", data='vernacularNames') | [
"Lookup",
"details",
"for",
"specific",
"names",
"in",
"all",
"taxonomies",
"in",
"GBIF",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/species/name_usage.py#L3-L76 |
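Since ``limit`` caps out at 1000 and many taxa have more records than that, a paging loop is the natural companion to this endpoint. A minimal sketch, assuming the response carries the usual GBIF paging keys (``results``, ``endOfRecords``):

```python
# Page through all vernacular names for a taxon (sketch).
from pygbif import species

offset, names = 0, []
while True:
    res = species.name_usage(key=3119195, data='vernacularNames',
                             limit=100, offset=offset)
    names.extend(res.get('results', []))
    if res.get('endOfRecords', True):   # stop when GBIF says there is no more
        break
    offset += 100
print(len(names), "vernacular names collected")
```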
sckott/pygbif | pygbif/occurrences/download.py | _check_environ | def _check_environ(variable, value):
"""check if a variable is present in the environmental variables"""
if is_not_none(value):
return value
else:
value = os.environ.get(variable)
if is_none(value):
stop(''.join([variable,
""" not supplied and no entry in environmental
variables"""]))
else:
return value | python | def _check_environ(variable, value):
"""check if a variable is present in the environmental variables"""
if is_not_none(value):
return value
else:
value = os.environ.get(variable)
if is_none(value):
stop(''.join([variable,
""" not supplied and no entry in environmental
variables"""]))
else:
return value | [
"def",
"_check_environ",
"(",
"variable",
",",
"value",
")",
":",
"if",
"is_not_none",
"(",
"value",
")",
":",
"return",
"value",
"else",
":",
"value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"variable",
")",
"if",
"is_none",
"(",
"value",
")",
":",
"stop",
"(",
"''",
".",
"join",
"(",
"[",
"variable",
",",
"\"\"\" not supplied and no entry in environmental\n variables\"\"\"",
"]",
")",
")",
"else",
":",
"return",
"value"
] | check if a variable is present in the environment variables | [
"check",
"if",
"a",
"variable",
"is",
"present",
"in",
"the",
"environmental",
"variables"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L18-L29 |
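The fallback order here is easy to miss: an explicit argument always wins, and the environment variable is only consulted when the argument is ``None``. A standalone re-implementation for illustration (using a plain exception instead of the library's ``stop`` helper):

```python
import os

def check_environ(variable, value):
    # Explicit argument wins; fall back to the environment variable.
    if value is not None:
        return value
    value = os.environ.get(variable)
    if value is None:
        raise Exception(variable + " not supplied and no entry in environment variables")
    return value

os.environ["GBIF_USER"] = "jane"                 # hypothetical credentials
print(check_environ("GBIF_USER", None))          # -> "jane" (from the environment)
print(check_environ("GBIF_USER", "sckott"))      # -> "sckott" (argument wins)
```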
sckott/pygbif | pygbif/occurrences/download.py | download | def download(queries, user=None, pwd=None,
email=None, pred_type='and'):
"""
Spin up a download request for GBIF occurrence data.
:param queries: One or more query arguments to kick off a download job.
See Details.
:type queries: str or list
:param pred_type: (character) One of ``equals`` (``=``), ``and`` (``&``),
``or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``),
``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``),
``in``, ``within``, ``not`` (``!``), ``like``
:param user: (character) User name within GBIF's website.
Required. Set in your env vars with the option ``GBIF_USER``
:param pwd: (character) User password within GBIF's website. Required.
Set in your env vars with the option ``GBIF_PWD``
:param email: (character) Email address to receive a notice when the
download is done. Required. Set in your env vars with the option ``GBIF_EMAIL``
Arguments have to be passed as character strings (e.g., ``country = US``),
with a space between key (``country``), operator (``=``), and value (``US``).
See the ``pred_type`` parameter for possible options for the operator.
This character string is parsed internally.
Acceptable arguments to ``...`` (args) are:
- taxonKey = ``TAXON_KEY``
- scientificName = ``SCIENTIFIC_NAME``
- country = ``COUNTRY``
- publishingCountry = ``PUBLISHING_COUNTRY``
- hasCoordinate = ``HAS_COORDINATE``
- hasGeospatialIssue = ``HAS_GEOSPATIAL_ISSUE``
- typeStatus = ``TYPE_STATUS``
- recordNumber = ``RECORD_NUMBER``
- lastInterpreted = ``LAST_INTERPRETED``
- continent = ``CONTINENT``
- geometry = ``GEOMETRY``
- basisOfRecord = ``BASIS_OF_RECORD``
- datasetKey = ``DATASET_KEY``
- eventDate = ``EVENT_DATE``
- catalogNumber = ``CATALOG_NUMBER``
- year = ``YEAR``
- month = ``MONTH``
- decimalLatitude = ``DECIMAL_LATITUDE``
- decimalLongitude = ``DECIMAL_LONGITUDE``
- elevation = ``ELEVATION``
- depth = ``DEPTH``
- institutionCode = ``INSTITUTION_CODE``
- collectionCode = ``COLLECTION_CODE``
- issue = ``ISSUE``
- mediatype = ``MEDIA_TYPE``
- recordedBy = ``RECORDED_BY``
- repatriated = ``REPATRIATED``
See the API docs http://www.gbif.org/developer/occurrence#download
for more info, and the predicates docs
http://www.gbif.org/developer/occurrence#predicates
GBIF has a limit of 12,000 characters for download queries - so
if your download request is really long and complex, consider
breaking it up into multiple requests by one factor or another.
:return: A dictionary of results
Usage::
from pygbif import occurrences as occ
occ.download('basisOfRecord = LITERATURE')
occ.download('taxonKey = 3119195')
occ.download('decimalLatitude > 50')
occ.download('elevation >= 9000')
occ.download('decimalLatitude >= 65')
occ.download('country = US')
occ.download('institutionCode = TLMF')
occ.download('catalogNumber = Bird.27847588')
res = occ.download(['taxonKey = 7264332', 'hasCoordinate = TRUE'])
# pass output to download_meta for more information
occ.download_meta(occ.download('decimalLatitude > 75'))
# Multiple queries
gg = occ.download(['decimalLatitude >= 65',
'decimalLatitude <= -65'], pred_type='or')
gg = occ.download(['depth = 80', 'taxonKey = 2343454'],
pred_type='or')
# Repatriated data for Costa Rica
occ.download(['country = CR', 'repatriated = true'])
"""
user = _check_environ('GBIF_USER', user)
pwd = _check_environ('GBIF_PWD', pwd)
email = _check_environ('GBIF_EMAIL', email)
if isinstance(queries, str):
queries = [queries]
keyval = [_parse_args(z) for z in queries]
# USE GBIFDownload class to set up the predicates
req = GbifDownload(user, email)
req.main_pred_type = pred_type
for predicate in keyval:
req.add_predicate(predicate['key'],
predicate['value'],
predicate['type'])
out = req.post_download(user, pwd)
return out, req.payload | python | def download(queries, user=None, pwd=None,
email=None, pred_type='and'):
"""
Spin up a download request for GBIF occurrence data.
:param queries: One or more query arguments to kick off a download job.
See Details.
:type queries: str or list
:param pred_type: (character) One of ``equals`` (``=``), ``and`` (``&``),
``or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``),
``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``),
``in``, ``within``, ``not`` (``!``), ``like``
:param user: (character) User name within GBIF's website.
Required. Set in your env vars with the option ``GBIF_USER``
:param pwd: (character) User password within GBIF's website. Required.
Set in your env vars with the option ``GBIF_PWD``
:param email: (character) Email address to receive a notice when the
download is done. Required. Set in your env vars with the option ``GBIF_EMAIL``
Arguments have to be passed as character strings (e.g., ``country = US``),
with a space between key (``country``), operator (``=``), and value (``US``).
See the ``pred_type`` parameter for possible options for the operator.
This character string is parsed internally.
Acceptable arguments to ``...`` (args) are:
- taxonKey = ``TAXON_KEY``
- scientificName = ``SCIENTIFIC_NAME``
- country = ``COUNTRY``
- publishingCountry = ``PUBLISHING_COUNTRY``
- hasCoordinate = ``HAS_COORDINATE``
- hasGeospatialIssue = ``HAS_GEOSPATIAL_ISSUE``
- typeStatus = ``TYPE_STATUS``
- recordNumber = ``RECORD_NUMBER``
- lastInterpreted = ``LAST_INTERPRETED``
- continent = ``CONTINENT``
- geometry = ``GEOMETRY``
- basisOfRecord = ``BASIS_OF_RECORD``
- datasetKey = ``DATASET_KEY``
- eventDate = ``EVENT_DATE``
- catalogNumber = ``CATALOG_NUMBER``
- year = ``YEAR``
- month = ``MONTH``
- decimalLatitude = ``DECIMAL_LATITUDE``
- decimalLongitude = ``DECIMAL_LONGITUDE``
- elevation = ``ELEVATION``
- depth = ``DEPTH``
- institutionCode = ``INSTITUTION_CODE``
- collectionCode = ``COLLECTION_CODE``
- issue = ``ISSUE``
- mediatype = ``MEDIA_TYPE``
- recordedBy = ``RECORDED_BY``
- repatriated = ``REPATRIATED``
See the API docs http://www.gbif.org/developer/occurrence#download
for more info, and the predicates docs
http://www.gbif.org/developer/occurrence#predicates
GBIF has a limit of 12,000 characters for download queries - so
if your download request is really long and complex, consider
breaking it up into multiple requests by one factor or another.
:return: A dictionary of results
Usage::
from pygbif import occurrences as occ
occ.download('basisOfRecord = LITERATURE')
occ.download('taxonKey = 3119195')
occ.download('decimalLatitude > 50')
occ.download('elevation >= 9000')
occ.download('decimalLatitude >= 65')
occ.download('country = US')
occ.download('institutionCode = TLMF')
occ.download('catalogNumber = Bird.27847588')
res = occ.download(['taxonKey = 7264332', 'hasCoordinate = TRUE'])
# pass output to download_meta for more information
occ.download_meta(occ.download('decimalLatitude > 75'))
# Multiple queries
gg = occ.download(['decimalLatitude >= 65',
'decimalLatitude <= -65'], pred_type='or')
gg = occ.download(['depth = 80', 'taxonKey = 2343454'],
pred_type='or')
# Repatriated data for Costa Rica
occ.download(['country = CR', 'repatriated = true'])
"""
user = _check_environ('GBIF_USER', user)
pwd = _check_environ('GBIF_PWD', pwd)
email = _check_environ('GBIF_EMAIL', email)
if isinstance(queries, str):
queries = [queries]
keyval = [_parse_args(z) for z in queries]
# USE GBIFDownload class to set up the predicates
req = GbifDownload(user, email)
req.main_pred_type = pred_type
for predicate in keyval:
req.add_predicate(predicate['key'],
predicate['value'],
predicate['type'])
out = req.post_download(user, pwd)
return out, req.payload | [
"def",
"download",
"(",
"queries",
",",
"user",
"=",
"None",
",",
"pwd",
"=",
"None",
",",
"email",
"=",
"None",
",",
"pred_type",
"=",
"'and'",
")",
":",
"user",
"=",
"_check_environ",
"(",
"'GBIF_USER'",
",",
"user",
")",
"pwd",
"=",
"_check_environ",
"(",
"'GBIF_PWD'",
",",
"pwd",
")",
"email",
"=",
"_check_environ",
"(",
"'GBIF_EMAIL'",
",",
"email",
")",
"if",
"isinstance",
"(",
"queries",
",",
"str",
")",
":",
"queries",
"=",
"[",
"queries",
"]",
"keyval",
"=",
"[",
"_parse_args",
"(",
"z",
")",
"for",
"z",
"in",
"queries",
"]",
"# USE GBIFDownload class to set up the predicates",
"req",
"=",
"GbifDownload",
"(",
"user",
",",
"email",
")",
"req",
".",
"main_pred_type",
"=",
"pred_type",
"for",
"predicate",
"in",
"keyval",
":",
"req",
".",
"add_predicate",
"(",
"predicate",
"[",
"'key'",
"]",
",",
"predicate",
"[",
"'value'",
"]",
",",
"predicate",
"[",
"'type'",
"]",
")",
"out",
"=",
"req",
".",
"post_download",
"(",
"user",
",",
"pwd",
")",
"return",
"out",
",",
"req",
".",
"payload"
] | Spin up a download request for GBIF occurrence data.
:param queries: One or more query arguments to kick off a download job.
See Details.
:type queries: str or list
:param pred_type: (character) One of ``equals`` (``=``), ``and`` (``&``),
``or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``),
``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``),
``in``, ``within``, ``not`` (``!``), ``like``
:param user: (character) User name within GBIF's website.
Required. Set in your env vars with the option ``GBIF_USER``
:param pwd: (character) User password within GBIF's website. Required.
Set in your env vars with the option ``GBIF_PWD``
:param email: (character) Email address to receive a notice when the
download is done. Required. Set in your env vars with the option ``GBIF_EMAIL``
Arguments have to be passed as character strings (e.g., ``country = US``),
with a space between key (``country``), operator (``=``), and value (``US``).
See the ``pred_type`` parameter for possible options for the operator.
This character string is parsed internally.
Acceptable arguments to ``...`` (args) are:
- taxonKey = ``TAXON_KEY``
- scientificName = ``SCIENTIFIC_NAME``
- country = ``COUNTRY``
- publishingCountry = ``PUBLISHING_COUNTRY``
- hasCoordinate = ``HAS_COORDINATE``
- hasGeospatialIssue = ``HAS_GEOSPATIAL_ISSUE``
- typeStatus = ``TYPE_STATUS``
- recordNumber = ``RECORD_NUMBER``
- lastInterpreted = ``LAST_INTERPRETED``
- continent = ``CONTINENT``
- geometry = ``GEOMETRY``
- basisOfRecord = ``BASIS_OF_RECORD``
- datasetKey = ``DATASET_KEY``
- eventDate = ``EVENT_DATE``
- catalogNumber = ``CATALOG_NUMBER``
- year = ``YEAR``
- month = ``MONTH``
- decimalLatitude = ``DECIMAL_LATITUDE``
- decimalLongitude = ``DECIMAL_LONGITUDE``
- elevation = ``ELEVATION``
- depth = ``DEPTH``
- institutionCode = ``INSTITUTION_CODE``
- collectionCode = ``COLLECTION_CODE``
- issue = ``ISSUE``
- mediatype = ``MEDIA_TYPE``
- recordedBy = ``RECORDED_BY``
- repatriated = ``REPATRIATED``
See the API docs http://www.gbif.org/developer/occurrence#download
for more info, and the predicates docs
http://www.gbif.org/developer/occurrence#predicates
GBIF has a limit of 12,000 characters for download queries - so
if your download request is really long and complex, consider
breaking it up into multiple requests by one factor or another.
:return: A dictionary of results
Usage::
from pygbif import occurrences as occ
occ.download('basisOfRecord = LITERATURE')
occ.download('taxonKey = 3119195')
occ.download('decimalLatitude > 50')
occ.download('elevation >= 9000')
occ.download('decimalLatitude >= 65')
occ.download('country = US')
occ.download('institutionCode = TLMF')
occ.download('catalogNumber = Bird.27847588')
res = occ.download(['taxonKey = 7264332', 'hasCoordinate = TRUE'])
# pass output to download_meta for more information
occ.download_meta(occ.download('decimalLatitude > 75'))
# Multiple queries
gg = occ.download(['decimalLatitude >= 65',
'decimalLatitude <= -65'], pred_type='or')
gg = occ.download(['depth = 80', 'taxonKey = 2343454'],
pred_type='or')
# Repatriated data for Costa Rica
occ.download(['country = CR', 'repatriated = true']) | [
"Spin",
"up",
"a",
"download",
"request",
"for",
"GBIF",
"occurrence",
"data",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L32-L143 |
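Note that ``download`` returns a two-element tuple, per the final ``return out, req.payload`` above: the server response (typically the download key) and the JSON payload that was posted. A sketch, assuming ``GBIF_USER``/``GBIF_PWD``/``GBIF_EMAIL`` are set in the environment:

```python
from pygbif import occurrences as occ

res, payload = occ.download(['taxonKey = 2435099',
                             'hasCoordinate = TRUE'], pred_type='and')
print(res)                    # server response; typically the download key
print(payload['predicate'])   # the combined predicate that was POSTed
```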
sckott/pygbif | pygbif/occurrences/download.py | download_list | def download_list(user=None, pwd=None, limit=20, offset=0):
"""
Lists the downloads created by a user.
:param user: [str] A user name, look at env var ``GBIF_USER`` first
:param pwd: [str] Your password, look at env var ``GBIF_PWD`` first
:param limit: [int] Number of records to return. Default: ``20``
:param offset: [int] Record number to start at. Default: ``0``
Usage::
from pygbif import occurrences as occ
occ.download_list(user = "sckott")
occ.download_list(user = "sckott", limit = 5)
occ.download_list(user = "sckott", offset = 21)
"""
user = _check_environ('GBIF_USER', user)
pwd = _check_environ('GBIF_PWD', pwd)
url = 'http://api.gbif.org/v1/occurrence/download/user/' + user
args = {'limit': limit, 'offset': offset}
res = gbif_GET(url, args, auth=(user, pwd))
return {'meta': {'offset': res['offset'],
'limit': res['limit'],
'endofrecords': res['endOfRecords'],
'count': res['count']},
'results': res['results']} | python | def download_list(user=None, pwd=None, limit=20, offset=0):
"""
Lists the downloads created by a user.
:param user: [str] A user name, look at env var ``GBIF_USER`` first
:param pwd: [str] Your password, look at env var ``GBIF_PWD`` first
:param limit: [int] Number of records to return. Default: ``20``
:param offset: [int] Record number to start at. Default: ``0``
Usage::
from pygbif import occurrences as occ
occ.download_list(user = "sckott")
occ.download_list(user = "sckott", limit = 5)
occ.download_list(user = "sckott", offset = 21)
"""
user = _check_environ('GBIF_USER', user)
pwd = _check_environ('GBIF_PWD', pwd)
url = 'http://api.gbif.org/v1/occurrence/download/user/' + user
args = {'limit': limit, 'offset': offset}
res = gbif_GET(url, args, auth=(user, pwd))
return {'meta': {'offset': res['offset'],
'limit': res['limit'],
'endofrecords': res['endOfRecords'],
'count': res['count']},
'results': res['results']} | [
"def",
"download_list",
"(",
"user",
"=",
"None",
",",
"pwd",
"=",
"None",
",",
"limit",
"=",
"20",
",",
"offset",
"=",
"0",
")",
":",
"user",
"=",
"_check_environ",
"(",
"'GBIF_USER'",
",",
"user",
")",
"pwd",
"=",
"_check_environ",
"(",
"'GBIF_PWD'",
",",
"pwd",
")",
"url",
"=",
"'http://api.gbif.org/v1/occurrence/download/user/'",
"+",
"user",
"args",
"=",
"{",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
"}",
"res",
"=",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"auth",
"=",
"(",
"user",
",",
"pwd",
")",
")",
"return",
"{",
"'meta'",
":",
"{",
"'offset'",
":",
"res",
"[",
"'offset'",
"]",
",",
"'limit'",
":",
"res",
"[",
"'limit'",
"]",
",",
"'endofrecords'",
":",
"res",
"[",
"'endOfRecords'",
"]",
",",
"'count'",
":",
"res",
"[",
"'count'",
"]",
"}",
",",
"'results'",
":",
"res",
"[",
"'results'",
"]",
"}"
] | Lists the downloads created by a user.
:param user: [str] A user name, look at env var ``GBIF_USER`` first
:param pwd: [str] Your password, look at env var ``GBIF_PWD`` first
:param limit: [int] Number of records to return. Default: ``20``
:param offset: [int] Record number to start at. Default: ``0``
Usage::
from pygbif import occurrences as occ
occ.download_list(user = "sckott")
occ.download_list(user = "sckott", limit = 5)
occ.download_list(user = "sckott", offset = 21) | [
"Lists",
"the",
"downloads",
"created",
"by",
"a",
"user",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L331-L358 |
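A short sketch filtering the listed downloads by status; the ``meta``/``results`` structure is exactly what the function returns above, while the ``status`` field on each result is an assumption based on the download-metadata records GBIF returns:

```python
from pygbif import occurrences as occ

out = occ.download_list(user="sckott", limit=20)
done = [d for d in out['results'] if d.get('status') == 'SUCCEEDED']
print(out['meta']['count'], "downloads total;", len(done), "succeeded on this page")
```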
sckott/pygbif | pygbif/occurrences/download.py | download_get | def download_get(key, path=".", **kwargs):
"""
Get a download from GBIF.
:param key: [str] A key generated from a request, like that from ``download``
:param path: [str] Path to write zip file to. Default: ``"."``, with a ``.zip`` appended to the end.
:param **kwargs**: Further named arguments passed on to ``requests.get``
Downloads the zip file to a directory you specify on your machine.
The speed of this function is of course proportional to the size of the
file to download, and affected by your internet connection speed.
This function only downloads the file. To open and read it, see
https://github.com/BelgianBiodiversityPlatform/python-dwca-reader
Usage::
from pygbif import occurrences as occ
occ.download_get("0000066-140928181241064")
occ.download_get("0003983-140910143529206")
"""
meta = pygbif.occurrences.download_meta(key)
if meta['status'] != 'SUCCEEDED':
raise Exception('download "%s" not of status SUCCEEDED' % key)
else:
print('Download file size: %s bytes' % meta['size'])
url = 'http://api.gbif.org/v1/occurrence/download/request/' + key
path = "%s/%s.zip" % (path, key)
gbif_GET_write(url, path, **kwargs)
print("On disk at " + path)
return {'path': path, 'size': meta['size'], 'key': key} | python | def download_get(key, path=".", **kwargs):
"""
Get a download from GBIF.
:param key: [str] A key generated from a request, like that from ``download``
:param path: [str] Path to write zip file to. Default: ``"."``, with a ``.zip`` appended to the end.
:param **kwargs**: Further named arguments passed on to ``requests.get``
Downloads the zip file to a directory you specify on your machine.
The speed of this function is of course proportional to the size of the
file to download, and affected by your internet connection speed.
This function only downloads the file. To open and read it, see
https://github.com/BelgianBiodiversityPlatform/python-dwca-reader
Usage::
from pygbif import occurrences as occ
occ.download_get("0000066-140928181241064")
occ.download_get("0003983-140910143529206")
"""
meta = pygbif.occurrences.download_meta(key)
if meta['status'] != 'SUCCEEDED':
raise Exception('download "%s" not of status SUCCEEDED' % key)
else:
print('Download file size: %s bytes' % meta['size'])
url = 'http://api.gbif.org/v1/occurrence/download/request/' + key
path = "%s/%s.zip" % (path, key)
gbif_GET_write(url, path, **kwargs)
print("On disk at " + path)
return {'path': path, 'size': meta['size'], 'key': key} | [
"def",
"download_get",
"(",
"key",
",",
"path",
"=",
"\".\"",
",",
"*",
"*",
"kwargs",
")",
":",
"meta",
"=",
"pygbif",
".",
"occurrences",
".",
"download_meta",
"(",
"key",
")",
"if",
"meta",
"[",
"'status'",
"]",
"!=",
"'SUCCEEDED'",
":",
"raise",
"Exception",
"(",
"'download \"%s\" not of status SUCCEEDED'",
"%",
"key",
")",
"else",
":",
"print",
"(",
"'Download file size: %s bytes'",
"%",
"meta",
"[",
"'size'",
"]",
")",
"url",
"=",
"'http://api.gbif.org/v1/occurrence/download/request/'",
"+",
"key",
"path",
"=",
"\"%s/%s.zip\"",
"%",
"(",
"path",
",",
"key",
")",
"gbif_GET_write",
"(",
"url",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
"print",
"(",
"\"On disk at \"",
"+",
"path",
")",
"return",
"{",
"'path'",
":",
"path",
",",
"'size'",
":",
"meta",
"[",
"'size'",
"]",
",",
"'key'",
":",
"key",
"}"
] | Get a download from GBIF.
:param key: [str] A key generated from a request, like that from ``download``
:param path: [str] Path to write zip file to. Default: ``"."``, with a ``.zip`` appended to the end.
:param **kwargs**: Further named arguments passed on to ``requests.get``
Downloads the zip file to a directory you specify on your machine.
The speed of this function is of course proportional to the size of the
file to download, and affected by your internet connection speed.
This function only downloads the file. To open and read it, see
https://github.com/BelgianBiodiversityPlatform/python-dwca-reader
Usage::
from pygbif import occurrences as occ
occ.download_get("0000066-140928181241064")
occ.download_get("0003983-140910143529206") | [
"Get",
"a",
"download",
"from",
"GBIF",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L361-L391 |
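Since the function only fetches the zip (and raises unless the request's status is ``SUCCEEDED``), a natural next step is to peek inside with the standard library; proper Darwin Core parsing is left to python-dwca-reader as noted above:

```python
import zipfile
from pygbif import occurrences as occ

res = occ.download_get("0000066-140928181241064", path=".")  # key from the docstring
with zipfile.ZipFile(res['path']) as zf:
    print(zf.namelist()[:5])   # first few files inside the archive
```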
sckott/pygbif | pygbif/occurrences/download.py | GbifDownload.main_pred_type | def main_pred_type(self, value):
"""set main predicate combination type
:param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``),
``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``),
``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like``
"""
if value not in operators:
value = operator_lkup.get(value)
if value:
self._main_pred_type = value
self.payload['predicate']['type'] = self._main_pred_type
else:
raise Exception("main predicate combiner not a valid operator") | python | def main_pred_type(self, value):
"""set main predicate combination type
:param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``),
``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``),
``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like``
"""
if value not in operators:
value = operator_lkup.get(value)
if value:
self._main_pred_type = value
self.payload['predicate']['type'] = self._main_pred_type
else:
raise Exception("main predicate combiner not a valid operator") | [
"def",
"main_pred_type",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"not",
"in",
"operators",
":",
"value",
"=",
"operator_lkup",
".",
"get",
"(",
"value",
")",
"if",
"value",
":",
"self",
".",
"_main_pred_type",
"=",
"value",
"self",
".",
"payload",
"[",
"'predicate'",
"]",
"[",
"'type'",
"]",
"=",
"self",
".",
"_main_pred_type",
"else",
":",
"raise",
"Exception",
"(",
"\"main predicate combiner not a valid operator\"",
")"
] | set main predicate combination type
:param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``),
``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``),
``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like`` | [
"set",
"main",
"predicate",
"combination",
"type"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L193-L206 |
sckott/pygbif | pygbif/occurrences/download.py | GbifDownload.add_predicate | def add_predicate(self, key, value, predicate_type='equals'):
"""
add key, value, type combination of a predicate
:param key: query KEY parameter
:param value: the value used in the predicate
:param predicate_type: the type of predicate (e.g. ``equals``)
"""
if predicate_type not in operators:
predicate_type = operator_lkup.get(predicate_type)
if predicate_type:
self.predicates.append({'type': predicate_type,
'key': key,
'value': value
})
else:
raise Exception("predicate type not a valid operator") | python | def add_predicate(self, key, value, predicate_type='equals'):
"""
add key, value, type combination of a predicate
:param key: query KEY parameter
:param value: the value used in the predicate
:param predicate_type: the type of predicate (e.g. ``equals``)
"""
if predicate_type not in operators:
predicate_type = operator_lkup.get(predicate_type)
if predicate_type:
self.predicates.append({'type': predicate_type,
'key': key,
'value': value
})
else:
raise Exception("predicate type not a valid operator") | [
"def",
"add_predicate",
"(",
"self",
",",
"key",
",",
"value",
",",
"predicate_type",
"=",
"'equals'",
")",
":",
"if",
"predicate_type",
"not",
"in",
"operators",
":",
"predicate_type",
"=",
"operator_lkup",
".",
"get",
"(",
"predicate_type",
")",
"if",
"predicate_type",
":",
"self",
".",
"predicates",
".",
"append",
"(",
"{",
"'type'",
":",
"predicate_type",
",",
"'key'",
":",
"key",
",",
"'value'",
":",
"value",
"}",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"predicate type not a valid operator\"",
")"
] | add key, value, type combination of a predicate
:param key: query KEY parameter
:param value: the value used in the predicate
:param predicate_type: the type of predicate (e.g. ``equals``) | [
"add",
"key",
"value",
"type",
"combination",
"of",
"a",
"predicate"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L208-L224 |
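The two methods above can also be driven directly, which is useful for inspecting a request before posting it. A hedged sketch, assuming ``GbifDownload`` is importable from ``pygbif.occurrences.download`` (the module where it is defined):

```python
from pygbif.occurrences.download import GbifDownload

req = GbifDownload("my_user", "me@example.org")   # hypothetical credentials
req.main_pred_type = "or"                         # symbols like '|' are mapped via operator_lkup
req.add_predicate("TAXON_KEY", "2435099", "equals")
req.add_predicate("ELEVATION", "5000", ">=")      # '>=' is looked up to greaterThanOrEquals
print(req.payload['predicate']['type'])           # 'or'
print(req.predicates)                             # the two predicates queued for posting
```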
sckott/pygbif | pygbif/occurrences/download.py | GbifDownload._extract_values | def _extract_values(values_list):
"""extract values from either file or list
:param values_list: list or file name (str) with list of values
"""
values = []
# check if file or list of values to iterate
if isinstance(values_list, str):
with open(values_list) as ff:
reading = csv.reader(ff)
for j in reading:
values.append(j[0])
elif isinstance(values_list, list):
values = values_list
else:
raise Exception("input datatype not supported.")
return values | python | def _extract_values(values_list):
"""extract values from either file or list
:param values_list: list or file name (str) with list of values
"""
values = []
# check if file or list of values to iterate
if isinstance(values_list, str):
with open(values_list) as ff:
reading = csv.reader(ff)
for j in reading:
values.append(j[0])
elif isinstance(values_list, list):
values = values_list
else:
raise Exception("input datatype not supported.")
return values | [
"def",
"_extract_values",
"(",
"values_list",
")",
":",
"values",
"=",
"[",
"]",
"# check if file or list of values to iterate",
"if",
"isinstance",
"(",
"values_list",
",",
"str",
")",
":",
"with",
"open",
"(",
"values_list",
")",
"as",
"ff",
":",
"reading",
"=",
"csv",
".",
"reader",
"(",
"ff",
")",
"for",
"j",
"in",
"reading",
":",
"values",
".",
"append",
"(",
"j",
"[",
"0",
"]",
")",
"elif",
"isinstance",
"(",
"values_list",
",",
"list",
")",
":",
"values",
"=",
"values_list",
"else",
":",
"raise",
"Exception",
"(",
"\"input datatype not supported.\"",
")",
"return",
"values"
] | extract values from either file or list
:param values_list: list or file name (str) with list of values | [
"extract",
"values",
"from",
"either",
"file",
"or",
"list"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L227-L243 |
sckott/pygbif | pygbif/occurrences/download.py | GbifDownload.add_iterative_predicate | def add_iterative_predicate(self, key, values_list):
"""add an iterative predicate with a key and set of values
which it can be equal to in an ``or`` function.
The individual predicates are specified with the type ``equals`` and
combined with a type ``or``.
The main reason for this addition is the inability to use ``in`` as a
predicate type for multiple taxon_key values
(cfr. http://dev.gbif.org/issues/browse/POR-2753)
:param key: the query KEY parameter (e.g. ``TAXON_KEY``).
:param values_list: Filename or list containing the taxon keys to be
searched.
"""
values = self._extract_values(values_list)
predicate = {'type': 'equals', 'key': key, 'value': None}
predicates = []
while values:
predicate['value'] = values.pop()
predicates.append(predicate.copy())
self.predicates.append({'type': 'or', 'predicates': predicates}) | python | def add_iterative_predicate(self, key, values_list):
"""add an iterative predicate with a key and set of values
which it can be equal to in an ``or`` function.
The individual predicates are specified with the type ``equals`` and
combined with a type ``or``.
The main reason for this addition is the inability to use ``in`` as a
predicate type for multiple taxon_key values
(cfr. http://dev.gbif.org/issues/browse/POR-2753)
:param key: the query KEY parameter (e.g. ``TAXON_KEY``).
:param values_list: Filename or list containing the taxon keys to be
searched.
"""
values = self._extract_values(values_list)
predicate = {'type': 'equals', 'key': key, 'value': None}
predicates = []
while values:
predicate['value'] = values.pop()
predicates.append(predicate.copy())
self.predicates.append({'type': 'or', 'predicates': predicates}) | [
"def",
"add_iterative_predicate",
"(",
"self",
",",
"key",
",",
"values_list",
")",
":",
"values",
"=",
"self",
".",
"_extract_values",
"(",
"values_list",
")",
"predicate",
"=",
"{",
"'type'",
":",
"'equals'",
",",
"'key'",
":",
"key",
",",
"'value'",
":",
"None",
"}",
"predicates",
"=",
"[",
"]",
"while",
"values",
":",
"predicate",
"[",
"'value'",
"]",
"=",
"values",
".",
"pop",
"(",
")",
"predicates",
".",
"append",
"(",
"predicate",
".",
"copy",
"(",
")",
")",
"self",
".",
"predicates",
".",
"append",
"(",
"{",
"'type'",
":",
"'or'",
",",
"'predicates'",
":",
"predicates",
"}",
")"
] | add an iterative predicate with a key and set of values
which it can be equal to in an ``or`` function.
The individual predicates are specified with the type ``equals`` and
combined with a type ``or``.
The main reason for this addition is the inability to use ``in`` as a
predicate type for multiple taxon_key values
(cfr. http://dev.gbif.org/issues/browse/POR-2753)
:param key: the query KEY parameter (e.g. ``TAXON_KEY``).
:param values_list: Filename or list containing the taxon keys to be
searched. | [
"add",
"an",
"iterative",
"predicate",
"with",
"a",
"key",
"and",
"set",
"of",
"values",
"which",
"it",
"can",
"be",
"equal",
"to",
"in",
"and",
"or",
"function",
".",
"The",
"individual",
"predicates",
"are",
"specified",
"with",
"the",
"type",
"equals",
"and",
"combined",
"with",
"a",
"type",
"or",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L245-L267 |
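A sketch of the expansion this method performs: a list of taxon keys becomes a set of ``equals`` predicates wrapped in a single ``or`` (the workaround for POR-2753 described above). Passing a filename instead of a list would read one value per row via ``csv``:

```python
from pygbif.occurrences.download import GbifDownload  # assumed import path

req = GbifDownload("my_user", "me@example.org")       # hypothetical credentials
req.add_iterative_predicate("TAXON_KEY", [2435099, 2480498, 5229490])
combined = req.predicates[-1]
print(combined['type'])                # 'or'
print(len(combined['predicates']))     # 3 equals-predicates, one per key
```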
sckott/pygbif | pygbif/occurrences/get.py | get | def get(key, **kwargs):
'''
Gets details for a single, interpreted occurrence
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get(key = 1258202889)
occurrences.get(key = 1227768771)
occurrences.get(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key)
out = gbif_GET(url, {}, **kwargs)
return out | python | def get(key, **kwargs):
'''
Gets details for a single, interpreted occurrence
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get(key = 1258202889)
occurrences.get(key = 1227768771)
occurrences.get(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key)
out = gbif_GET(url, {}, **kwargs)
return out | [
"def",
"get",
"(",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/'",
"+",
"str",
"(",
"key",
")",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Gets details for a single, interpreted occurrence
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get(key = 1258202889)
occurrences.get(key = 1227768771)
occurrences.get(key = 1227769518) | [
"Gets",
"details",
"for",
"a",
"single",
"interpreted",
"occurrence"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/get.py#L3-L20 |
sckott/pygbif | pygbif/occurrences/get.py | get_verbatim | def get_verbatim(key, **kwargs):
'''
Gets a verbatim occurrence record without any interpretation
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get_verbatim(key = 1258202889)
occurrences.get_verbatim(key = 1227768771)
occurrences.get_verbatim(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key) + '/verbatim'
out = gbif_GET(url, {}, **kwargs)
return out | python | def get_verbatim(key, **kwargs):
'''
Gets a verbatim occurrence record without any interpretation
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get_verbatim(key = 1258202889)
occurrences.get_verbatim(key = 1227768771)
occurrences.get_verbatim(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key) + '/verbatim'
out = gbif_GET(url, {}, **kwargs)
return out | [
"def",
"get_verbatim",
"(",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/'",
"+",
"str",
"(",
"key",
")",
"+",
"'/verbatim'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Gets a verbatim occurrence record without any interpretation
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get_verbatim(key = 1258202889)
occurrences.get_verbatim(key = 1227768771)
occurrences.get_verbatim(key = 1227769518) | [
"Gets",
"a",
"verbatim",
"occurrence",
"record",
"without",
"any",
"interpretation"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/get.py#L22-L39 |
sckott/pygbif | pygbif/occurrences/get.py | get_fragment | def get_fragment(key, **kwargs):
'''
Get a single occurrence fragment in its raw form (xml or json)
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get_fragment(key = 1052909293)
occurrences.get_fragment(key = 1227768771)
occurrences.get_fragment(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key) + '/fragment'
out = gbif_GET(url, {}, **kwargs)
return out | python | def get_fragment(key, **kwargs):
'''
Get a single occurrence fragment in its raw form (xml or json)
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get_fragment(key = 1052909293)
occurrences.get_fragment(key = 1227768771)
occurrences.get_fragment(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key) + '/fragment'
out = gbif_GET(url, {}, **kwargs)
return out | [
"def",
"get_fragment",
"(",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/'",
"+",
"str",
"(",
"key",
")",
"+",
"'/fragment'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Get a single occurrence fragment in its raw form (xml or json)
:param key: [int] A GBIF occurrence key
:return: A dictionary of results
Usage::
from pygbif import occurrences
occurrences.get_fragment(key = 1052909293)
occurrences.get_fragment(key = 1227768771)
occurrences.get_fragment(key = 1227769518) | [
"Get",
"a",
"single",
"occurrence",
"fragment",
"in",
"its",
"raw",
"form",
"(",
"xml",
"or",
"json",
")"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/get.py#L41-L58 |
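The three getters complement each other on the same record: interpreted, verbatim, and raw. A sketch using the keys from the docstrings above; the field names read out of the dicts follow the GBIF occurrence API and are not guaranteed here:

```python
from pygbif import occurrences

interp = occurrences.get(key=1258202889)
verb = occurrences.get_verbatim(key=1258202889)
frag = occurrences.get_fragment(key=1052909293)
print(interp.get('scientificName'), interp.get('decimalLatitude'))
print(sorted(verb.keys())[:5])   # verbatim terms, often Darwin Core URIs
print(type(frag))                # raw form: parsed json, or an xml string
```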
sckott/pygbif | pygbif/species/name_backbone.py | name_backbone | def name_backbone(name, rank=None, kingdom=None, phylum=None, clazz=None,
order=None, family=None, genus=None, strict=False, verbose=False,
offset=None, limit=100, **kwargs):
'''
Lookup names in the GBIF backbone taxonomy.
:param name: [str] Full scientific name potentially with authorship (required)
:param rank: [str] The rank given as our rank enum. (optional)
:param kingdom: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param phylum: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param class: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param order: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param family: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param genus: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param strict: [bool] If True it (fuzzy) matches only the given name, but never a
taxon in the upper classification (optional)
:param verbose: [bool] If True show alternative matches considered which had been rejected.
:param offset: [int] Record to start at. Default: ``0``
:param limit: [int] Number of results to return. Default: ``100``
A list for a single taxon with many slots (with ``verbose=False`` - default), or a
list of length two, the first element for the suggested taxon match, and the second
with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``).
If you don't get a match GBIF gives back a list of length 3 with slots synonym,
confidence, and ``matchType='NONE'``.
reference: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_backbone(name='Helianthus annuus', kingdom='plants')
species.name_backbone(name='Helianthus', rank='genus', kingdom='plants')
species.name_backbone(name='Poa', rank='genus', family='Poaceae')
# Verbose - gives back alternatives
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True)
# Strictness
species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False)
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True)
# Non-existent name
species.name_backbone(name='Aso')
# Multiple equal matches
species.name_backbone(name='Oenante')
'''
url = gbif_baseurl + 'species/match'
args = {'name': name, 'rank': rank, 'kingdom': kingdom, 'phylum': phylum,
'class': clazz, 'order': order, 'family': family, 'genus': genus,
'strict': strict, 'verbose': verbose, 'offset': offset, 'limit': limit}
tt = gbif_GET(url, args, **kwargs)
return tt | python | def name_backbone(name, rank=None, kingdom=None, phylum=None, clazz=None,
order=None, family=None, genus=None, strict=False, verbose=False,
offset=None, limit=100, **kwargs):
'''
Lookup names in the GBIF backbone taxonomy.
:param name: [str] Full scientific name potentially with authorship (required)
:param rank: [str] The rank given as our rank enum. (optional)
:param kingdom: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param phylum: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param class: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param order: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param family: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param genus: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param strict: [bool] If True it (fuzzy) matches only the given name, but never a
taxon in the upper classification (optional)
:param verbose: [bool] If True show alternative matches considered which had been rejected.
:param offset: [int] Record to start at. Default: ``0``
:param limit: [int] Number of results to return. Default: ``100``
A list for a single taxon with many slots (with ``verbose=False`` - default), or a
list of length two, the first element for the suggested taxon match, and the second
with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``).
If you don't get a match GBIF gives back a list of length 3 with slots synonym,
confidence, and ``matchType='NONE'``.
reference: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_backbone(name='Helianthus annuus', kingdom='plants')
species.name_backbone(name='Helianthus', rank='genus', kingdom='plants')
species.name_backbone(name='Poa', rank='genus', family='Poaceae')
# Verbose - gives back alternatives
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True)
# Strictness
species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False)
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True)
# Non-existent name
species.name_backbone(name='Aso')
# Multiple equal matches
species.name_backbone(name='Oenante')
'''
url = gbif_baseurl + 'species/match'
args = {'name': name, 'rank': rank, 'kingdom': kingdom, 'phylum': phylum,
'class': clazz, 'order': order, 'family': family, 'genus': genus,
'strict': strict, 'verbose': verbose, 'offset': offset, 'limit': limit}
tt = gbif_GET(url, args, **kwargs)
return tt | [
"def",
"name_backbone",
"(",
"name",
",",
"rank",
"=",
"None",
",",
"kingdom",
"=",
"None",
",",
"phylum",
"=",
"None",
",",
"clazz",
"=",
"None",
",",
"order",
"=",
"None",
",",
"family",
"=",
"None",
",",
"genus",
"=",
"None",
",",
"strict",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'species/match'",
"args",
"=",
"{",
"'name'",
":",
"name",
",",
"'rank'",
":",
"rank",
",",
"'kingdom'",
":",
"kingdom",
",",
"'phylum'",
":",
"phylum",
",",
"'class'",
":",
"clazz",
",",
"'order'",
":",
"order",
",",
"'family'",
":",
"family",
",",
"'genus'",
":",
"genus",
",",
"'strict'",
":",
"strict",
",",
"'verbose'",
":",
"verbose",
",",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
"}",
"tt",
"=",
"gbif_GET",
"(",
"url",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"tt"
] | Lookup names in the GBIF backbone taxonomy.
:param name: [str] Full scientific name potentially with authorship (required)
:param rank: [str] The rank given as our rank enum. (optional)
:param kingdom: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param phylum: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param class: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param order: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param family: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param genus: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param strict: [bool] If True it (fuzzy) matches only the given name, but never a
taxon in the upper classification (optional)
:param verbose: [bool] If True show alternative matches considered which had been rejected.
:param offset: [int] Record to start at. Default: ``0``
:param limit: [int] Number of results to return. Default: ``100``
A list for a single taxon with many slots (with ``verbose=False`` - default), or a
list of length two, the first element for the suggested taxon match, and the second
with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``).
If you don't get a match GBIF gives back a list of length 3 with slots synonym,
confidence, and ``matchType='NONE'``.
reference: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_backbone(name='Helianthus annuus', kingdom='plants')
species.name_backbone(name='Helianthus', rank='genus', kingdom='plants')
species.name_backbone(name='Poa', rank='genus', family='Poaceae')
# Verbose - gives back alternatives
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True)
# Strictness
species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False)
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True)
# Non-existent name
species.name_backbone(name='Aso')
# Multiple equal matches
species.name_backbone(name='Oenante') | [
"Lookup",
"names",
"in",
"the",
"GBIF",
"backbone",
"taxonomy",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/species/name_backbone.py#L3-L63 |
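Because a failed lookup still returns a small result (``matchType='NONE'`` plus ``confidence`` and ``synonym``, per the docstring), batch resolution should check the match type. ``scientificName`` and ``usageKey`` below are the usual GBIF match fields, assumed rather than guaranteed:

```python
from pygbif import species

for name in ['Helianthus annuus', 'Aso']:
    res = species.name_backbone(name=name, kingdom='plants')
    if res.get('matchType') == 'NONE':
        print(name, '-> no match (confidence', res.get('confidence'), ')')
    else:
        print(name, '->', res.get('scientificName'), res.get('usageKey'))
```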
sckott/pygbif | pygbif/species/name_parser.py | name_parser | def name_parser(name, **kwargs):
'''
Parse taxon names using the GBIF name parser
:param name: [str] A scientific name, or list of scientific names. (required)
reference: http://www.gbif.org/developer/species#parser
Usage::
from pygbif import species
species.name_parser('x Agropogon littoralis')
species.name_parser(['Arrhenatherum elatius var. elatius',
'Secale cereale subsp. cereale', 'Secale cereale ssp. cereale',
'Vanessa atalanta (Linnaeus, 1758)'])
'''
url = gbif_baseurl + 'parser/name'
if name.__class__ == str:
name = [name]
return gbif_POST(url, name, **kwargs) | python | def name_parser(name, **kwargs):
'''
Parse taxon names using the GBIF name parser
:param name: [str] A scientific name, or list of scientific names. (required)
reference: http://www.gbif.org/developer/species#parser
Usage::
from pygbif import species
species.name_parser('x Agropogon littoralis')
species.name_parser(['Arrhenatherum elatius var. elatius',
'Secale cereale subsp. cereale', 'Secale cereale ssp. cereale',
'Vanessa atalanta (Linnaeus, 1758)'])
'''
url = gbif_baseurl + 'parser/name'
if name.__class__ == str:
name = [name]
return gbif_POST(url, name, **kwargs) | [
"def",
"name_parser",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'parser/name'",
"if",
"name",
".",
"__class__",
"==",
"str",
":",
"name",
"=",
"[",
"name",
"]",
"return",
"gbif_POST",
"(",
"url",
",",
"name",
",",
"*",
"*",
"kwargs",
")"
] | Parse taxon names using the GBIF name parser
:param name: [str] A character vector of scientific names. (required)
reference: http://www.gbif.org/developer/species#parser
Usage::
from pygbif import species
species.name_parser('x Agropogon littoralis')
species.name_parser(['Arrhenatherum elatius var. elatius',
'Secale cereale subsp. cereale', 'Secale cereale ssp. cereale',
'Vanessa atalanta (Linnaeus, 1758)']) | [
"Parse",
"taxon",
"names",
"using",
"the",
"GBIF",
"name",
"parser"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/species/name_parser.py#L3-L22 |
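A minimal usage sketch for ``name_parser``; the parsed-component keys are illustrative of the GBIF name-parser response, not guaranteed by the docstring above::
    from pygbif import species
    parsed = species.name_parser(['Vanessa atalanta (Linnaeus, 1758)'])
    for p in parsed:
        # each element is a dict of parsed name components
        print(p.get('type'), p.get('genusOrAbove'), p.get('authorship'))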
sckott/pygbif | pygbif/species/name_lookup.py | name_lookup | def name_lookup(q=None, rank=None, higherTaxonKey=None, status=None, isExtinct=None,
habitat=None, nameType=None, datasetKey=None, nomenclaturalStatus=None,
limit=100, offset=None, facet=False, facetMincount=None, facetMultiselect=None,
type=None, hl=False, verbose=False, **kwargs):
'''
Lookup names in all taxonomies in GBIF.
This service uses fuzzy lookup so that you can put in partial names and
you should get back those things that match. See examples below.
:param q: [str] Query term(s) for full text search (optional)
:param rank: [str] ``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``,
``FORM``, ``GENUS``, ``INFORMAL``, ``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``,
``INFRASUBSPECIFIC_NAME``, ``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``,
``SUBFAMILY``, ``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``, ``SUPERPHYLUM``,
``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY`` (optional)
:param verbose: [bool] If True, show alternative matches that were considered but rejected.
:param higherTaxonKey: [str] Filters by any of the higher Linnean rank keys. Note this
is within the respective checklist and not searching nub keys across all checklists (optional)
:param status: [str] (optional) Filters by the taxonomic status as one of:
* ``ACCEPTED``
* ``DETERMINATION_SYNONYM`` Used for unknown child taxa referred to via spec, ssp, ...
* ``DOUBTFUL`` Treated as accepted, but doubtful whether this is correct.
* ``HETEROTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``HOMOTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``INTERMEDIATE_RANK_SYNONYM`` Used in nub only.
* ``MISAPPLIED`` More specific subclass of ``SYNONYM``.
* ``PROPARTE_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``SYNONYM`` A general synonym, the exact type is unknown.
:param isExtinct: [bool] Filters by extinction status (e.g. ``isExtinct=True``)
:param habitat: [str] Filters by habitat. One of: ``marine``, ``freshwater``, or
``terrestrial`` (optional)
:param nameType: [str] (optional) Filters by the name type as one of:
* ``BLACKLISTED`` surely not a scientific name.
* ``CANDIDATUS`` Candidatus is a component of the taxonomic name for a bacterium that cannot be maintained in a Bacteriology Culture Collection.
* ``CULTIVAR`` a cultivated plant name.
* ``DOUBTFUL`` doubtful whether this is a scientific name at all.
* ``HYBRID`` a hybrid formula (not a hybrid name).
* ``INFORMAL`` a scientific name with some informal addition like "cf." or indetermined like Abies spec.
* ``SCINAME`` a scientific name which is not well formed.
* ``VIRUS`` a virus name.
* ``WELLFORMED`` a well formed scientific name according to present nomenclatural rules.
:param datasetKey: [str] Filters by the dataset's key (a uuid) (optional)
:param nomenclaturalStatus: [str] Not yet implemented, but will eventually allow for
filtering by a nomenclatural status enum
:param limit: [int] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [int] Record number to start at. (optional)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: ``datasetKey``, ``higherTaxonKey``, ``rank``, ``status``,
``isExtinct``, ``habitat``, and ``nameType``. Additionally ``threat`` and ``nomenclaturalStatus``
are legal values but not yet implemented, so data will not yet be returned for them. (optional)
:param facetMincount: [str] Used in combination with the facet parameter. Set
``facetMincount={#}`` to exclude facets with a count less than {#}, e.g.
http://bit.ly/1bMdByP only shows the type value ``ACCEPTED`` because the other
statuses have counts less than 7,000,000 (optional)
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
``facetMultiselect=True`` to still return counts for values that are not currently
filtered, e.g. http://bit.ly/19YLXPO still shows all status values even though
status is being filtered by ``status=ACCEPTED`` (optional)
:param type: [str] Type of name. One of ``occurrence``, ``checklist``, or ``metadata``. (optional)
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class ``gbifH1`` e.g.
``q='plant', hl=True``. Fulltext search fields include: ``title``, ``keyword``, ``country``,
``publishing country``, ``publishing organization title``, ``hosting organization title``, and
``description``. One additional full text field is searched which includes information from
metadata documents, but the text of this field is not returned in the response. (optional)
:return: A dictionary
:references: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
# Look up names like mammalia
species.name_lookup(q='mammalia')
# Paging
species.name_lookup(q='mammalia', limit=1)
species.name_lookup(q='mammalia', limit=1, offset=2)
# large requests, use offset parameter
first = species.name_lookup(q='mammalia', limit=1000)
second = species.name_lookup(q='mammalia', limit=1000, offset=1000)
# Verbose output: include alternative matches that were considered
species.name_lookup('Helianthus annuus', rank="species", verbose=True)
# Get all data and parse it, removing descriptions field which can be quite long
out = species.name_lookup('Helianthus annuus', rank="species")
res = out['results']
[ z.pop('descriptions', None) for z in res ]
res
# Fuzzy searching
species.name_lookup(q='Heli', rank="genus")
# Limit records to certain number
species.name_lookup('Helianthus annuus', rank="species", limit=2)
# Query by habitat
species.name_lookup(habitat = "terrestrial", limit=2)
species.name_lookup(habitat = "marine", limit=2)
species.name_lookup(habitat = "freshwater", limit=2)
# Using faceting
species.name_lookup(facet='status', limit=0, facetMincount='70000')
species.name_lookup(facet=['status', 'higherTaxonKey'], limit=0, facetMincount='700000')
species.name_lookup(facet='nameType', limit=0)
species.name_lookup(facet='habitat', limit=0)
species.name_lookup(facet='datasetKey', limit=0)
species.name_lookup(facet='rank', limit=0)
species.name_lookup(facet='isExtinct', limit=0)
# text highlighting
species.name_lookup(q='plant', hl=True, limit=30)
# Lookup by datasetKey
species.name_lookup(datasetKey='3f8a1297-3259-4700-91fc-acc4170b27ce')
'''
args = {'q': q, 'rank': rank, 'higherTaxonKey': higherTaxonKey,
'status': status, 'isExtinct': isExtinct, 'habitat': habitat,
'nameType': nameType, 'datasetKey': datasetKey,
'nomenclaturalStatus': nomenclaturalStatus, 'limit': limit, 'offset': offset,
'facet': bn(facet), 'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': bn(hl), 'verbose': bn(verbose), 'type': type}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
return gbif_GET(gbif_baseurl + 'species/search', args, **kwargs) | python | def name_lookup(q=None, rank=None, higherTaxonKey=None, status=None, isExtinct=None,
habitat=None, nameType=None, datasetKey=None, nomenclaturalStatus=None,
limit=100, offset=None, facet=False, facetMincount=None, facetMultiselect=None,
type=None, hl=False, verbose=False, **kwargs):
'''
Lookup names in all taxonomies in GBIF.
This service uses fuzzy lookup so that you can put in partial names and
you should get back those things that match. See examples below.
:param q: [str] Query term(s) for full text search (optional)
:param rank: [str] ``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``,
``FORM``, ``GENUS``, ``INFORMAL``, ``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``,
``INFRASUBSPECIFIC_NAME``, ``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``,
``SUBFAMILY``, ``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``, ``SUPERPHYLUM``,
``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY`` (optional)
:param verbose: [bool] If True, show alternative matches that were considered but rejected.
:param higherTaxonKey: [str] Filters by any of the higher Linnean rank keys. Note this
is within the respective checklist and not searching nub keys across all checklists (optional)
:param status: [str] (optional) Filters by the taxonomic status as one of:
* ``ACCEPTED``
* ``DETERMINATION_SYNONYM`` Used for unknown child taxa referred to via spec, ssp, ...
* ``DOUBTFUL`` Treated as accepted, but doubtful whether this is correct.
* ``HETEROTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``HOMOTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``INTERMEDIATE_RANK_SYNONYM`` Used in nub only.
* ``MISAPPLIED`` More specific subclass of ``SYNONYM``.
* ``PROPARTE_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``SYNONYM`` A general synonym, the exact type is unknown.
:param isExtinct: [bool] Filters by extinction status (e.g. ``isExtinct=True``)
:param habitat: [str] Filters by habitat. One of: ``marine``, ``freshwater``, or
``terrestrial`` (optional)
:param nameType: [str] (optional) Filters by the name type as one of:
* ``BLACKLISTED`` surely not a scientific name.
* ``CANDIDATUS`` Candidatus is a component of the taxonomic name for a bacterium that cannot be maintained in a Bacteriology Culture Collection.
* ``CULTIVAR`` a cultivated plant name.
* ``DOUBTFUL`` doubtful whether this is a scientific name at all.
* ``HYBRID`` a hybrid formula (not a hybrid name).
* ``INFORMAL`` a scientific name with some informal addition like "cf." or indetermined like Abies spec.
* ``SCINAME`` a scientific name which is not well formed.
* ``VIRUS`` a virus name.
* ``WELLFORMED`` a well formed scientific name according to present nomenclatural rules.
:param datasetKey: [str] Filters by the dataset's key (a uuid) (optional)
:param nomenclaturalStatus: [str] Not yet implemented, but will eventually allow for
filtering by a nomenclatural status enum
:param limit: [int] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [int] Record number to start at. (optional)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: ``datasetKey``, ``higherTaxonKey``, ``rank``, ``status``,
``isExtinct``, ``habitat``, and ``nameType``. Additionally ``threat`` and ``nomenclaturalStatus``
are legal values but not yet implemented, so data will not yet be returned for them. (optional)
:param facetMincount: [str] Used in combination with the facet parameter. Set
``facetMincount={#}`` to exclude facets with a count less than {#}, e.g.
http://bit.ly/1bMdByP only shows the type value ``ACCEPTED`` because the other
statuses have counts less than 7,000,000 (optional)
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
``facetMultiselect=True`` to still return counts for values that are not currently
filtered, e.g. http://bit.ly/19YLXPO still shows all status values even though
status is being filtered by ``status=ACCEPTED`` (optional)
:param type: [str] Type of name. One of ``occurrence``, ``checklist``, or ``metadata``. (optional)
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class ``gbifH1`` e.g.
``q='plant', hl=True``. Fulltext search fields include: ``title``, ``keyword``, ``country``,
``publishing country``, ``publishing organization title``, ``hosting organization title``, and
``description``. One additional full text field is searched which includes information from
metadata documents, but the text of this field is not returned in the response. (optional)
:return: A dictionary
:references: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
# Look up names like mammalia
species.name_lookup(q='mammalia')
# Paging
species.name_lookup(q='mammalia', limit=1)
species.name_lookup(q='mammalia', limit=1, offset=2)
# large requests, use offset parameter
first = species.name_lookup(q='mammalia', limit=1000)
second = species.name_lookup(q='mammalia', limit=1000, offset=1000)
# Verbose output: include alternative matches that were considered
species.name_lookup('Helianthus annuus', rank="species", verbose=True)
# Get all data and parse it, removing descriptions field which can be quite long
out = species.name_lookup('Helianthus annuus', rank="species")
res = out['results']
[ z.pop('descriptions', None) for z in res ]
res
# Fuzzy searching
species.name_lookup(q='Heli', rank="genus")
# Limit records to certain number
species.name_lookup('Helianthus annuus', rank="species", limit=2)
# Query by habitat
species.name_lookup(habitat = "terrestrial", limit=2)
species.name_lookup(habitat = "marine", limit=2)
species.name_lookup(habitat = "freshwater", limit=2)
# Using faceting
species.name_lookup(facet='status', limit=0, facetMincount='70000')
species.name_lookup(facet=['status', 'higherTaxonKey'], limit=0, facetMincount='700000')
species.name_lookup(facet='nameType', limit=0)
species.name_lookup(facet='habitat', limit=0)
species.name_lookup(facet='datasetKey', limit=0)
species.name_lookup(facet='rank', limit=0)
species.name_lookup(facet='isExtinct', limit=0)
# text highlighting
species.name_lookup(q='plant', hl=True, limit=30)
# Lookup by datasetKey
species.name_lookup(datasetKey='3f8a1297-3259-4700-91fc-acc4170b27ce')
'''
args = {'q': q, 'rank': rank, 'higherTaxonKey': higherTaxonKey,
'status': status, 'isExtinct': isExtinct, 'habitat': habitat,
'nameType': nameType, 'datasetKey': datasetKey,
'nomenclaturalStatus': nomenclaturalStatus, 'limit': limit, 'offset': offset,
'facet': bn(facet), 'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': bn(hl), 'verbose': bn(verbose), 'type': type}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
return gbif_GET(gbif_baseurl + 'species/search', args, **kwargs) | [
"def",
"name_lookup",
"(",
"q",
"=",
"None",
",",
"rank",
"=",
"None",
",",
"higherTaxonKey",
"=",
"None",
",",
"status",
"=",
"None",
",",
"isExtinct",
"=",
"None",
",",
"habitat",
"=",
"None",
",",
"nameType",
"=",
"None",
",",
"datasetKey",
"=",
"None",
",",
"nomenclaturalStatus",
"=",
"None",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"None",
",",
"facet",
"=",
"False",
",",
"facetMincount",
"=",
"None",
",",
"facetMultiselect",
"=",
"None",
",",
"type",
"=",
"None",
",",
"hl",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"'q'",
":",
"q",
",",
"'rank'",
":",
"rank",
",",
"'higherTaxonKey'",
":",
"higherTaxonKey",
",",
"'status'",
":",
"status",
",",
"'isExtinct'",
":",
"isExtinct",
",",
"'habitat'",
":",
"habitat",
",",
"'nameType'",
":",
"nameType",
",",
"'datasetKey'",
":",
"datasetKey",
",",
"'nomenclaturalStatus'",
":",
"nomenclaturalStatus",
",",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
",",
"'facet'",
":",
"bn",
"(",
"facet",
")",
",",
"'facetMincount'",
":",
"facetMincount",
",",
"'facetMultiselect'",
":",
"facetMultiselect",
",",
"'hl'",
":",
"bn",
"(",
"hl",
")",
",",
"'verbose'",
":",
"bn",
"(",
"verbose",
")",
",",
"'type'",
":",
"type",
"}",
"gbif_kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"not",
"in",
"requests_argset",
"}",
"if",
"gbif_kwargs",
"is",
"not",
"None",
":",
"xx",
"=",
"dict",
"(",
"zip",
"(",
"[",
"re",
".",
"sub",
"(",
"'_'",
",",
"'.'",
",",
"x",
")",
"for",
"x",
"in",
"gbif_kwargs",
".",
"keys",
"(",
")",
"]",
",",
"gbif_kwargs",
".",
"values",
"(",
")",
")",
")",
"args",
".",
"update",
"(",
"xx",
")",
"kwargs",
"=",
"{",
"key",
":",
"kwargs",
"[",
"key",
"]",
"for",
"key",
"in",
"kwargs",
"if",
"key",
"in",
"requests_argset",
"}",
"return",
"gbif_GET",
"(",
"gbif_baseurl",
"+",
"'species/search'",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Lookup names in all taxonomies in GBIF.
This service uses fuzzy lookup so that you can put in partial names and
you should get back those things that match. See examples below.
:param q: [str] Query term(s) for full text search (optional)
:param rank: [str] ``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``,
``FORM``, ``GENUS``, ``INFORMAL``, ``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``,
``INFRASUBSPECIFIC_NAME``, ``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``,
``SUBFAMILY``, ``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``, ``SUPERPHYLUM``,
``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY`` (optional)
:param verbose: [bool] If True, show alternative matches that were considered but rejected.
:param higherTaxonKey: [str] Filters by any of the higher Linnean rank keys. Note this
is within the respective checklist and not searching nub keys across all checklists (optional)
:param status: [str] (optional) Filters by the taxonomic status as one of:
* ``ACCEPTED``
* ``DETERMINATION_SYNONYM`` Used for unknown child taxa referred to via spec, ssp, ...
* ``DOUBTFUL`` Treated as accepted, but doubtful whether this is correct.
* ``HETEROTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``HOMOTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``INTERMEDIATE_RANK_SYNONYM`` Used in nub only.
* ``MISAPPLIED`` More specific subclass of ``SYNONYM``.
* ``PROPARTE_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``SYNONYM`` A general synonym, the exact type is unknown.
:param isExtinct: [bool] Filters by extinction status (e.g. ``isExtinct=True``)
:param habitat: [str] Filters by habitat. One of: ``marine``, ``freshwater``, or
``terrestrial`` (optional)
:param nameType: [str] (optional) Filters by the name type as one of:
* ``BLACKLISTED`` surely not a scientific name.
* ``CANDIDATUS`` Candidatus is a component of the taxonomic name for a bacterium that cannot be maintained in a Bacteriology Culture Collection.
* ``CULTIVAR`` a cultivated plant name.
* ``DOUBTFUL`` doubtful whether this is a scientific name at all.
* ``HYBRID`` a hybrid formula (not a hybrid name).
* ``INFORMAL`` a scientific name with some informal addition like "cf." or indetermined like Abies spec.
* ``SCINAME`` a scientific name which is not well formed.
* ``VIRUS`` a virus name.
* ``WELLFORMED`` a well formed scientific name according to present nomenclatural rules.
:param datasetKey: [str] Filters by the dataset's key (a uuid) (optional)
:param nomenclaturalStatus: [str] Not yet implemented, but will eventually allow for
filtering by a nomenclatural status enum
:param limit: [int] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [int] Record number to start at. (optional)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: ``datasetKey``, ``higherTaxonKey``, ``rank``, ``status``,
``isExtinct``, ``habitat``, and ``nameType``. Additionally ``threat`` and ``nomenclaturalStatus``
are legal values but not yet implemented, so data will not yet be returned for them. (optional)
:param facetMincount: [str] Used in combination with the facet parameter. Set
``facetMincount={#}`` to exclude facets with a count less than {#}, e.g.
http://bit.ly/1bMdByP only shows the type value ``ACCEPTED`` because the other
statuses have counts less than 7,000,000 (optional)
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
``facetMultiselect=True`` to still return counts for values that are not currently
filtered, e.g. http://bit.ly/19YLXPO still shows all status values even though
status is being filtered by ``status=ACCEPTED`` (optional)
:param type: [str] Type of name. One of ``occurrence``, ``checklist``, or ``metadata``. (optional)
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class ``gbifH1`` e.g.
``q='plant', hl=True``. Fulltext search fields include: ``title``, ``keyword``, ``country``,
``publishing country``, ``publishing organization title``, ``hosting organization title``, and
``description``. One additional full text field is searched which includes information from
metadata documents, but the text of this field is not returned in the response. (optional)
:return: A dictionary
:references: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
# Look up names like mammalia
species.name_lookup(q='mammalia')
# Paging
species.name_lookup(q='mammalia', limit=1)
species.name_lookup(q='mammalia', limit=1, offset=2)
# large requests, use offset parameter
first = species.name_lookup(q='mammalia', limit=1000)
second = species.name_lookup(q='mammalia', limit=1000, offset=1000)
# Verbose output: include alternative matches that were considered
species.name_lookup('Helianthus annuus', rank="species", verbose=True)
# Get all data and parse it, removing descriptions field which can be quite long
out = species.name_lookup('Helianthus annuus', rank="species")
res = out['results']
[ z.pop('descriptions', None) for z in res ]
res
# Fuzzy searching
species.name_lookup(q='Heli', rank="genus")
# Limit records to certain number
species.name_lookup('Helianthus annuus', rank="species", limit=2)
# Query by habitat
species.name_lookup(habitat = "terrestrial", limit=2)
species.name_lookup(habitat = "marine", limit=2)
species.name_lookup(habitat = "freshwater", limit=2)
# Using faceting
species.name_lookup(facet='status', limit=0, facetMincount='70000')
species.name_lookup(facet=['status', 'higherTaxonKey'], limit=0, facetMincount='700000')
species.name_lookup(facet='nameType', limit=0)
species.name_lookup(facet='habitat', limit=0)
species.name_lookup(facet='datasetKey', limit=0)
species.name_lookup(facet='rank', limit=0)
species.name_lookup(facet='isExtinct', limit=0)
# text highlighting
species.name_lookup(q='plant', hl=True, limit=30)
# Lookup by datasetKey
species.name_lookup(datasetKey='3f8a1297-3259-4700-91fc-acc4170b27ce') | [
"Lookup",
"names",
"in",
"all",
"taxonomies",
"in",
"GBIF",
"."
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/species/name_lookup.py#L3-L141 |
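A paging sketch for ``name_lookup`` that walks beyond a single 1000-record page; ``endOfRecords`` is assumed from GBIF's standard paging envelope::
    from pygbif import species
    offset, results = 0, []
    while True:
        page = species.name_lookup(q='mammalia', limit=1000, offset=offset)
        results.extend(page['results'])
        if page.get('endOfRecords', True) or not page['results']:
            break
        offset += 1000
    print(len(results))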
sckott/pygbif | pygbif/occurrences/count.py | count | def count(taxonKey=None, basisOfRecord=None, country=None, isGeoreferenced=None,
datasetKey=None, publishingCountry=None, typeStatus=None,
issue=None, year=None, **kwargs):
'''
Returns occurrence counts for a predefined set of dimensions
:param taxonKey: [int] A GBIF taxon key
:param basisOfRecord: [str] A basis of record, e.g. ``OBSERVATION``
:param country: [str] A 2-letter country code (ISO 3166-1)
:param isGeoreferenced: [bool] Filter to georeferenced records only
:param datasetKey: [str] A GBIF dataset key (a uuid)
:param publishingCountry: [str] A 2-letter code for the publishing country
:param typeStatus: [str] A nomenclatural type status, e.g. ``HOLOTYPE``
:param issue: [str] A GBIF occurrence issue
:param year: [int] A year, e.g. ``1998``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count(taxonKey = 3329049)
occurrences.count(country = 'CA')
occurrences.count(isGeoreferenced = True)
occurrences.count(basisOfRecord = 'OBSERVATION')
'''
url = gbif_baseurl + 'occurrence/count'
out = gbif_GET(url, {'taxonKey': taxonKey, 'basisOfRecord': basisOfRecord, 'country': country,
'isGeoreferenced': isGeoreferenced, 'datasetKey': datasetKey,
'publishingCountry': publishingCountry, 'typeStatus': typeStatus,
'issue': issue, 'year': year}, **kwargs)
return out | python | def count(taxonKey=None, basisOfRecord=None, country=None, isGeoreferenced=None,
datasetKey=None, publishingCountry=None, typeStatus=None,
issue=None, year=None, **kwargs):
'''
Returns occurrence counts for a predefined set of dimensions
:param taxonKey: [int] A GBIF taxon key
:param basisOfRecord: [str] A basis of record, e.g. ``OBSERVATION``
:param country: [str] A 2-letter country code (ISO 3166-1)
:param isGeoreferenced: [bool] Filter to georeferenced records only
:param datasetKey: [str] A GBIF dataset key (a uuid)
:param publishingCountry: [str] A 2-letter code for the publishing country
:param typeStatus: [str] A nomenclatural type status, e.g. ``HOLOTYPE``
:param issue: [str] A GBIF occurrence issue
:param year: [int] A year, e.g. ``1998``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count(taxonKey = 3329049)
occurrences.count(country = 'CA')
occurrences.count(isGeoreferenced = True)
occurrences.count(basisOfRecord = 'OBSERVATION')
'''
url = gbif_baseurl + 'occurrence/count'
out = gbif_GET(url, {'taxonKey': taxonKey, 'basisOfRecord': basisOfRecord, 'country': country,
'isGeoreferenced': isGeoreferenced, 'datasetKey': datasetKey,
'publishingCountry': publishingCountry, 'typeStatus': typeStatus,
'issue': issue, 'year': year}, **kwargs)
return out | [
"def",
"count",
"(",
"taxonKey",
"=",
"None",
",",
"basisOfRecord",
"=",
"None",
",",
"country",
"=",
"None",
",",
"isGeoreferenced",
"=",
"None",
",",
"datasetKey",
"=",
"None",
",",
"publishingCountry",
"=",
"None",
",",
"typeStatus",
"=",
"None",
",",
"issue",
"=",
"None",
",",
"year",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/count'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"'taxonKey'",
":",
"taxonKey",
",",
"'basisOfRecord'",
":",
"basisOfRecord",
",",
"'country'",
":",
"country",
",",
"'isGeoreferenced'",
":",
"isGeoreferenced",
",",
"'datasetKey'",
":",
"datasetKey",
",",
"'publishingCountry'",
":",
"publishingCountry",
",",
"'typeStatus'",
":",
"typeStatus",
",",
"'issue'",
":",
"issue",
",",
"'year'",
":",
"year",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Returns occurrence counts for a predefined set of dimensions
:param taxonKey: [int] A GBIF taxon key
:param basisOfRecord: [str] A basis of record, e.g. ``OBSERVATION``
:param country: [str] A 2-letter country code (ISO 3166-1)
:param isGeoreferenced: [bool] Filter to georeferenced records only
:param datasetKey: [str] A GBIF dataset key (a uuid)
:param publishingCountry: [str] A 2-letter code for the publishing country
:param typeStatus: [str] A nomenclatural type status, e.g. ``HOLOTYPE``
:param issue: [str] A GBIF occurrence issue
:param year: [int] A year, e.g. ``1998``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count(taxonKey = 3329049)
occurrences.count(country = 'CA')
occurrences.count(isGeoreferenced = True)
occurrences.count(basisOfRecord = 'OBSERVATION') | [
"Returns",
"occurrence",
"counts",
"for",
"a",
"predefined",
"set",
"of",
"dimensions"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/count.py#L3-L34 |
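A minimal sketch combining several ``count`` filters; the result is printed as-is rather than assuming a particular structure::
    from pygbif import occurrences
    n = occurrences.count(taxonKey=3329049, country='CA', isGeoreferenced=True)
    print(n)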
sckott/pygbif | pygbif/occurrences/count.py | count_year | def count_year(year, **kwargs):
'''
Lists occurrence counts by year
:param year: [int] year range, e.g., ``1990,2000``. Does not support ranges like ``asterisk,2010``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_year(year = '1990,2000')
'''
url = gbif_baseurl + 'occurrence/counts/year'
out = gbif_GET(url, {'year': year}, **kwargs)
return out | python | def count_year(year, **kwargs):
'''
Lists occurrence counts by year
:param year: [int] year range, e.g., ``1990,2000``. Does not support ranges like ``asterisk,2010``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_year(year = '1990,2000')
'''
url = gbif_baseurl + 'occurrence/counts/year'
out = gbif_GET(url, {'year': year}, **kwargs)
return out | [
"def",
"count_year",
"(",
"year",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/counts/year'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"'year'",
":",
"year",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Lists occurrence counts by year
:param year: [int] year range, e.g., ``1990,2000``. Does not support ranges like ``asterisk,2010``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_year(year = '1990,2000') | [
"Lists",
"occurrence",
"counts",
"by",
"year"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/count.py#L51-L66 |
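A sketch for ``count_year``, under the (hedged) assumption that the response maps years to occurrence counts::
    from pygbif import occurrences
    by_year = occurrences.count_year(year='1990,2000')
    for yr, n in sorted(by_year.items()):
        print(yr, n)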
sckott/pygbif | pygbif/occurrences/count.py | count_datasets | def count_datasets(taxonKey = None, country = None, **kwargs):
'''
Lists occurrence counts for datasets that cover a given taxon or country
:param taxonKey: [int] Taxon key
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_datasets(country = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/datasets'
out = gbif_GET(url, {'taxonKey': taxonKey, 'country': country}, **kwargs)
return out | python | def count_datasets(taxonKey = None, country = None, **kwargs):
'''
Lists occurrence counts for datasets that cover a given taxon or country
:param taxonKey: [int] Taxon key
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_datasets(country = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/datasets'
out = gbif_GET(url, {'taxonKey': taxonKey, 'country': country}, **kwargs)
return out | [
"def",
"count_datasets",
"(",
"taxonKey",
"=",
"None",
",",
"country",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/counts/datasets'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"'taxonKey'",
":",
"taxonKey",
",",
"'country'",
":",
"country",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Lists occurrence counts for datasets that cover a given taxon or country
:param taxonKey: [int] Taxon key
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_datasets(country = "DE") | [
"Lists",
"occurrence",
"counts",
"for",
"datasets",
"that",
"cover",
"a",
"given",
"taxon",
"or",
"country"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/count.py#L68-L84 |
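A sketch ranking a country's five largest datasets, under the (hedged) assumption that the response maps dataset UUIDs to counts::
    from pygbif import occurrences
    per_dataset = occurrences.count_datasets(country='DE')
    for key, n in sorted(per_dataset.items(), key=lambda kv: kv[1], reverse=True)[:5]:
        print(key, n)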
sckott/pygbif | pygbif/occurrences/count.py | count_countries | def count_countries(publishingCountry, **kwargs):
'''
Lists occurrence counts for all countries covered by the data published by the given country
:param publishingCountry: [str] A two letter country code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_countries(publishingCountry = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/countries'
out = gbif_GET(url, {'publishingCountry': publishingCountry}, **kwargs)
return out | python | def count_countries(publishingCountry, **kwargs):
'''
Lists occurrence counts for all countries covered by the data published by the given country
:param publishingCountry: [str] A two letter country code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_countries(publishingCountry = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/countries'
out = gbif_GET(url, {'publishingCountry': publishingCountry}, **kwargs)
return out | [
"def",
"count_countries",
"(",
"publishingCountry",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/counts/countries'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"'publishingCountry'",
":",
"publishingCountry",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Lists occurrence counts for all countries covered by the data published by the given country
:param publishingCountry: [str] A two letter country code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_countries(publishingCountry = "DE") | [
"Lists",
"occurrence",
"counts",
"for",
"all",
"countries",
"covered",
"by",
"the",
"data",
"published",
"by",
"the",
"given",
"country"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/count.py#L86-L101 |
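A sketch for ``count_countries``; keys are assumed to be 2-letter country codes with occurrence counts as values::
    from pygbif import occurrences
    covered = occurrences.count_countries(publishingCountry='DE')
    print(len(covered), 'countries covered by data published from DE')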
sckott/pygbif | pygbif/occurrences/count.py | count_publishingcountries | def count_publishingcountries(country, **kwargs):
'''
Lists occurrence counts for all countries that publish data about the given country
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_publishingcountries(country = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/publishingCountries'
out = gbif_GET(url, {"country": country}, **kwargs)
return out | python | def count_publishingcountries(country, **kwargs):
'''
Lists occurrence counts for all countries that publish data about the given country
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_publishingcountries(country = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/publishingCountries'
out = gbif_GET(url, {"country": country}, **kwargs)
return out | [
"def",
"count_publishingcountries",
"(",
"country",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"gbif_baseurl",
"+",
"'occurrence/counts/publishingCountries'",
"out",
"=",
"gbif_GET",
"(",
"url",
",",
"{",
"\"country\"",
":",
"country",
"}",
",",
"*",
"*",
"kwargs",
")",
"return",
"out"
] | Lists occurrence counts for all countries that publish data about the given country
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_publishingcountries(country = "DE") | [
"Lists",
"occurrence",
"counts",
"for",
"all",
"countries",
"that",
"publish",
"data",
"about",
"the",
"given",
"country"
] | train | https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/count.py#L103-L118 |
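The mirror-image query of ``count_countries``: which countries publish data about Germany (same hedged assumption about the response shape)::
    from pygbif import occurrences
    publishers = occurrences.count_publishingcountries(country='DE')
    for code, n in sorted(publishers.items(), key=lambda kv: kv[1], reverse=True):
        print(code, n)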
jwkvam/plotlywrapper | plotlywrapper.py | _detect_notebook | def _detect_notebook() -> bool:
"""Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False.
"""
try:
from IPython import get_ipython
from ipykernel import zmqshell
except ImportError:
return False
kernel = get_ipython()
try:
from spyder.utils.ipython.spyder_kernel import SpyderKernel
if isinstance(kernel.kernel, SpyderKernel):
return False
except (ImportError, AttributeError):
pass
return isinstance(kernel, zmqshell.ZMQInteractiveShell) | python | def _detect_notebook() -> bool:
"""Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False.
"""
try:
from IPython import get_ipython
from ipykernel import zmqshell
except ImportError:
return False
kernel = get_ipython()
try:
from spyder.utils.ipython.spyder_kernel import SpyderKernel
if isinstance(kernel.kernel, SpyderKernel):
return False
except (ImportError, AttributeError):
pass
return isinstance(kernel, zmqshell.ZMQInteractiveShell) | [
"def",
"_detect_notebook",
"(",
")",
"->",
"bool",
":",
"try",
":",
"from",
"IPython",
"import",
"get_ipython",
"from",
"ipykernel",
"import",
"zmqshell",
"except",
"ImportError",
":",
"return",
"False",
"kernel",
"=",
"get_ipython",
"(",
")",
"try",
":",
"from",
"spyder",
".",
"utils",
".",
"ipython",
".",
"spyder_kernel",
"import",
"SpyderKernel",
"if",
"isinstance",
"(",
"kernel",
".",
"kernel",
",",
"SpyderKernel",
")",
":",
"return",
"False",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
":",
"pass",
"return",
"isinstance",
"(",
"kernel",
",",
"zmqshell",
".",
"ZMQInteractiveShell",
")"
] | Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False. | [
"Detect",
"if",
"code",
"is",
"running",
"in",
"a",
"Jupyter",
"Notebook",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L31-L55 |
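A hypothetical caller showing why the notebook check matters: initialize plotly's offline notebook mode only when actually inside Jupyter (``init_notebook_mode`` is part of the classic ``plotly.offline`` API)::
    if _detect_notebook():
        import plotly.offline as py
        py.init_notebook_mode()  # injects plotly.js into the notebook
    # otherwise fall back to writing an HTML file or opening a browser tab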
jwkvam/plotlywrapper | plotlywrapper.py | _merge_layout | def _merge_layout(x: go.Layout, y: go.Layout) -> go.Layout:
"""Merge attributes from two layouts."""
xjson = x.to_plotly_json()
yjson = y.to_plotly_json()
if 'shapes' in yjson and 'shapes' in xjson:
xjson['shapes'] += yjson['shapes']
yjson.update(xjson)
return go.Layout(yjson) | python | def _merge_layout(x: go.Layout, y: go.Layout) -> go.Layout:
"""Merge attributes from two layouts."""
xjson = x.to_plotly_json()
yjson = y.to_plotly_json()
if 'shapes' in yjson and 'shapes' in xjson:
xjson['shapes'] += yjson['shapes']
yjson.update(xjson)
return go.Layout(yjson) | [
"def",
"_merge_layout",
"(",
"x",
":",
"go",
".",
"Layout",
",",
"y",
":",
"go",
".",
"Layout",
")",
"->",
"go",
".",
"Layout",
":",
"xjson",
"=",
"x",
".",
"to_plotly_json",
"(",
")",
"yjson",
"=",
"y",
".",
"to_plotly_json",
"(",
")",
"if",
"'shapes'",
"in",
"yjson",
"and",
"'shapes'",
"in",
"xjson",
":",
"xjson",
"[",
"'shapes'",
"]",
"+=",
"yjson",
"[",
"'shapes'",
"]",
"yjson",
".",
"update",
"(",
"xjson",
")",
"return",
"go",
".",
"Layout",
"(",
"yjson",
")"
] | Merge attributes from two layouts. | [
"Merge",
"attributes",
"from",
"two",
"layouts",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L58-L65 |
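A minimal sketch of the merge semantics: attributes from ``x`` win on conflict, except ``shapes``, which are concatenated::
    import plotly.graph_objs as go
    base = go.Layout(title='base', shapes=[dict(type='line', x0=0, x1=1, y0=0, y1=0)])
    extra = go.Layout(title='extra', xaxis=dict(title='t'),
                      shapes=[dict(type='rect', x0=0, x1=1, y0=0, y1=1)])
    merged = _merge_layout(base, extra)
    # merged keeps base's title, gains extra's xaxis, and carries both shapes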
jwkvam/plotlywrapper | plotlywrapper.py | _try_pydatetime | def _try_pydatetime(x):
"""Try to convert to pandas objects to datetimes.
Plotly doesn't know how to handle them.
"""
try:
# for datetimeindex
x = [y.isoformat() for y in x.to_pydatetime()]
except AttributeError:
pass
try:
# for generic series
x = [y.isoformat() for y in x.dt.to_pydatetime()]
except AttributeError:
pass
return x | python | def _try_pydatetime(x):
"""Try to convert to pandas objects to datetimes.
Plotly doesn't know how to handle them.
"""
try:
# for datetimeindex
x = [y.isoformat() for y in x.to_pydatetime()]
except AttributeError:
pass
try:
# for generic series
x = [y.isoformat() for y in x.dt.to_pydatetime()]
except AttributeError:
pass
return x | [
"def",
"_try_pydatetime",
"(",
"x",
")",
":",
"try",
":",
"# for datetimeindex",
"x",
"=",
"[",
"y",
".",
"isoformat",
"(",
")",
"for",
"y",
"in",
"x",
".",
"to_pydatetime",
"(",
")",
"]",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"# for generic series",
"x",
"=",
"[",
"y",
".",
"isoformat",
"(",
")",
"for",
"y",
"in",
"x",
".",
"dt",
".",
"to_pydatetime",
"(",
")",
"]",
"except",
"AttributeError",
":",
"pass",
"return",
"x"
] | Try to convert pandas objects to datetimes.
Plotly doesn't know how to handle them. | [
"Try",
"to",
"convert",
"to",
"pandas",
"objects",
"to",
"datetimes",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L68-L83 |
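A quick check of the three branches (DatetimeIndex, datetime Series, and pass-through)::
    import pandas as pd
    idx = pd.date_range('2020-01-01', periods=3)
    print(_try_pydatetime(idx))             # list of ISO-format strings
    print(_try_pydatetime(pd.Series(idx)))  # same, via the .dt accessor
    print(_try_pydatetime([1, 2, 3]))       # non-datetime input is returned unchanged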
jwkvam/plotlywrapper | plotlywrapper.py | spark_shape | def spark_shape(points, shapes, fill=None, color='blue', width=5, yindex=0, heights=None):
"""TODO: Docstring for spark.
Parameters
----------
points : array-like
shapes : array-like
fill : array-like, optional
Returns
-------
Chart
"""
assert len(points) == len(shapes) + 1
data = [{'marker': {'color': 'white'}, 'x': [points[0], points[-1]], 'y': [yindex, yindex]}]
if fill is None:
fill = [False] * len(shapes)
if heights is None:
heights = [0.4] * len(shapes)
lays = []
for i, (shape, height) in enumerate(zip(shapes, heights)):
if shape is None:
continue
if fill[i]:
fillcolor = color
else:
fillcolor = 'white'
lays.append(
dict(
type=shape,
x0=points[i],
x1=points[i + 1],
y0=yindex - height,
y1=yindex + height,
xref='x',
yref='y',
fillcolor=fillcolor,
line=dict(color=color, width=width),
)
)
layout = dict(shapes=lays)
return Chart(data=data, layout=layout) | python | def spark_shape(points, shapes, fill=None, color='blue', width=5, yindex=0, heights=None):
"""TODO: Docstring for spark.
Parameters
----------
points : array-like
shapes : array-like
fill : array-like, optional
Returns
-------
Chart
"""
assert len(points) == len(shapes) + 1
data = [{'marker': {'color': 'white'}, 'x': [points[0], points[-1]], 'y': [yindex, yindex]}]
if fill is None:
fill = [False] * len(shapes)
if heights is None:
heights = [0.4] * len(shapes)
lays = []
for i, (shape, height) in enumerate(zip(shapes, heights)):
if shape is None:
continue
if fill[i]:
fillcolor = color
else:
fillcolor = 'white'
lays.append(
dict(
type=shape,
x0=points[i],
x1=points[i + 1],
y0=yindex - height,
y1=yindex + height,
xref='x',
yref='y',
fillcolor=fillcolor,
line=dict(color=color, width=width),
)
)
layout = dict(shapes=lays)
return Chart(data=data, layout=layout) | [
"def",
"spark_shape",
"(",
"points",
",",
"shapes",
",",
"fill",
"=",
"None",
",",
"color",
"=",
"'blue'",
",",
"width",
"=",
"5",
",",
"yindex",
"=",
"0",
",",
"heights",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"points",
")",
"==",
"len",
"(",
"shapes",
")",
"+",
"1",
"data",
"=",
"[",
"{",
"'marker'",
":",
"{",
"'color'",
":",
"'white'",
"}",
",",
"'x'",
":",
"[",
"points",
"[",
"0",
"]",
",",
"points",
"[",
"-",
"1",
"]",
"]",
",",
"'y'",
":",
"[",
"yindex",
",",
"yindex",
"]",
"}",
"]",
"if",
"fill",
"is",
"None",
":",
"fill",
"=",
"[",
"False",
"]",
"*",
"len",
"(",
"shapes",
")",
"if",
"heights",
"is",
"None",
":",
"heights",
"=",
"[",
"0.4",
"]",
"*",
"len",
"(",
"shapes",
")",
"lays",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"shape",
",",
"height",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"shapes",
",",
"heights",
")",
")",
":",
"if",
"shape",
"is",
"None",
":",
"continue",
"if",
"fill",
"[",
"i",
"]",
":",
"fillcolor",
"=",
"color",
"else",
":",
"fillcolor",
"=",
"'white'",
"lays",
".",
"append",
"(",
"dict",
"(",
"type",
"=",
"shape",
",",
"x0",
"=",
"points",
"[",
"i",
"]",
",",
"x1",
"=",
"points",
"[",
"i",
"+",
"1",
"]",
",",
"y0",
"=",
"yindex",
"-",
"height",
",",
"y1",
"=",
"yindex",
"+",
"height",
",",
"xref",
"=",
"'x'",
",",
"yref",
"=",
"'y'",
",",
"fillcolor",
"=",
"fillcolor",
",",
"line",
"=",
"dict",
"(",
"color",
"=",
"color",
",",
"width",
"=",
"width",
")",
",",
")",
")",
"layout",
"=",
"dict",
"(",
"shapes",
"=",
"lays",
")",
"return",
"Chart",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"layout",
")"
] | TODO: Docstring for spark.
Parameters
----------
points : array-like
shapes : array-like
fill : array-like, optional
Returns
-------
Chart | [
"TODO",
":",
"Docstring",
"for",
"spark",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L472-L519 |
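A minimal sketch: four points define three intervals, each drawn as a shape, with ``None`` skipping an interval; ``.show()`` is assumed to be the usual Chart display method::
    chart = spark_shape(
        points=[0, 1, 2, 3],
        shapes=['rect', 'circle', None],
        fill=[True, False, False],
    )
    chart.show()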
jwkvam/plotlywrapper | plotlywrapper.py | vertical | def vertical(x, ymin=0, ymax=1, color=None, width=None, dash=None, opacity=None):
"""Draws a vertical line from `ymin` to `ymax`.
Parameters
----------
x : number
ymin : number, optional
ymax : number, optional
color : str, optional
width : number, optional
Returns
-------
Chart
"""
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
layout = dict(
shapes=[dict(type='line', x0=x, x1=x, y0=ymin, y1=ymax, opacity=opacity, line=lineattr)]
)
return Chart(layout=layout) | python | def vertical(x, ymin=0, ymax=1, color=None, width=None, dash=None, opacity=None):
"""Draws a vertical line from `ymin` to `ymax`.
Parameters
----------
x : number
ymin : number, optional
ymax : number, optional
color : str, optional
width : number, optional
Returns
-------
Chart
"""
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
layout = dict(
shapes=[dict(type='line', x0=x, x1=x, y0=ymin, y1=ymax, opacity=opacity, line=lineattr)]
)
return Chart(layout=layout) | [
"def",
"vertical",
"(",
"x",
",",
"ymin",
"=",
"0",
",",
"ymax",
"=",
"1",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
")",
":",
"lineattr",
"=",
"{",
"}",
"if",
"color",
":",
"lineattr",
"[",
"'color'",
"]",
"=",
"color",
"if",
"width",
":",
"lineattr",
"[",
"'width'",
"]",
"=",
"width",
"if",
"dash",
":",
"lineattr",
"[",
"'dash'",
"]",
"=",
"dash",
"layout",
"=",
"dict",
"(",
"shapes",
"=",
"[",
"dict",
"(",
"type",
"=",
"'line'",
",",
"x0",
"=",
"x",
",",
"x1",
"=",
"x",
",",
"y0",
"=",
"ymin",
",",
"y1",
"=",
"ymax",
",",
"opacity",
"=",
"opacity",
",",
"line",
"=",
"lineattr",
")",
"]",
")",
"return",
"Chart",
"(",
"layout",
"=",
"layout",
")"
] | Draws a vertical line from `ymin` to `ymax`.
Parameters
----------
x : number
ymin : number, optional
ymax : number, optional
color : str, optional
width : number, optional
Returns
-------
Chart | [
"Draws",
"a",
"vertical",
"line",
"from",
"ymin",
"to",
"ymax",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L522-L548 |
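A sketch overlaying a vertical marker on a line chart; composition via ``+`` is assumed from plotlywrapper's Chart API::
    chart = line(y=[3, 1, 2]) + vertical(1, ymin=0, ymax=3, color='red', dash='dash')
    chart.show()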
jwkvam/plotlywrapper | plotlywrapper.py | horizontal | def horizontal(y, xmin=0, xmax=1, color=None, width=None, dash=None, opacity=None):
"""Draws a horizontal line from `xmin` to `xmax`.
Parameters
----------
y : number
xmin : number, optional
xmax : number, optional
color : str, optional
width : number, optional
Returns
-------
Chart
"""
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
layout = dict(
shapes=[dict(type='line', x0=xmin, x1=xmax, y0=y, y1=y, opacity=opacity, line=lineattr)]
)
return Chart(layout=layout) | python | def horizontal(y, xmin=0, xmax=1, color=None, width=None, dash=None, opacity=None):
"""Draws a horizontal line from `xmin` to `xmax`.
Parameters
----------
y : number
xmin : number, optional
xmax : number, optional
color : str, optional
width : number, optional
Returns
-------
Chart
"""
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
layout = dict(
shapes=[dict(type='line', x0=xmin, x1=xmax, y0=y, y1=y, opacity=opacity, line=lineattr)]
)
return Chart(layout=layout) | [
"def",
"horizontal",
"(",
"y",
",",
"xmin",
"=",
"0",
",",
"xmax",
"=",
"1",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
")",
":",
"lineattr",
"=",
"{",
"}",
"if",
"color",
":",
"lineattr",
"[",
"'color'",
"]",
"=",
"color",
"if",
"width",
":",
"lineattr",
"[",
"'width'",
"]",
"=",
"width",
"if",
"dash",
":",
"lineattr",
"[",
"'dash'",
"]",
"=",
"dash",
"layout",
"=",
"dict",
"(",
"shapes",
"=",
"[",
"dict",
"(",
"type",
"=",
"'line'",
",",
"x0",
"=",
"xmin",
",",
"x1",
"=",
"xmax",
",",
"y0",
"=",
"y",
",",
"y1",
"=",
"y",
",",
"opacity",
"=",
"opacity",
",",
"line",
"=",
"lineattr",
")",
"]",
")",
"return",
"Chart",
"(",
"layout",
"=",
"layout",
")"
] | Draws a horizontal line from `xmin` to `xmax`.
Parameters
----------
y : number
xmin : number, optional
xmax : number, optional
color : str, optional
width : number, optional
Returns
-------
Chart | [
"Draws",
"a",
"horizontal",
"line",
"from",
"xmin",
"to",
"xmax",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L551-L577 |
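The companion to ``vertical``: a dotted threshold drawn across a chart (same assumption about ``+`` composition)::
    chart = line(y=[0.2, 0.8, 0.4, 0.9]) + horizontal(0.5, xmin=0, xmax=3, color='gray', dash='dot')
    chart.show()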
jwkvam/plotlywrapper | plotlywrapper.py | line | def line(
x=None,
y=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
yaxis=1,
fill=None,
text="",
markersize=6,
):
"""Draws connected dots.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : array-like, optional
Returns
-------
Chart
"""
assert x is not None or y is not None, "x or y must be something"
yn = 'y' + str(yaxis)
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
if y is None:
y = x
x = None
if x is None:
x = np.arange(len(y))
else:
x = _try_pydatetime(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
assert x.shape[0] == y.shape[0]
if y.ndim == 2:
if not hasattr(label, '__iter__'):
if label is None:
label = _labels()
else:
label = _labels(label)
data = [
go.Scatter(
x=x,
y=yy,
name=ll,
line=lineattr,
mode=mode,
text=text,
fill=fill,
opacity=opacity,
yaxis=yn,
marker=dict(size=markersize, opacity=opacity),
)
for ll, yy in zip(label, y.T)
]
else:
data = [
go.Scatter(
x=x,
y=y,
name=label,
line=lineattr,
mode=mode,
text=text,
fill=fill,
opacity=opacity,
yaxis=yn,
marker=dict(size=markersize, opacity=opacity),
)
]
if yaxis == 1:
return Chart(data=data)
return Chart(data=data, layout={'yaxis' + str(yaxis): dict(overlaying='y')}) | python | def line(
x=None,
y=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
yaxis=1,
fill=None,
text="",
markersize=6,
):
"""Draws connected dots.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : array-like, optional
Returns
-------
Chart
"""
assert x is not None or y is not None, "x or y must be something"
yn = 'y' + str(yaxis)
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
if y is None:
y = x
x = None
if x is None:
x = np.arange(len(y))
else:
x = _try_pydatetime(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
assert x.shape[0] == y.shape[0]
if y.ndim == 2:
if not hasattr(label, '__iter__'):
if label is None:
label = _labels()
else:
label = _labels(label)
data = [
go.Scatter(
x=x,
y=yy,
name=ll,
line=lineattr,
mode=mode,
text=text,
fill=fill,
opacity=opacity,
yaxis=yn,
marker=dict(size=markersize, opacity=opacity),
)
for ll, yy in zip(label, y.T)
]
else:
data = [
go.Scatter(
x=x,
y=y,
name=label,
line=lineattr,
mode=mode,
text=text,
fill=fill,
opacity=opacity,
yaxis=yn,
marker=dict(size=markersize, opacity=opacity),
)
]
if yaxis == 1:
return Chart(data=data)
return Chart(data=data, layout={'yaxis' + str(yaxis): dict(overlaying='y')}) | [
"def",
"line",
"(",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"mode",
"=",
"'lines+markers'",
",",
"yaxis",
"=",
"1",
",",
"fill",
"=",
"None",
",",
"text",
"=",
"\"\"",
",",
"markersize",
"=",
"6",
",",
")",
":",
"assert",
"x",
"is",
"not",
"None",
"or",
"y",
"is",
"not",
"None",
",",
"\"x or y must be something\"",
"yn",
"=",
"'y'",
"+",
"str",
"(",
"yaxis",
")",
"lineattr",
"=",
"{",
"}",
"if",
"color",
":",
"lineattr",
"[",
"'color'",
"]",
"=",
"color",
"if",
"width",
":",
"lineattr",
"[",
"'width'",
"]",
"=",
"width",
"if",
"dash",
":",
"lineattr",
"[",
"'dash'",
"]",
"=",
"dash",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"x",
"x",
"=",
"None",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"y",
")",
")",
"else",
":",
"x",
"=",
"_try_pydatetime",
"(",
"x",
")",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"y",
"=",
"np",
".",
"atleast_1d",
"(",
"y",
")",
"assert",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"y",
".",
"shape",
"[",
"0",
"]",
"if",
"y",
".",
"ndim",
"==",
"2",
":",
"if",
"not",
"hasattr",
"(",
"label",
",",
"'__iter__'",
")",
":",
"if",
"label",
"is",
"None",
":",
"label",
"=",
"_labels",
"(",
")",
"else",
":",
"label",
"=",
"_labels",
"(",
"label",
")",
"data",
"=",
"[",
"go",
".",
"Scatter",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"yy",
",",
"name",
"=",
"ll",
",",
"line",
"=",
"lineattr",
",",
"mode",
"=",
"mode",
",",
"text",
"=",
"text",
",",
"fill",
"=",
"fill",
",",
"opacity",
"=",
"opacity",
",",
"yaxis",
"=",
"yn",
",",
"marker",
"=",
"dict",
"(",
"size",
"=",
"markersize",
",",
"opacity",
"=",
"opacity",
")",
",",
")",
"for",
"ll",
",",
"yy",
"in",
"zip",
"(",
"label",
",",
"y",
".",
"T",
")",
"]",
"else",
":",
"data",
"=",
"[",
"go",
".",
"Scatter",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"name",
"=",
"label",
",",
"line",
"=",
"lineattr",
",",
"mode",
"=",
"mode",
",",
"text",
"=",
"text",
",",
"fill",
"=",
"fill",
",",
"opacity",
"=",
"opacity",
",",
"yaxis",
"=",
"yn",
",",
"marker",
"=",
"dict",
"(",
"size",
"=",
"markersize",
",",
"opacity",
"=",
"opacity",
")",
",",
")",
"]",
"if",
"yaxis",
"==",
"1",
":",
"return",
"Chart",
"(",
"data",
"=",
"data",
")",
"return",
"Chart",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"{",
"'yaxis'",
"+",
"str",
"(",
"yaxis",
")",
":",
"dict",
"(",
"overlaying",
"=",
"'y'",
")",
"}",
")"
] | Draws connected dots.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : array-like, optional
Returns
-------
Chart | [
"Draws",
"connected",
"dots",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L580-L665 |
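A sketch of the 2-D ``y`` branch of ``line``: each column becomes its own trace, one label per column::
    import numpy as np
    t = np.linspace(0, 6, 50)
    y = np.column_stack([np.sin(t), np.cos(t)])
    chart = line(x=t, y=y, label=['sin', 'cos'], mode='lines')
    chart.show()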
jwkvam/plotlywrapper | plotlywrapper.py | line3d | def line3d(
x, y, z, label=None, color=None, width=None, dash=None, opacity=None, mode='lines+markers'
):
"""Create a 3d line chart."""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
assert x.shape == y.shape
assert y.shape == z.shape
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
if y.ndim == 2:
if not hasattr(label, '__iter__'):
if label is None:
label = _labels()
else:
label = _labels(label)
data = [
go.Scatter3d(x=xx, y=yy, z=zz, name=ll, line=lineattr, mode=mode, opacity=opacity)
for ll, xx, yy, zz in zip(label, x.T, y.T, z.T)
]
else:
data = [go.Scatter3d(x=x, y=y, z=z, name=label, line=lineattr, mode=mode, opacity=opacity)]
return Chart(data=data) | python | def line3d(
x, y, z, label=None, color=None, width=None, dash=None, opacity=None, mode='lines+markers'
):
"""Create a 3d line chart."""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
assert x.shape == y.shape
assert y.shape == z.shape
lineattr = {}
if color:
lineattr['color'] = color
if width:
lineattr['width'] = width
if dash:
lineattr['dash'] = dash
if y.ndim == 2:
if not hasattr(label, '__iter__'):
if label is None:
label = _labels()
else:
label = _labels(label)
data = [
go.Scatter3d(x=xx, y=yy, z=zz, name=ll, line=lineattr, mode=mode, opacity=opacity)
for ll, xx, yy, zz in zip(label, x.T, y.T, z.T)
]
else:
data = [go.Scatter3d(x=x, y=y, z=z, name=label, line=lineattr, mode=mode, opacity=opacity)]
return Chart(data=data) | [
"def",
"line3d",
"(",
"x",
",",
"y",
",",
"z",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"mode",
"=",
"'lines+markers'",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"y",
"=",
"np",
".",
"atleast_1d",
"(",
"y",
")",
"z",
"=",
"np",
".",
"atleast_1d",
"(",
"z",
")",
"assert",
"x",
".",
"shape",
"==",
"y",
".",
"shape",
"assert",
"y",
".",
"shape",
"==",
"z",
".",
"shape",
"lineattr",
"=",
"{",
"}",
"if",
"color",
":",
"lineattr",
"[",
"'color'",
"]",
"=",
"color",
"if",
"width",
":",
"lineattr",
"[",
"'width'",
"]",
"=",
"width",
"if",
"dash",
":",
"lineattr",
"[",
"'dash'",
"]",
"=",
"dash",
"if",
"y",
".",
"ndim",
"==",
"2",
":",
"if",
"not",
"hasattr",
"(",
"label",
",",
"'__iter__'",
")",
":",
"if",
"label",
"is",
"None",
":",
"label",
"=",
"_labels",
"(",
")",
"else",
":",
"label",
"=",
"_labels",
"(",
"label",
")",
"data",
"=",
"[",
"go",
".",
"Scatter3d",
"(",
"x",
"=",
"xx",
",",
"y",
"=",
"yy",
",",
"z",
"=",
"zz",
",",
"name",
"=",
"ll",
",",
"line",
"=",
"lineattr",
",",
"mode",
"=",
"mode",
",",
"opacity",
"=",
"opacity",
")",
"for",
"ll",
",",
"xx",
",",
"yy",
",",
"zz",
"in",
"zip",
"(",
"label",
",",
"x",
".",
"T",
",",
"y",
".",
"T",
",",
"z",
".",
"T",
")",
"]",
"else",
":",
"data",
"=",
"[",
"go",
".",
"Scatter3d",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"z",
"=",
"z",
",",
"name",
"=",
"label",
",",
"line",
"=",
"lineattr",
",",
"mode",
"=",
"mode",
",",
"opacity",
"=",
"opacity",
")",
"]",
"return",
"Chart",
"(",
"data",
"=",
"data",
")"
] | Create a 3d line chart. | [
"Create",
"a",
"3d",
"line",
"chart",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L668-L696 |
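A sketch tracing a helix with ``line3d``::
    import numpy as np
    t = np.linspace(0, 4 * np.pi, 200)
    chart = line3d(np.cos(t), np.sin(t), t, label='helix', mode='lines', width=4)
    chart.show()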
jwkvam/plotlywrapper | plotlywrapper.py | scatter | def scatter(
x=None,
y=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
markersize=6,
yaxis=1,
fill=None,
text="",
mode='markers',
):
"""Draws dots.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : array-like, optional
Returns
-------
Chart
"""
return line(
x=x,
y=y,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
yaxis=yaxis,
fill=fill,
text=text,
markersize=markersize,
) | python | def scatter(
x=None,
y=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
markersize=6,
yaxis=1,
fill=None,
text="",
mode='markers',
):
"""Draws dots.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : array-like, optional
Returns
-------
Chart
"""
return line(
x=x,
y=y,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
yaxis=yaxis,
fill=fill,
text=text,
markersize=markersize,
) | [
"def",
"scatter",
"(",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"markersize",
"=",
"6",
",",
"yaxis",
"=",
"1",
",",
"fill",
"=",
"None",
",",
"text",
"=",
"\"\"",
",",
"mode",
"=",
"'markers'",
",",
")",
":",
"return",
"line",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"width",
"=",
"width",
",",
"dash",
"=",
"dash",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"mode",
",",
"yaxis",
"=",
"yaxis",
",",
"fill",
"=",
"fill",
",",
"text",
"=",
"text",
",",
"markersize",
"=",
"markersize",
",",
")"
] | Draws dots.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : array-like, optional
Returns
-------
Chart | [
"Draws",
"dots",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L746-L786 |
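Since `scatter` simply forwards to `line` with `mode='markers'`, a usage sketch looks like this (sample data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

rng = np.random.RandomState(0)
x = rng.randn(100)
y = 2 * x + rng.randn(100)  # noisy linear relation, for illustration
pw.scatter(x, y, label='points', markersize=8).show()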
jwkvam/plotlywrapper | plotlywrapper.py | bar | def bar(x=None, y=None, label=None, mode='group', yaxis=1, opacity=None):
"""Create a bar chart.
Parameters
----------
x : array-like, optional
y : TODO, optional
label : TODO, optional
mode : 'group' or 'stack', default 'group'
opacity : TODO, optional
Returns
-------
Chart
A Chart with bar graph data.
"""
assert x is not None or y is not None, "x or y must be something"
yn = 'y' + str(yaxis)
if y is None:
y = x
x = None
if x is None:
x = np.arange(len(y))
else:
x = _try_pydatetime(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
if y.ndim == 2:
if not hasattr(label, '__iter__'):
if label is None:
label = _labels()
else:
label = _labels(label)
data = [go.Bar(x=x, y=yy, name=ll, yaxis=yn, opacity=opacity) for ll, yy in zip(label, y.T)]
else:
data = [go.Bar(x=x, y=y, name=label, yaxis=yn, opacity=opacity)]
if yaxis == 1:
return Chart(data=data, layout={'barmode': mode})
return Chart(data=data, layout={'barmode': mode, 'yaxis' + str(yaxis): dict(overlaying='y')}) | python | def bar(x=None, y=None, label=None, mode='group', yaxis=1, opacity=None):
"""Create a bar chart.
Parameters
----------
x : array-like, optional
y : TODO, optional
label : TODO, optional
mode : 'group' or 'stack', default 'group'
opacity : TODO, optional
Returns
-------
Chart
A Chart with bar graph data.
"""
assert x is not None or y is not None, "x or y must be something"
yn = 'y' + str(yaxis)
if y is None:
y = x
x = None
if x is None:
x = np.arange(len(y))
else:
x = _try_pydatetime(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
if y.ndim == 2:
if not hasattr(label, '__iter__'):
if label is None:
label = _labels()
else:
label = _labels(label)
data = [go.Bar(x=x, y=yy, name=ll, yaxis=yn, opacity=opacity) for ll, yy in zip(label, y.T)]
else:
data = [go.Bar(x=x, y=y, name=label, yaxis=yn, opacity=opacity)]
if yaxis == 1:
return Chart(data=data, layout={'barmode': mode})
return Chart(data=data, layout={'barmode': mode, 'yaxis' + str(yaxis): dict(overlaying='y')}) | [
"def",
"bar",
"(",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"label",
"=",
"None",
",",
"mode",
"=",
"'group'",
",",
"yaxis",
"=",
"1",
",",
"opacity",
"=",
"None",
")",
":",
"assert",
"x",
"is",
"not",
"None",
"or",
"y",
"is",
"not",
"None",
",",
"\"x or y must be something\"",
"yn",
"=",
"'y'",
"+",
"str",
"(",
"yaxis",
")",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"x",
"x",
"=",
"None",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"y",
")",
")",
"else",
":",
"x",
"=",
"_try_pydatetime",
"(",
"x",
")",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"y",
"=",
"np",
".",
"atleast_1d",
"(",
"y",
")",
"if",
"y",
".",
"ndim",
"==",
"2",
":",
"if",
"not",
"hasattr",
"(",
"label",
",",
"'__iter__'",
")",
":",
"if",
"label",
"is",
"None",
":",
"label",
"=",
"_labels",
"(",
")",
"else",
":",
"label",
"=",
"_labels",
"(",
"label",
")",
"data",
"=",
"[",
"go",
".",
"Bar",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"yy",
",",
"name",
"=",
"ll",
",",
"yaxis",
"=",
"yn",
",",
"opacity",
"=",
"opacity",
")",
"for",
"ll",
",",
"yy",
"in",
"zip",
"(",
"label",
",",
"y",
".",
"T",
")",
"]",
"else",
":",
"data",
"=",
"[",
"go",
".",
"Bar",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"name",
"=",
"label",
",",
"yaxis",
"=",
"yn",
",",
"opacity",
"=",
"opacity",
")",
"]",
"if",
"yaxis",
"==",
"1",
":",
"return",
"Chart",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"{",
"'barmode'",
":",
"mode",
"}",
")",
"return",
"Chart",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"{",
"'barmode'",
":",
"mode",
",",
"'yaxis'",
"+",
"str",
"(",
"yaxis",
")",
":",
"dict",
"(",
"overlaying",
"=",
"'y'",
")",
"}",
")"
] | Create a bar chart.
Parameters
----------
x : array-like, optional
y : TODO, optional
label : TODO, optional
mode : 'group' or 'stack', default 'group'
opacity : TODO, optional
Returns
-------
Chart
A Chart with bar graph data. | [
"Create",
"a",
"bar",
"chart",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L789-L829 |
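A sketch of both `mode` values accepted by `bar`; a 2d `y` yields one trace per column, so `label` should supply one name per series (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

y = np.array([[2, 5], [3, 1], [4, 4]])  # 3 categories x 2 series
pw.bar(y=y, label=['a', 'b'], mode='group')         # side-by-side bars
pw.bar(y=y, label=['a', 'b'], mode='stack').show()  # stacked bars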
jwkvam/plotlywrapper | plotlywrapper.py | heatmap | def heatmap(z, x=None, y=None, colorscale='Viridis'):
"""Create a heatmap.
Parameters
----------
z : TODO
x : TODO, optional
y : TODO, optional
colorscale : TODO, optional
Returns
-------
Chart
"""
z = np.atleast_1d(z)
data = [go.Heatmap(z=z, x=x, y=y, colorscale=colorscale)]
return Chart(data=data) | python | def heatmap(z, x=None, y=None, colorscale='Viridis'):
"""Create a heatmap.
Parameters
----------
z : TODO
x : TODO, optional
y : TODO, optional
colorscale : TODO, optional
Returns
-------
Chart
"""
z = np.atleast_1d(z)
data = [go.Heatmap(z=z, x=x, y=y, colorscale=colorscale)]
return Chart(data=data) | [
"def",
"heatmap",
"(",
"z",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"colorscale",
"=",
"'Viridis'",
")",
":",
"z",
"=",
"np",
".",
"atleast_1d",
"(",
"z",
")",
"data",
"=",
"[",
"go",
".",
"Heatmap",
"(",
"z",
"=",
"z",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"colorscale",
"=",
"colorscale",
")",
"]",
"return",
"Chart",
"(",
"data",
"=",
"data",
")"
] | Create a heatmap.
Parameters
----------
z : TODO
x : TODO, optional
y : TODO, optional
colorscale : TODO, optional
Returns
-------
Chart | [
"Create",
"a",
"heatmap",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L832-L850 |
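Sketch for `heatmap` with assumed random data; any plotly colorscale name can replace the default 'Viridis':

import numpy as np
import plotlywrapper as pw  # assumed import alias

z = np.random.rand(10, 10)  # 2d grid of values
pw.heatmap(z, colorscale='Portland').show()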
jwkvam/plotlywrapper | plotlywrapper.py | fill_zero | def fill_zero(
x=None,
y=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
**kargs
):
"""Fill to zero.
Parameters
----------
x : array-like, optional
y : TODO, optional
label : TODO, optional
Returns
-------
Chart
"""
return line(
x=x,
y=y,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill='tozeroy',
**kargs
) | python | def fill_zero(
x=None,
y=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
**kargs
):
"""Fill to zero.
Parameters
----------
x : array-like, optional
y : TODO, optional
label : TODO, optional
Returns
-------
Chart
"""
return line(
x=x,
y=y,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill='tozeroy',
**kargs
) | [
"def",
"fill_zero",
"(",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"mode",
"=",
"'lines+markers'",
",",
"*",
"*",
"kargs",
")",
":",
"return",
"line",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"width",
"=",
"width",
",",
"dash",
"=",
"dash",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"mode",
",",
"fill",
"=",
"'tozeroy'",
",",
"*",
"*",
"kargs",
")"
] | Fill to zero.
Parameters
----------
x : array-like, optional
y : TODO, optional
label : TODO, optional
Returns
-------
Chart | [
"Fill",
"to",
"zero",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L853-L888 |
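`fill_zero` is `line` with `fill='tozeroy'`, shading the area between the curve and y=0; a short sketch (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

x = np.linspace(0, 10, 100)
pw.fill_zero(x, np.sin(x), label='sin').show()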
jwkvam/plotlywrapper | plotlywrapper.py | fill_between | def fill_between(
x=None,
ylow=None,
yhigh=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
**kargs
):
"""Fill between `ylow` and `yhigh`.
Parameters
----------
x : array-like, optional
ylow : TODO, optional
yhigh : TODO, optional
Returns
-------
Chart
"""
plot = line(
x=x,
y=ylow,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill=None,
**kargs
)
plot += line(
x=x,
y=yhigh,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill='tonexty',
**kargs
)
return plot | python | def fill_between(
x=None,
ylow=None,
yhigh=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
**kargs
):
"""Fill between `ylow` and `yhigh`.
Parameters
----------
x : array-like, optional
ylow : TODO, optional
yhigh : TODO, optional
Returns
-------
Chart
"""
plot = line(
x=x,
y=ylow,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill=None,
**kargs
)
plot += line(
x=x,
y=yhigh,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill='tonexty',
**kargs
)
return plot | [
"def",
"fill_between",
"(",
"x",
"=",
"None",
",",
"ylow",
"=",
"None",
",",
"yhigh",
"=",
"None",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"mode",
"=",
"'lines+markers'",
",",
"*",
"*",
"kargs",
")",
":",
"plot",
"=",
"line",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"ylow",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"width",
"=",
"width",
",",
"dash",
"=",
"dash",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"mode",
",",
"fill",
"=",
"None",
",",
"*",
"*",
"kargs",
")",
"plot",
"+=",
"line",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"yhigh",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"width",
"=",
"width",
",",
"dash",
"=",
"dash",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"mode",
",",
"fill",
"=",
"'tonexty'",
",",
"*",
"*",
"kargs",
")",
"return",
"plot"
] | Fill between `ylow` and `yhigh`.
Parameters
----------
x : array-like, optional
ylow : TODO, optional
yhigh : TODO, optional
Returns
-------
Chart | [
"Fill",
"between",
"ylow",
"and",
"yhigh",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L891-L940 |
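`fill_between` draws `ylow` first, then `yhigh` with `fill='tonexty'`, so the band spans the two curves; Charts also compose with `+=`, as the implementation itself shows (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

x = np.linspace(0, 10, 100)
mid = np.sin(x)
band = pw.fill_between(x, mid - 0.3, mid + 0.3, label='band', opacity=0.4)
band += pw.line(x, mid, label='mean')  # overlay the center line
band.show()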
jwkvam/plotlywrapper | plotlywrapper.py | rug | def rug(x, label=None, opacity=None):
"""Rug chart.
Parameters
----------
x : array-like, optional
label : TODO, optional
opacity : TODO, optional
Returns
-------
Chart
"""
x = _try_pydatetime(x)
x = np.atleast_1d(x)
data = [
go.Scatter(
x=x,
y=np.ones_like(x),
name=label,
opacity=opacity,
mode='markers',
marker=dict(symbol='line-ns-open'),
)
]
layout = dict(
barmode='overlay',
hovermode='closest',
legend=dict(traceorder='reversed'),
xaxis1=dict(zeroline=False),
yaxis1=dict(
domain=[0.85, 1],
showline=False,
showgrid=False,
zeroline=False,
anchor='free',
position=0.0,
showticklabels=False,
),
)
return Chart(data=data, layout=layout) | python | def rug(x, label=None, opacity=None):
"""Rug chart.
Parameters
----------
x : array-like, optional
label : TODO, optional
opacity : TODO, optional
Returns
-------
Chart
"""
x = _try_pydatetime(x)
x = np.atleast_1d(x)
data = [
go.Scatter(
x=x,
y=np.ones_like(x),
name=label,
opacity=opacity,
mode='markers',
marker=dict(symbol='line-ns-open'),
)
]
layout = dict(
barmode='overlay',
hovermode='closest',
legend=dict(traceorder='reversed'),
xaxis1=dict(zeroline=False),
yaxis1=dict(
domain=[0.85, 1],
showline=False,
showgrid=False,
zeroline=False,
anchor='free',
position=0.0,
showticklabels=False,
),
)
return Chart(data=data, layout=layout) | [
"def",
"rug",
"(",
"x",
",",
"label",
"=",
"None",
",",
"opacity",
"=",
"None",
")",
":",
"x",
"=",
"_try_pydatetime",
"(",
"x",
")",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"data",
"=",
"[",
"go",
".",
"Scatter",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"np",
".",
"ones_like",
"(",
"x",
")",
",",
"name",
"=",
"label",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"'markers'",
",",
"marker",
"=",
"dict",
"(",
"symbol",
"=",
"'line-ns-open'",
")",
",",
")",
"]",
"layout",
"=",
"dict",
"(",
"barmode",
"=",
"'overlay'",
",",
"hovermode",
"=",
"'closest'",
",",
"legend",
"=",
"dict",
"(",
"traceorder",
"=",
"'reversed'",
")",
",",
"xaxis1",
"=",
"dict",
"(",
"zeroline",
"=",
"False",
")",
",",
"yaxis1",
"=",
"dict",
"(",
"domain",
"=",
"[",
"0.85",
",",
"1",
"]",
",",
"showline",
"=",
"False",
",",
"showgrid",
"=",
"False",
",",
"zeroline",
"=",
"False",
",",
"anchor",
"=",
"'free'",
",",
"position",
"=",
"0.0",
",",
"showticklabels",
"=",
"False",
",",
")",
",",
")",
"return",
"Chart",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"layout",
")"
] | Rug chart.
Parameters
----------
x : array-like, optional
label : TODO, optional
opacity : TODO, optional
Returns
-------
Chart | [
"Rug",
"chart",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L943-L984 |
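Sketch for `rug`: every observation becomes one 'line-ns-open' marker in a thin strip at the top of the plot (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

samples = np.random.randn(200)
pw.rug(samples, label='draws').show()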
jwkvam/plotlywrapper | plotlywrapper.py | surface | def surface(x, y, z):
"""Surface plot.
Parameters
----------
x : array-like, optional
y : array-like, optional
z : array-like, optional
Returns
-------
Chart
"""
data = [go.Surface(x=x, y=y, z=z)]
return Chart(data=data) | python | def surface(x, y, z):
"""Surface plot.
Parameters
----------
x : array-like, optional
y : array-like, optional
z : array-like, optional
Returns
-------
Chart
"""
data = [go.Surface(x=x, y=y, z=z)]
return Chart(data=data) | [
"def",
"surface",
"(",
"x",
",",
"y",
",",
"z",
")",
":",
"data",
"=",
"[",
"go",
".",
"Surface",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"z",
"=",
"z",
")",
"]",
"return",
"Chart",
"(",
"data",
"=",
"data",
")"
] | Surface plot.
Parameters
----------
x : array-like, optional
y : array-like, optional
z : array-like, optional
Returns
-------
Chart | [
"Surface",
"plot",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L987-L1002 |
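Sketch for `surface` with 1d axis coordinates and a 2d height grid, a combination plotly's `go.Surface` accepts (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

x = np.linspace(-2, 2, 50)
y = np.linspace(-2, 2, 50)
xx, yy = np.meshgrid(x, y)
z = np.exp(-(xx ** 2 + yy ** 2))  # a Gaussian bump
pw.surface(x, y, z).show()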
jwkvam/plotlywrapper | plotlywrapper.py | hist | def hist(x, mode='overlay', label=None, opacity=None, horz=False, histnorm=None):
"""Histogram.
Parameters
----------
x : array-like
mode : str, optional
label : TODO, optional
opacity : float, optional
horz : bool, optional
histnorm : None, "percent", "probability", "density", "probability density", optional
Specifies the type of normalization used for this histogram trace.
If ``None``, the span of each bar corresponds to the number of occurrences
(i.e. the number of data points lying inside the bins). If "percent",
the span of each bar corresponds to the percentage of occurrences with
respect to the total number of sample points (here, the sum of all bin
area equals 100%). If "density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of the bin interval
(here, the sum of all bin area equals the total number of sample
points). If "probability density", the span of each bar corresponds to
the probability that an event will fall into the corresponding bin
(here, the sum of all bin area equals 1).
Returns
-------
Chart
"""
x = np.atleast_1d(x)
if horz:
kargs = dict(y=x)
else:
kargs = dict(x=x)
layout = dict(barmode=mode)
data = [go.Histogram(opacity=opacity, name=label, histnorm=histnorm, **kargs)]
return Chart(data=data, layout=layout) | python | def hist(x, mode='overlay', label=None, opacity=None, horz=False, histnorm=None):
"""Histogram.
Parameters
----------
x : array-like
mode : str, optional
label : TODO, optional
opacity : float, optional
horz : bool, optional
histnorm : None, "percent", "probability", "density", "probability density", optional
Specifies the type of normalization used for this histogram trace.
If ``None``, the span of each bar corresponds to the number of occurrences
(i.e. the number of data points lying inside the bins). If "percent",
the span of each bar corresponds to the percentage of occurrences with
respect to the total number of sample points (here, the sum of all bin
area equals 100%). If "density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of the bin interval
(here, the sum of all bin area equals the total number of sample
points). If "probability density", the span of each bar corresponds to
the probability that an event will fall into the corresponding bin
(here, the sum of all bin area equals 1).
Returns
-------
Chart
"""
x = np.atleast_1d(x)
if horz:
kargs = dict(y=x)
else:
kargs = dict(x=x)
layout = dict(barmode=mode)
data = [go.Histogram(opacity=opacity, name=label, histnorm=histnorm, **kargs)]
return Chart(data=data, layout=layout) | [
"def",
"hist",
"(",
"x",
",",
"mode",
"=",
"'overlay'",
",",
"label",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"horz",
"=",
"False",
",",
"histnorm",
"=",
"None",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"if",
"horz",
":",
"kargs",
"=",
"dict",
"(",
"y",
"=",
"x",
")",
"else",
":",
"kargs",
"=",
"dict",
"(",
"x",
"=",
"x",
")",
"layout",
"=",
"dict",
"(",
"barmode",
"=",
"mode",
")",
"data",
"=",
"[",
"go",
".",
"Histogram",
"(",
"opacity",
"=",
"opacity",
",",
"name",
"=",
"label",
",",
"histnorm",
"=",
"histnorm",
",",
"*",
"*",
"kargs",
")",
"]",
"return",
"Chart",
"(",
"data",
"=",
"data",
",",
"layout",
"=",
"layout",
")"
] | Histogram.
Parameters
----------
x : array-like
mode : str, optional
label : TODO, optional
opacity : float, optional
horz : bool, optional
histnorm : None, "percent", "probability", "density", "probability density", optional
Specifies the type of normalization used for this histogram trace.
If ``None``, the span of each bar corresponds to the number of occurrences
(i.e. the number of data points lying inside the bins). If "percent",
the span of each bar corresponds to the percentage of occurrences with
respect to the total number of sample points (here, the sum of all bin
area equals 100%). If "density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of the bin interval
(here, the sum of all bin area equals the total number of sample
points). If "probability density", the span of each bar corresponds to
the probability that an event will fall into the corresponding bin
(here, the sum of all bin area equals 1).
Returns
-------
Chart | [
"Histogram",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L1005-L1040 |
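Sketch for `hist` using one of the `histnorm` values documented above (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

data = np.random.randn(1000)
# with histnorm='probability density' the bar areas sum to 1
pw.hist(data, label='samples', opacity=0.7, histnorm='probability density').show()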
jwkvam/plotlywrapper | plotlywrapper.py | hist2d | def hist2d(x, y, label=None, opacity=None):
"""2D Histogram.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : TODO, optional
opacity : float, optional
Returns
-------
Chart
"""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
data = [go.Histogram2d(x=x, y=y, opacity=opacity, name=label)]
return Chart(data=data) | python | def hist2d(x, y, label=None, opacity=None):
"""2D Histogram.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : TODO, optional
opacity : float, optional
Returns
-------
Chart
"""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
data = [go.Histogram2d(x=x, y=y, opacity=opacity, name=label)]
return Chart(data=data) | [
"def",
"hist2d",
"(",
"x",
",",
"y",
",",
"label",
"=",
"None",
",",
"opacity",
"=",
"None",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"y",
"=",
"np",
".",
"atleast_1d",
"(",
"y",
")",
"data",
"=",
"[",
"go",
".",
"Histogram2d",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"opacity",
"=",
"opacity",
",",
"name",
"=",
"label",
")",
"]",
"return",
"Chart",
"(",
"data",
"=",
"data",
")"
] | 2D Histogram.
Parameters
----------
x : array-like, optional
y : array-like, optional
label : TODO, optional
opacity : float, optional
Returns
-------
Chart | [
"2D",
"Histogram",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L1043-L1061 |
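Sketch for `hist2d` on two correlated samples (data and import alias assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

rng = np.random.RandomState(1)
x = rng.randn(5000)
y = x + rng.randn(5000)
pw.hist2d(x, y).show()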
jwkvam/plotlywrapper | plotlywrapper.py | Chart.ytickangle | def ytickangle(self, angle, index=1):
"""Set the angle of the y-axis tick labels.
Parameters
----------
angle : int
Angle in degrees
index : int, optional
Y-axis index
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['tickangle'] = angle
return self | python | def ytickangle(self, angle, index=1):
"""Set the angle of the y-axis tick labels.
Parameters
----------
angle : int
Angle in degrees
index : int, optional
Y-axis index
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['tickangle'] = angle
return self | [
"def",
"ytickangle",
"(",
"self",
",",
"angle",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'tickangle'",
"]",
"=",
"angle",
"return",
"self"
] | Set the angle of the y-axis tick labels.
Parameters
----------
angle : int
Angle in degrees
index : int, optional
Y-axis index
Returns
-------
Chart | [
"Set",
"the",
"angle",
"of",
"the",
"y",
"-",
"axis",
"tick",
"labels",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L221-L237 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.ylabelsize | def ylabelsize(self, size, index=1):
"""Set the size of the label.
Parameters
----------
size : int
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['titlefont']['size'] = size
return self | python | def ylabelsize(self, size, index=1):
"""Set the size of the label.
Parameters
----------
size : int
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['titlefont']['size'] = size
return self | [
"def",
"ylabelsize",
"(",
"self",
",",
"size",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'titlefont'",
"]",
"[",
"'size'",
"]",
"=",
"size",
"return",
"self"
] | Set the size of the label.
Parameters
----------
size : int
Returns
-------
Chart | [
"Set",
"the",
"size",
"of",
"the",
"label",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L254-L267 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.yticksize | def yticksize(self, size, index=1):
"""Set the tick font size.
Parameters
----------
size : int
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['tickfont']['size'] = size
return self | python | def yticksize(self, size, index=1):
"""Set the tick font size.
Parameters
----------
size : int
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['tickfont']['size'] = size
return self | [
"def",
"yticksize",
"(",
"self",
",",
"size",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'tickfont'",
"]",
"[",
"'size'",
"]",
"=",
"size",
"return",
"self"
] | Set the tick font size.
Parameters
----------
size : int
Returns
-------
Chart | [
"Set",
"the",
"tick",
"font",
"size",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L284-L297 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.ytickvals | def ytickvals(self, values, index=1):
"""Set the tick values.
Parameters
----------
values : array-like
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['tickvals'] = values
return self | python | def ytickvals(self, values, index=1):
"""Set the tick values.
Parameters
----------
values : array-like
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['tickvals'] = values
return self | [
"def",
"ytickvals",
"(",
"self",
",",
"values",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'tickvals'",
"]",
"=",
"values",
"return",
"self"
] | Set the tick values.
Parameters
----------
values : array-like
Returns
-------
Chart | [
"Set",
"the",
"tick",
"values",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L299-L312 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.yticktext | def yticktext(self, labels, index=1):
"""Set the tick labels.
Parameters
----------
labels : array-like
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['ticktext'] = labels
return self | python | def yticktext(self, labels, index=1):
"""Set the tick labels.
Parameters
----------
labels : array-like
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['ticktext'] = labels
return self | [
"def",
"yticktext",
"(",
"self",
",",
"labels",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'ticktext'",
"]",
"=",
"labels",
"return",
"self"
] | Set the tick labels.
Parameters
----------
labels : array-like
Returns
-------
Chart | [
"Set",
"the",
"tick",
"labels",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L314-L327 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.ylim | def ylim(self, low, high, index=1):
"""Set yaxis limits.
Parameters
----------
low : number
high : number
index : int, optional
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['range'] = [low, high]
return self | python | def ylim(self, low, high, index=1):
"""Set yaxis limits.
Parameters
----------
low : number
high : number
index : int, optional
Returns
-------
Chart
"""
self.layout['yaxis' + str(index)]['range'] = [low, high]
return self | [
"def",
"ylim",
"(",
"self",
",",
"low",
",",
"high",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'range'",
"]",
"=",
"[",
"low",
",",
"high",
"]",
"return",
"self"
] | Set yaxis limits.
Parameters
----------
low : number
high : number
index : int, optional
Returns
-------
Chart | [
"Set",
"yaxis",
"limits",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L345-L360 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.ydtick | def ydtick(self, dtick, index=1):
"""Set the tick distance."""
self.layout['yaxis' + str(index)]['dtick'] = dtick
return self | python | def ydtick(self, dtick, index=1):
"""Set the tick distance."""
self.layout['yaxis' + str(index)]['dtick'] = dtick
return self | [
"def",
"ydtick",
"(",
"self",
",",
"dtick",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'dtick'",
"]",
"=",
"dtick",
"return",
"self"
] | Set the tick distance. | [
"Set",
"the",
"tick",
"distance",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L367-L370 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.ynticks | def ynticks(self, nticks, index=1):
"""Set the number of ticks."""
self.layout['yaxis' + str(index)]['nticks'] = nticks
return self | python | def ynticks(self, nticks, index=1):
"""Set the number of ticks."""
self.layout['yaxis' + str(index)]['nticks'] = nticks
return self | [
"def",
"ynticks",
"(",
"self",
",",
"nticks",
",",
"index",
"=",
"1",
")",
":",
"self",
".",
"layout",
"[",
"'yaxis'",
"+",
"str",
"(",
"index",
")",
"]",
"[",
"'nticks'",
"]",
"=",
"nticks",
"return",
"self"
] | Set the number of ticks. | [
"Set",
"the",
"number",
"of",
"ticks",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L377-L380 |
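Because each y-axis setter above returns `self`, they chain fluently; a sketch combining several of them (data and values assumed):

import numpy as np
import plotlywrapper as pw  # assumed import alias

chart = pw.line(np.arange(20), np.random.rand(20))
chart.ylim(0, 1).ydtick(0.25).ytickangle(45).ylabelsize(14)
chart.show()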
jwkvam/plotlywrapper | plotlywrapper.py | Chart.show | def show(
self,
filename: Optional[str] = None,
show_link: bool = True,
auto_open: bool = True,
detect_notebook: bool = True,
) -> None:
"""Display the chart.
Parameters
----------
filename : str, optional
Save plot to this filename, otherwise it's saved to a temporary file.
show_link : bool, optional
Show link to plotly.
auto_open : bool, optional
Automatically open the plot (in the browser).
detect_notebook : bool, optional
Try to detect if we're running in a notebook.
"""
kargs = {}
if detect_notebook and _detect_notebook():
py.init_notebook_mode()
plot = py.iplot
else:
plot = py.plot
if filename is None:
filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
kargs['filename'] = filename
kargs['auto_open'] = auto_open
plot(self, show_link=show_link, **kargs) | python | def show(
self,
filename: Optional[str] = None,
show_link: bool = True,
auto_open: bool = True,
detect_notebook: bool = True,
) -> None:
"""Display the chart.
Parameters
----------
filename : str, optional
Save plot to this filename, otherwise it's saved to a temporary file.
show_link : bool, optional
Show link to plotly.
auto_open : bool, optional
Automatically open the plot (in the browser).
detect_notebook : bool, optional
Try to detect if we're running in a notebook.
"""
kargs = {}
if detect_notebook and _detect_notebook():
py.init_notebook_mode()
plot = py.iplot
else:
plot = py.plot
if filename is None:
filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
kargs['filename'] = filename
kargs['auto_open'] = auto_open
plot(self, show_link=show_link, **kargs) | [
"def",
"show",
"(",
"self",
",",
"filename",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"show_link",
":",
"bool",
"=",
"True",
",",
"auto_open",
":",
"bool",
"=",
"True",
",",
"detect_notebook",
":",
"bool",
"=",
"True",
",",
")",
"->",
"None",
":",
"kargs",
"=",
"{",
"}",
"if",
"detect_notebook",
"and",
"_detect_notebook",
"(",
")",
":",
"py",
".",
"init_notebook_mode",
"(",
")",
"plot",
"=",
"py",
".",
"iplot",
"else",
":",
"plot",
"=",
"py",
".",
"plot",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"'plotly'",
",",
"suffix",
"=",
"'.html'",
",",
"delete",
"=",
"False",
")",
".",
"name",
"kargs",
"[",
"'filename'",
"]",
"=",
"filename",
"kargs",
"[",
"'auto_open'",
"]",
"=",
"auto_open",
"plot",
"(",
"self",
",",
"show_link",
"=",
"show_link",
",",
"*",
"*",
"kargs",
")"
] | Display the chart.
Parameters
----------
filename : str, optional
Save plot to this filename, otherwise it's saved to a temporary file.
show_link : bool, optional
Show link to plotly.
auto_open : bool, optional
Automatically open the plot (in the browser).
detect_notebook : bool, optional
Try to detect if we're running in a notebook. | [
"Display",
"the",
"chart",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L410-L442 |
jwkvam/plotlywrapper | plotlywrapper.py | Chart.save | def save(
self,
filename: Optional[str] = None,
show_link: bool = True,
auto_open: bool = False,
output: str = 'file',
plotlyjs: bool = True,
) -> str:
"""Save the chart to an html file."""
if filename is None:
filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
# NOTE: this doesn't work for output 'div'
py.plot(
self,
show_link=show_link,
filename=filename,
auto_open=auto_open,
output_type=output,
include_plotlyjs=plotlyjs,
)
return filename | python | def save(
self,
filename: Optional[str] = None,
show_link: bool = True,
auto_open: bool = False,
output: str = 'file',
plotlyjs: bool = True,
) -> str:
"""Save the chart to an html file."""
if filename is None:
filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
# NOTE: this doesn't work for output 'div'
py.plot(
self,
show_link=show_link,
filename=filename,
auto_open=auto_open,
output_type=output,
include_plotlyjs=plotlyjs,
)
return filename | [
"def",
"save",
"(",
"self",
",",
"filename",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"show_link",
":",
"bool",
"=",
"True",
",",
"auto_open",
":",
"bool",
"=",
"False",
",",
"output",
":",
"str",
"=",
"'file'",
",",
"plotlyjs",
":",
"bool",
"=",
"True",
",",
")",
"->",
"str",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"'plotly'",
",",
"suffix",
"=",
"'.html'",
",",
"delete",
"=",
"False",
")",
".",
"name",
"# NOTE: this doesn't work for output 'div'",
"py",
".",
"plot",
"(",
"self",
",",
"show_link",
"=",
"show_link",
",",
"filename",
"=",
"filename",
",",
"auto_open",
"=",
"auto_open",
",",
"output_type",
"=",
"output",
",",
"include_plotlyjs",
"=",
"plotlyjs",
",",
")",
"return",
"filename"
] | Save the chart to an html file. | [
"Save",
"the",
"chart",
"to",
"an",
"html",
"file",
"."
] | train | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L444-L464 |
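Sketch contrasting `show` and `save`; the filename is illustrative, and `save` returns the path it wrote:

import numpy as np
import plotlywrapper as pw  # assumed import alias

chart = pw.line(np.arange(10), np.arange(10) ** 2)
path = chart.save(filename='demo.html', auto_open=False)
print(path)  # 'demo.html'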
cgarciae/phi | phi/builder.py | Builder.RegisterMethod | def RegisterMethod(cls, *args, **kwargs):
"""
**RegisterMethod**
RegisterMethod(f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True)
`classmethod` for registering functions as methods of this class.
**Arguments**
* **f** : the particular function being registered as a method
* **library_path** : the library `f` comes from; unless you pass an empty string, put a period `"."` at the end of the library name.
* `alias=None` : alias for the name/method being registered
* `original_name=None` : name of the original function, used for documentation purposes.
* `doc=None` : complete documentation of the method being registered
* `wrapped=None` : if you are registering a function which wraps around another function, pass this other function through `wrapped` to get better documentation, this is especially useful if you register a bunch of functions in a for loop. Please include an `explanation` to tell how the actual function differs from the wrapped one.
* `explanation=""` : specify any additional information for the documentation of the method being registered, you can use any of the following format tags within this string and they will be replaced later on: `{original_name}`, `{name}`, `{fn_docs}`, `{library_path}`, `{builder_class}`.
* `method_type=identity` : by default it's applied but does nothing, you might also want to register functions as `property`, `classmethod`, `staticmethod`
* `explain=True` : decide whether or not to show any kind of explanation, it's useful to set it to `False` if you are using a `Register*` decorator and will only use the function as a registered method.
A main feature of `phi` is that it enables you to integrate your library or even an existing library with the DSL. You can achieve three levels of integration
1. Passing your functions to the DSL. This is a very general mechanism -since you could actually do everything with python lambdas- but in practice functions often receive multiple parameters.
2. Creating partials with the `Then*` method family. Using this you could integrate any function, but it will add a lot of noise if you rely heavily on it.
3. Registering functions as methods of a `Builder` derived class. This produces the most readable code and it's the approach you should take if you want to create a Phi-based library or a helper class.
While point 3 is the most desirable it has a cost: you need to create your own `phi.builder.Builder`-derived class. This is because you SHOULD NOT register functions to existing builders e.g. the `phi.builder.Builder` or [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) provided by phi because that would pollute the `P` object. Instead you should create a custom class that derives from `phi.builder.Builder`, [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) or another custom builder depending on your needs and register your functions to that class.
**Examples**
Say you have a function in a library called `"my_lib"`
def some_fun(obj, arg1, arg2):
# code
You could use it with the dsl like this
from phi import P, Then
P.Pipe(
input,
...
Then(some_fun, arg1, arg2)
...
)
assuming the first parameter `obj` is being piped down. However if you do this very often or you are creating a library, you are better off creating a custom class derived from `Builder` or `PythonBuilder`
from phi import Builder #or PythonBuilder
class MyBuilder(Builder): # or PythonBuilder
pass
and registering your function as a method. The first way you could do this is by creating a wrapper function for `some_fun` and registering it as a method
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
MyBuilder.RegisterMethod(some_fun_wrapper, "my_lib.", wrapped=some_fun)
Here we basically created a shortcut for the original expression `Then(some_fun, arg1, arg2)`. You could also do this using a decorator
@MyBuilder.RegisterMethod("my_lib.", wrapped=some_fun)
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
However, this is such a common task that we've created the method `Register` to save you from having to create the wrapper. With it you could register the function `some_fun` directly as a method like this
MyBuilder.Register(some_fun, "my_lib.")
or by using a decorator over the original function definition
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
Once you've done any of the previous approaches you can create a custom global object e.g. `M` and use it instead of/along with `P`
M = MyBuilder(lambda x: x)
M.Pipe(
input,
...
M.some_fun(arg1, args)
...
)
**Argument position**
`phi.builder.Builder.Register` internally uses `phi.builder.Builder.Then`, this is only useful if the object being piped is intended to be passed as the first argument of the function being registered, if this is not the case you could use `phi.builder.Builder.Register2`, `phi.builder.Builder.Register3`, ..., `phi.builder.Builder.Register5` or `phi.builder.Builder.RegisterAt` to set an arbitrary position, these functions will internally use `phi.builder.Builder.Then2`, `phi.builder.Builder.Then3`, ..., `phi.builder.Builder.Then5` or `phi.builder.Builder.ThenAt` respectively.
**Wrapping functions**
Sometimes you have an existing function that you would like to modify slightly so it plays nicely with the DSL; what you normally do is create a function that wraps around it and passes the arguments to it in a way that is convenient
import some_lib
@MyBuilder.Register("some_lib.")
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
When you do this -as a side effect- you lose the original documentation; to avoid this you can use Register's `wrapped` argument along with the `explanation` argument to clarify the situation
import some_lib
some_fun_explanation = "However, it differs in that `n` is automatically subtracted `1`"
@MyBuilder.Register("some_lib.", wrapped=some_lib.some_fun, explanation=some_fun_explanation)
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
Now the documentation for `MyBuilder.some_fun` will be a little bit nicer since it includes the original documentation from `some_lib.some_fun`. This behaviour is especially useful if you are wrapping an entire 3rd party library; you usually automate the process by iterating over all the functions in a for loop. The `phi.builder.Builder.PatchAt` method lets you register an entire module using a few lines of code; however, sometimes you have to do things more manually and do the iteration yourself.
**See Also**
* `phi.builder.Builder.PatchAt`
* `phi.builder.Builder.RegisterAt`
"""
unpack_error = True
try:
f, library_path = args
unpack_error = False
cls._RegisterMethod(f, library_path, **kwargs)
except:
if not unpack_error:
raise
def register_decorator(f):
library_path, = args
cls._RegisterMethod(f, library_path, **kwargs)
return f
return register_decorator | python | def RegisterMethod(cls, *args, **kwargs):
"""
**RegisterMethod**
RegisterMethod(f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True)
`classmethod` for registering functions as methods of this class.
**Arguments**
* **f** : the particular function being registered as a method
* **library_path** : the library `f` comes from; unless you pass an empty string, put a period `"."` at the end of the library name.
* `alias=None` : alias for the name/method being registered
* `original_name=None` : name of the original function, used for documentation purposes.
* `doc=None` : complete documentation of the method being registered
* `wrapped=None` : if you are registering a function which wraps around another function, pass this other function through `wrapped` to get better documentation, this is especially useful if you register a bunch of functions in a for loop. Please include an `explanation` to tell how the actual function differs from the wrapped one.
* `explanation=""` : specify any additional information for the documentation of the method being registered, you can use any of the following format tags within this string and they will be replaced later on: `{original_name}`, `{name}`, `{fn_docs}`, `{library_path}`, `{builder_class}`.
* `method_type=identity` : by default it's applied but does nothing, you might also want to register functions as `property`, `classmethod`, `staticmethod`
* `explain=True` : decide whether or not to show any kind of explanation, it's useful to set it to `False` if you are using a `Register*` decorator and will only use the function as a registered method.
A main feature of `phi` is that it enables you to integrate your library or even an existing library with the DSL. You can achieve three levels of integration
1. Passing your functions to the DSL. This is a very general mechanism -since you could actually do everything with python lambdas- but in practice functions often receive multiple parameters.
2. Creating partials with the `Then*` method family. Using this you could integrate any function, but it will add a lot of noise if you rely heavily on it.
3. Registering functions as methods of a `Builder` derived class. This produces the most readable code and it's the approach you should take if you want to create a Phi-based library or a helper class.
While point 3 is the most desirable it has a cost: you need to create your own `phi.builder.Builder`-derived class. This is because you SHOULD NOT register functions to existing builders e.g. the `phi.builder.Builder` or [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) provided by phi because that would pollute the `P` object. Instead you should create a custom class that derives from `phi.builder.Builder`, [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) or another custom builder depending on your needs and register your functions to that class.
**Examples**
Say you have a function in a library called `"my_lib"`
def some_fun(obj, arg1, arg2):
# code
You could use it with the dsl like this
from phi import P, Then
P.Pipe(
input,
...
Then(some_fun, arg1, arg2)
...
)
assuming the first parameter `obj` is being piped down. However if you do this very often or you are creating a library, you are better off creating a custom class derived from `Builder` or `PythonBuilder`
from phi import Builder #or PythonBuilder
class MyBuilder(Builder): # or PythonBuilder
pass
and registering your function as a method. The first way you could do this is by creating a wrapper function for `some_fun` and registering it as a method
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
MyBuilder.RegisterMethod(some_fun_wrapper, "my_lib.", wrapped=some_fun)
Here we basically created a shortcut for the original expression `Then(some_fun, arg1, arg2)`. You could also do this using a decorator
@MyBuilder.RegisterMethod("my_lib.", wrapped=some_fun)
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
However, this is such a common task that we've created the method `Register` to save you from having to create the wrapper. With it you could register the function `some_fun` directly as a method like this
MyBuilder.Register(some_fun, "my_lib.")
or by using a decorator over the original function definition
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
Once you've done any of the previous approaches you can create a custom global object e.g. `M` and use it instead of/along with `P`
M = MyBuilder(lambda x: x)
M.Pipe(
input,
...
M.some_fun(arg1, args)
...
)
**Argument position**
`phi.builder.Builder.Register` internally uses `phi.builder.Builder.Then`, this is only useful if the object being piped is intended to be passed as the first argument of the function being registered, if this is not the case you could use `phi.builder.Builder.Register2`, `phi.builder.Builder.Register3`, ..., `phi.builder.Builder.Register5` or `phi.builder.Builder.RegisterAt` to set an arbitrary position, these functions will internally use `phi.builder.Builder.Then2`, `phi.builder.Builder.Then3`, ..., `phi.builder.Builder.Then5` or `phi.builder.Builder.ThenAt` respectively.
**Wrapping functions**
Sometimes you have an existing function that you would like to modify slightly so it plays nicely with the DSL; what you normally do is create a function that wraps around it and passes the arguments to it in a way that is convenient
import some_lib
@MyBuilder.Register("some_lib.")
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
When you do this -as a side effect- you lose the original documentation; to avoid this you can use Register's `wrapped` argument along with the `explanation` argument to clarify the situation
import some_lib
some_fun_explanation = "However, it differs in that `n` is automatically subtracted `1`"
@MyBuilder.Register("some_lib.", wrapped=some_lib.some_fun, explanation=some_fun_explanation)
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
Now the documentation for `MyBuilder.some_fun` will be a little bit nicer since it includes the original documentation from `some_lib.some_fun`. This behaviour is especially useful if you are wrapping an entire 3rd party library; you usually automate the process by iterating over all the functions in a for loop. The `phi.builder.Builder.PatchAt` method lets you register an entire module using a few lines of code; however, sometimes you have to do things more manually and do the iteration yourself.
**See Also**
* `phi.builder.Builder.PatchAt`
* `phi.builder.Builder.RegisterAt`
"""
unpack_error = True
try:
f, library_path = args
unpack_error = False
cls._RegisterMethod(f, library_path, **kwargs)
except:
if not unpack_error:
raise
def register_decorator(f):
library_path, = args
cls._RegisterMethod(f, library_path, **kwargs)
return f
return register_decorator | [
"def",
"RegisterMethod",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"unpack_error",
"=",
"True",
"try",
":",
"f",
",",
"library_path",
"=",
"args",
"unpack_error",
"=",
"False",
"cls",
".",
"_RegisterMethod",
"(",
"f",
",",
"library_path",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"if",
"not",
"unpack_error",
":",
"raise",
"def",
"register_decorator",
"(",
"f",
")",
":",
"library_path",
",",
"=",
"args",
"cls",
".",
"_RegisterMethod",
"(",
"f",
",",
"library_path",
",",
"*",
"*",
"kwargs",
")",
"return",
"f",
"return",
"register_decorator"
] | **RegisterMethod**
RegisterMethod(f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True)
`classmethod` for registering functions as methods of this class.
**Arguments**
* **f** : the particular function being registered as a method
* **library_path** : the library `f` comes from; unless you pass an empty string, put a period `"."` at the end of the library name.
* `alias=None` : alias for the name/method being registered
* `original_name=None` : name of the original function, used for documentation purposes.
* `doc=None` : complete documentation of the method being registered
* `wrapped=None` : if you are registering a function which wraps around another function, pass this other function through `wrapped` to get better documentation, this is especially useful if you register a bunch of functions in a for loop. Please include an `explanation` to tell how the actual function differs from the wrapped one.
* `explanation=""` : specify any additional information for the documentation of the method being registered, you can use any of the following format tags within this string and they will be replaced later on: `{original_name}`, `{name}`, `{fn_docs}`, `{library_path}`, `{builder_class}`.
* `method_type=identity` : by default it's applied but does nothing, you might also want to register functions as `property`, `classmethod`, `staticmethod`
* `explain=True` : decide whether or not to show any kind of explanation, it's useful to set it to `False` if you are using a `Register*` decorator and will only use the function as a registered method.
A main feature of `phi` is that it enables you to integrate your library or even an existing library with the DSL. You can achieve three levels of integration
1. Passing your functions to the DSL. This is a very general mechanism -since you could actually do everything with python lambdas- but in practice functions often receive multiple parameters.
2. Creating partials with the `Then*` method family. Using this you could integrate any function, but it will add a lot of noise if you rely heavily on it.
3. Registering functions as methods of a `Builder` derived class. This produces the most readable code and it's the approach you should take if you want to create a Phi-based library or a helper class.
While point 3 is the most desirable it has a cost: you need to create your own `phi.builder.Builder`-derived class. This is because you SHOULD NOT register functions to existing builders e.g. the `phi.builder.Builder` or [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) provided by phi because that would pollute the `P` object. Instead you should create a custom class that derives from `phi.builder.Builder`, [PythonBuilder](https://cgarciae.github.io/phi/builder.m.html#phi.python_builder.PythonBuilder) or another custom builder depending on your needs and register your functions to that class.
**Examples**
Say you have a function in a library called `"my_lib"`
def some_fun(obj, arg1, arg2):
# code
You could use it with the dsl like this
from phi import P, Then
P.Pipe(
input,
...
Then(some_fun, arg1, arg2)
...
)
assuming the first parameter `obj` is being piped down. However if you do this very often or you are creating a library, you are better off creating a custom class derived from `Builder` or `PythonBuilder`
from phi import Builder #or PythonBuilder
class MyBuilder(Builder): # or PythonBuilder
pass
and registering your function as a method. The first way you could do this is by creating a wrapper function for `some_fun` and registering it as a method
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
MyBuilder.RegisterMethod(some_fun_wrapper, "my_lib.", wrapped=some_fun)
Here we basically created a shortcut for the original expression `Then(some_fun, arg1, arg2)`. You could also do this using a decorator
@MyBuilder.RegisterMethod("my_lib.", wrapped=some_fun)
def some_fun_wrapper(self, arg1, arg2):
return self.Then(some_fun, arg1, arg2)
However, this is such a common task that we've created the method `Register` to save you from having to create the wrapper. With it you could register the function `some_fun` directly as a method like this
MyBuilder.Register(some_fun, "my_lib.")
or by using a decorator over the original function definition
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
Once you've done any of the previous approaches you can create a custom global object e.g. `M` and use it instead of/along with `P`
M = MyBuilder(lambda x: x)
M.Pipe(
input,
...
M.some_fun(arg1, args)
...
)
**Argument position**
`phi.builder.Builder.Register` internally uses `phi.builder.Builder.Then`, this is only useful if the object being piped is intended to be passed as the first argument of the function being registered, if this is not the case you could use `phi.builder.Builder.Register2`, `phi.builder.Builder.Register3`, ..., `phi.builder.Builder.Register5` or `phi.builder.Builder.RegisterAt` to set an arbitrary position, these functions will internally use `phi.builder.Builder.Then2`, `phi.builder.Builder.Then3`, ..., `phi.builder.Builder.Then5` or `phi.builder.Builder.ThenAt` respectively.
**Wrapping functions**
Sometimes you have an existing function that you would like to modify slightly so it plays nicely with the DSL; what you normally do is create a function that wraps around it and passes the arguments to it in a way that is convenient
import some_lib
@MyBuilder.Register("some_lib.")
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
When you do this -as a side effect- you lose the original documentation; to avoid this you can use Register's `wrapped` argument along with the `explanation` argument to clarify the situation
import some_lib
some_fun_explanation = "However, it differs in that `n` is automatically subtracted `1`"
@MyBuilder.Register("some_lib.", wrapped=some_lib.some_fun, explanation=some_fun_explanation)
def some_fun(a, n):
return some_lib.some_fun(a, n - 1) # forward the args, n slightly modified
Now the documentation for `MyBuilder.some_fun` will be a little bit nicer since it includes the original documentation from `some_lib.some_fun`. This behaviour is especially useful if you are wrapping an entire 3rd party library; you usually automate the process by iterating over all the functions in a for loop. The `phi.builder.Builder.PatchAt` method lets you register an entire module using a few lines of code; however, sometimes you have to do things more manually and do the iteration yourself.
**See Also**
* `phi.builder.Builder.PatchAt`
* `phi.builder.Builder.RegisterAt` | [
"**",
"RegisterMethod",
"**"
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/builder.py#L71-L205 |
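A runnable condensation of the docstring's example; `MyBuilder`, `scale` and `M` are illustrative names, and the expected output assumes `Pipe` threads the value left to right as described above:

from phi import Builder

class MyBuilder(Builder):
    pass

@MyBuilder.Register("my_lib.")  # the piped value becomes the first argument
def scale(obj, factor):
    return obj * factor

M = MyBuilder(lambda x: x)
print(M.Pipe(3, M.scale(2), M.scale(10)))  # 3 -> 6 -> 60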
cgarciae/phi | phi/builder.py | Builder.RegisterAt | def RegisterAt(cls, *args, **kwargs):
"""
**RegisterAt**
RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None)
Most of the time you don't want to register a method as such, that is, you don't care about the `self` builder object; instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt` so e.g.
def some_fun(obj, arg1, arg2):
# code
@MyBuilder.RegisterMethod("my_lib.")
def some_fun_wrapper(self, arg1, arg2):
return self.ThenAt(1, some_fun, arg1, arg2)
can be written directly as
@MyBuilder.RegisterAt(1, "my_lib.")
def some_fun(obj, arg1, arg2):
# code
For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)`
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
**Also See**
* `phi.builder.Builder.RegisterMethod`
"""
unpack_error = True
try:
n, f, library_path = args
unpack_error = False
cls._RegisterAt(n, f, library_path, **kwargs)
except:
if not unpack_error:
raise
def register_decorator(f):
n, library_path = args
cls._RegisterAt(n, f, library_path, **kwargs)
return f
return register_decorator | python | def RegisterAt(cls, *args, **kwargs):
"""
**RegisterAt**
RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None)
Most of the time you don't want to register a method as such, that is, you don't care about the `self` builder object; instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt` so e.g.
def some_fun(obj, arg1, arg2):
# code
@MyBuilder.RegisterMethod("my_lib.")
def some_fun_wrapper(self, arg1, arg2):
return self.ThenAt(1, some_fun, arg1, arg2)
can be written directly as
@MyBuilder.RegisterAt(1, "my_lib.")
def some_fun(obj, arg1, arg2):
# code
For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)`
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
**Also See**
* `phi.builder.Builder.RegisterMethod`
"""
unpack_error = True
try:
n, f, library_path = args
unpack_error = False
cls._RegisterAt(n, f, library_path, **kwargs)
except:
if not unpack_error:
raise
def register_decorator(f):
n, library_path = args
cls._RegisterAt(n, f, library_path, **kwargs)
return f
return register_decorator | [
"def",
"RegisterAt",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"unpack_error",
"=",
"True",
"try",
":",
"n",
",",
"f",
",",
"library_path",
"=",
"args",
"unpack_error",
"=",
"False",
"cls",
".",
"_RegisterAt",
"(",
"n",
",",
"f",
",",
"library_path",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"if",
"not",
"unpack_error",
":",
"raise",
"def",
"register_decorator",
"(",
"f",
")",
":",
"n",
",",
"library_path",
"=",
"args",
"cls",
".",
"_RegisterAt",
"(",
"n",
",",
"f",
",",
"library_path",
",",
"*",
"*",
"kwargs",
")",
"return",
"f",
"return",
"register_decorator"
] | **RegisterAt**
RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None)
Most of the time you don't want to register a method as such, that is, you don't care about the `self` builder object; instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt`, so e.g.
def some_fun(obj, arg1, arg2):
# code
@MyBuilder.RegisterMethod("my_lib.")
def some_fun_wrapper(self, arg1, arg2):
return self.ThenAt(1, some_fun, arg1, arg2)
can be written directly as
@MyBuilder.RegisterAt(1, "my_lib.")
def some_fun(obj, arg1, arg2):
# code
For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)`
@MyBuilder.Register("my_lib.")
def some_fun(obj, arg1, arg2):
# code
**Also See**
* `phi.builder.Builder.RegisterMethod` | [
"**",
"RegisterAt",
"**"
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/builder.py#L238-L285 |
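A short sketch of the decorator form of `RegisterAt`, assuming the API documented above; `MyBuilder`, `subtract` and the `"my_lib."` path are illustrative names:
from phi import Builder

class MyBuilder(Builder):
    pass

@MyBuilder.RegisterAt(2, "my_lib.")  # the piped value lands in the 2nd argument
def subtract(a, b):
    return a - b

M = MyBuilder(lambda x: x)
assert M.Pipe(3, M.subtract(10)) == 7  # subtract(10, 3)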
cgarciae/phi | phi/builder.py | Builder.PatchAt | def PatchAt(cls, n, module, method_wrapper=None, module_alias=None, method_name_modifier=utils.identity, blacklist_predicate=_False, whitelist_predicate=_True, return_type_predicate=_None, getmembers_predicate=inspect.isfunction, admit_private=False, explanation=""):
"""
This classmethod lets you easily patch all of the functions/callables from a module or class as methods of a Builder class.
**Arguments**
* **n** : the position that the object being piped will take in the arguments when the function being patched is applied. See `RegisterMethod` and `ThenAt`.
* **module** : a module or class from which the functions/methods/callables will be taken.
* `module_alias = None` : an optional alias for the module used for documentation purposes.
* `method_name_modifier = lambda f_name: None` : a function that can modify the name the method will take; if `None`, the name of the function will be used.
* `blacklist_predicate = lambda f_name: name[0] != "_"` : A predicate that determines which functions are banned given their name. By default it excludes all functions whose names start with `'_'`. `blacklist_predicate` can also be of type list, in which case all names contained in this list will be banned.
* `whitelist_predicate = lambda f_name: True` : A predicate that determines which functions are admitted given their name. By default it includes any function. `whitelist_predicate` can also be of type list, in which case only names contained in this list will be admitted. You can use both `blacklist_predicate` and `whitelist_predicate` at the same time.
* `return_type_predicate = lambda f_name: None` : a predicate that determines the `_return_type` of the Builder. By default it will always return `None`. See `phi.builder.Builder.ThenAt`.
* `getmembers_predicate = inspect.isfunction` : a predicate that determines what type of elements/members will be fetched by the `inspect` module, defaults to [inspect.isfunction](https://docs.python.org/2/library/inspect.html#inspect.isfunction). See [getmembers](https://docs.python.org/2/library/inspect.html#inspect.getmembers).
**Examples**
Let's patch ALL the main functions from numpy into a custom builder!
from phi import PythonBuilder #or Builder
import numpy as np
class NumpyBuilder(PythonBuilder): #or Builder
"A Builder for numpy functions!"
pass
NumpyBuilder.PatchAt(1, np)
N = NumpyBuilder(lambda x: x)
That's it! Although a serious patch would involve filtering out functions that don't take arrays. Another common task would be to use `NumpyBuilder.PatchAt(2, ...)` (`PatchAt(n, ..)` in general) when it is convenient to send the object being piped to the relevant argument of the function. The previous is usually done with a combination of `whitelist_predicate`s and `blacklist_predicate`s on `PatchAt(1, ...)` and `PatchAt(2, ...)` to filter or include the appropriate functions on each kind of patch. Given the previous code we could now do
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
z = N.Pipe(
x, N
.dot(y)
.add(x)
.transpose()
.sum(axis=1)
)
Which is strictly equivalent to
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
z = np.dot(x, y)
z = np.add(z, x)
z = np.transpose(z)
z = np.sum(z, axis=1)
The thing to notice is that with the `NumpyBuilder` we avoid the repetitive and needless passing and reassignment of the `z` variable, which removes a lot of noise from our code.
"""
_rtp = return_type_predicate
return_type_predicate = (lambda x: _rtp) if inspect.isclass(_rtp) and issubclass(_rtp, Builder) else _rtp
module_name = module_alias if module_alias else module.__name__ + '.'
patch_members = _get_patch_members(module, blacklist_predicate=blacklist_predicate, whitelist_predicate=whitelist_predicate, getmembers_predicate=getmembers_predicate, admit_private=admit_private)
for name, f in patch_members:
wrapped = None
if method_wrapper:
g = method_wrapper(f)
wrapped = f
else:
g = f
cls.RegisterAt(n, g, module_name, wrapped=wrapped, _return_type=return_type_predicate(name), alias=method_name_modifier(name), explanation=explanation) | python | def PatchAt(cls, n, module, method_wrapper=None, module_alias=None, method_name_modifier=utils.identity, blacklist_predicate=_False, whitelist_predicate=_True, return_type_predicate=_None, getmembers_predicate=inspect.isfunction, admit_private=False, explanation=""):
"""
This classmethod lets you easily patch all of the functions/callables from a module or class as methods of a Builder class.
**Arguments**
* **n** : the position that the object being piped will take in the arguments when the function being patched is applied. See `RegisterMethod` and `ThenAt`.
* **module** : a module or class from which the functions/methods/callables will be taken.
* `module_alias = None` : an optional alias for the module used for documentation purposes.
* `method_name_modifier = lambda f_name: None` : a function that can modify the name the method will take; if `None`, the name of the function will be used.
* `blacklist_predicate = lambda f_name: name[0] != "_"` : A predicate that determines which functions are banned given their name. By default it excludes all functions whose names start with `'_'`. `blacklist_predicate` can also be of type list, in which case all names contained in this list will be banned.
* `whitelist_predicate = lambda f_name: True` : A predicate that determines which functions are admitted given their name. By default it includes any function. `whitelist_predicate` can also be of type list, in which case only names contained in this list will be admitted. You can use both `blacklist_predicate` and `whitelist_predicate` at the same time.
* `return_type_predicate = lambda f_name: None` : a predicate that determines the `_return_type` of the Builder. By default it will always return `None`. See `phi.builder.Builder.ThenAt`.
* `getmembers_predicate = inspect.isfunction` : a predicate that determines what type of elements/members will be fetched by the `inspect` module, defaults to [inspect.isfunction](https://docs.python.org/2/library/inspect.html#inspect.isfunction). See [getmembers](https://docs.python.org/2/library/inspect.html#inspect.getmembers).
**Examples**
Let's patch ALL the main functions from numpy into a custom builder!
from phi import PythonBuilder #or Builder
import numpy as np
class NumpyBuilder(PythonBuilder): #or Builder
"A Builder for numpy functions!"
pass
NumpyBuilder.PatchAt(1, np)
N = NumpyBuilder(lambda x: x)
That's it! Although a serious patch would involve filtering out functions that don't take arrays. Another common task would be to use `NumpyBuilder.PatchAt(2, ...)` (`PatchAt(n, ..)` in general) when it is convenient to send the object being piped to the relevant argument of the function. The previous is usually done with a combination of `whitelist_predicate`s and `blacklist_predicate`s on `PatchAt(1, ...)` and `PatchAt(2, ...)` to filter or include the appropriate functions on each kind of patch. Given the previous code we could now do
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
z = N.Pipe(
x, N
.dot(y)
.add(x)
.transpose()
.sum(axis=1)
)
Which is strictly equivalent to
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
z = np.dot(x, y)
z = np.add(z, x)
z = np.transpose(z)
z = np.sum(z, axis=1)
The thing to notice is that with the `NumpyBuilder` we avoid the repetitive and needless passing and reassignment of the `z` variable, which removes a lot of noise from our code.
"""
_rtp = return_type_predicate
return_type_predicate = (lambda x: _rtp) if inspect.isclass(_rtp) and issubclass(_rtp, Builder) else _rtp
module_name = module_alias if module_alias else module.__name__ + '.'
patch_members = _get_patch_members(module, blacklist_predicate=blacklist_predicate, whitelist_predicate=whitelist_predicate, getmembers_predicate=getmembers_predicate, admit_private=admit_private)
for name, f in patch_members:
wrapped = None
if method_wrapper:
g = method_wrapper(f)
wrapped = f
else:
g = f
cls.RegisterAt(n, g, module_name, wrapped=wrapped, _return_type=return_type_predicate(name), alias=method_name_modifier(name), explanation=explanation) | [
"def",
"PatchAt",
"(",
"cls",
",",
"n",
",",
"module",
",",
"method_wrapper",
"=",
"None",
",",
"module_alias",
"=",
"None",
",",
"method_name_modifier",
"=",
"utils",
".",
"identity",
",",
"blacklist_predicate",
"=",
"_False",
",",
"whitelist_predicate",
"=",
"_True",
",",
"return_type_predicate",
"=",
"_None",
",",
"getmembers_predicate",
"=",
"inspect",
".",
"isfunction",
",",
"admit_private",
"=",
"False",
",",
"explanation",
"=",
"\"\"",
")",
":",
"_rtp",
"=",
"return_type_predicate",
"return_type_predicate",
"=",
"(",
"lambda",
"x",
":",
"_rtp",
")",
"if",
"inspect",
".",
"isclass",
"(",
"_rtp",
")",
"and",
"issubclass",
"(",
"_rtp",
",",
"Builder",
")",
"else",
"_rtp",
"module_name",
"=",
"module_alias",
"if",
"module_alias",
"else",
"module",
".",
"__name__",
"+",
"'.'",
"patch_members",
"=",
"_get_patch_members",
"(",
"module",
",",
"blacklist_predicate",
"=",
"blacklist_predicate",
",",
"whitelist_predicate",
"=",
"whitelist_predicate",
",",
"getmembers_predicate",
"=",
"getmembers_predicate",
",",
"admit_private",
"=",
"admit_private",
")",
"for",
"name",
",",
"f",
"in",
"patch_members",
":",
"wrapped",
"=",
"None",
"if",
"method_wrapper",
":",
"g",
"=",
"method_wrapper",
"(",
"f",
")",
"wrapped",
"=",
"f",
"else",
":",
"g",
"=",
"f",
"cls",
".",
"RegisterAt",
"(",
"n",
",",
"g",
",",
"module_name",
",",
"wrapped",
"=",
"wrapped",
",",
"_return_type",
"=",
"return_type_predicate",
"(",
"name",
")",
",",
"alias",
"=",
"method_name_modifier",
"(",
"name",
")",
",",
"explanation",
"=",
"explanation",
")"
] | This classmethod lets you easily patch all of the functions/callables from a module or class as methods of a Builder class.
**Arguments**
* **n** : the position that the object being piped will take in the arguments when the function being patched is applied. See `RegisterMethod` and `ThenAt`.
* **module** : a module or class from which the functions/methods/callables will be taken.
* `module_alias = None` : an optional alias for the module used for documentation purposes.
* `method_name_modifier = lambda f_name: None` : a function that can modify the name the method will take; if `None`, the name of the function will be used.
* `blacklist_predicate = lambda f_name: name[0] != "_"` : A predicate that determines which functions are banned given their name. By default it excludes all functions whose names start with `'_'`. `blacklist_predicate` can also be of type list, in which case all names contained in this list will be banned.
* `whitelist_predicate = lambda f_name: True` : A predicate that determines which functions are admitted given their name. By default it includes any function. `whitelist_predicate` can also be of type list, in which case only names contained in this list will be admitted. You can use both `blacklist_predicate` and `whitelist_predicate` at the same time.
* `return_type_predicate = lambda f_name: None` : a predicate that determines the `_return_type` of the Builder. By default it will always return `None`. See `phi.builder.Builder.ThenAt`.
* `getmembers_predicate = inspect.isfunction` : a predicate that determines what type of elements/members will be fetched by the `inspect` module, defaults to [inspect.isfunction](https://docs.python.org/2/library/inspect.html#inspect.isfunction). See [getmembers](https://docs.python.org/2/library/inspect.html#inspect.getmembers).
**Examples**
Let's patch ALL the main functions from numpy into a custom builder!
from phi import PythonBuilder #or Builder
import numpy as np
class NumpyBuilder(PythonBuilder): #or Builder
"A Builder for numpy functions!"
pass
NumpyBuilder.PatchAt(1, np)
N = NumpyBuilder(lambda x: x)
That's it! Although a serious patch would involve filtering out functions that don't take arrays. Another common task would be to use `NumpyBuilder.PatchAt(2, ...)` (`PatchAt(n, ..)` in general) when it is convenient to send the object being piped to the relevant argument of the function. The previous is usually done with a combination of `whitelist_predicate`s and `blacklist_predicate`s on `PatchAt(1, ...)` and `PatchAt(2, ...)` to filter or include the appropriate functions on each kind of patch. Given the previous code we could now do
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
z = N.Pipe(
x, N
.dot(y)
.add(x)
.transpose()
.sum(axis=1)
)
Which is strictly equivalent to
import numpy as np
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
z = np.dot(x, y)
z = np.add(z, x)
z = np.transpose(z)
z = np.sum(z, axis=1)
The thing to notice is that with the `NumpyBuilder` we avoid the repetitive and needless passing and reassignment of the `z` variable, which removes a lot of noise from our code. | [
"This",
"classmethod",
"lets",
"you",
"easily",
"patch",
"all",
"of",
"functions",
"/",
"callables",
"from",
"a",
"module",
"or",
"class",
"as",
"methods",
"a",
"Builder",
"class",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/builder.py#L360-L434 |
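The docstring's numpy example assembled into a single runnable sketch; it assumes numpy is installed, and which members actually get patched depends on your numpy version passing the default `inspect.isfunction` filter:
import numpy as np
from phi import PythonBuilder

class NumpyBuilder(PythonBuilder):
    "A Builder for numpy functions!"
    pass

NumpyBuilder.PatchAt(1, np)  # pipe the value into the 1st argument of each patched function
N = NumpyBuilder(lambda x: x)

x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])

z = N.Pipe(x, N.dot(y).add(x).transpose().sum(axis=1))  # == [66, 78]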
cgarciae/phi | phi/utils.py | get_method_sig | def get_method_sig(method):
""" Given a function, it returns a string that pretty much looks how the
function signature_ would be written in python.
:param method: a python method
:return: A string similar describing the pythong method signature_.
eg: "my_method(first_argArg, second_arg=42, third_arg='something')"
"""
# The return value of ArgSpec is a bit weird, as the list of arguments and
# list of defaults are returned in separate array.
# eg: ArgSpec(args=['first_arg', 'second_arg', 'third_arg'],
# varargs=None, keywords=None, defaults=(42, 'something'))
argspec = inspect.getargspec(method)
arg_index=0
args = []
# Use the args and defaults array returned by argspec and find out
# which arguments have defaults
for arg in argspec.args:
default_arg = _get_default_arg(argspec.args, argspec.defaults, arg_index)
if default_arg.has_default:
args.append("%s=%s" % (arg, default_arg.default_value))
else:
args.append(arg)
arg_index += 1
return "%s(%s)" % (method.__name__, ", ".join(args)) | python | def get_method_sig(method):
""" Given a function, it returns a string that pretty much looks how the
function signature_ would be written in python.
:param method: a python method
:return: A string similar describing the pythong method signature_.
eg: "my_method(first_argArg, second_arg=42, third_arg='something')"
"""
# The return value of ArgSpec is a bit weird, as the list of arguments and
# list of defaults are returned in separate array.
# eg: ArgSpec(args=['first_arg', 'second_arg', 'third_arg'],
# varargs=None, keywords=None, defaults=(42, 'something'))
argspec = inspect.getargspec(method)
arg_index=0
args = []
# Use the args and defaults array returned by argspec and find out
# which arguments have defaults
for arg in argspec.args:
default_arg = _get_default_arg(argspec.args, argspec.defaults, arg_index)
if default_arg.has_default:
args.append("%s=%s" % (arg, default_arg.default_value))
else:
args.append(arg)
arg_index += 1
return "%s(%s)" % (method.__name__, ", ".join(args)) | [
"def",
"get_method_sig",
"(",
"method",
")",
":",
"# The return value of ArgSpec is a bit weird, as the list of arguments and",
"# list of defaults are returned in separate array.",
"# eg: ArgSpec(args=['first_arg', 'second_arg', 'third_arg'],",
"# varargs=None, keywords=None, defaults=(42, 'something'))",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"method",
")",
"arg_index",
"=",
"0",
"args",
"=",
"[",
"]",
"# Use the args and defaults array returned by argspec and find out",
"# which arguments has default",
"for",
"arg",
"in",
"argspec",
".",
"args",
":",
"default_arg",
"=",
"_get_default_arg",
"(",
"argspec",
".",
"args",
",",
"argspec",
".",
"defaults",
",",
"arg_index",
")",
"if",
"default_arg",
".",
"has_default",
":",
"args",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"arg",
",",
"default_arg",
".",
"default_value",
")",
")",
"else",
":",
"args",
".",
"append",
"(",
"arg",
")",
"arg_index",
"+=",
"1",
"return",
"\"%s(%s)\"",
"%",
"(",
"method",
".",
"__name__",
",",
"\", \"",
".",
"join",
"(",
"args",
")",
")"
] | Given a function, it returns a string that pretty much shows how the
function signature would be written in python.
:param method: a python method
:return: A string describing the python method signature.
eg: "my_method(first_arg, second_arg=42, third_arg='something')" | [
"Given",
"a",
"function",
"it",
"returns",
"a",
"string",
"that",
"pretty",
"much",
"looks",
"how",
"the",
"function",
"signature_",
"would",
"be",
"written",
"in",
"python",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/utils.py#L64-L90 |
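A hedged note: `inspect.getargspec` (used above) was deprecated and removed in Python 3.11, so on modern interpreters an equivalent can be sketched with `inspect.signature`, which already renders defaults; `get_method_sig_py3` is an illustrative name:
import inspect

def get_method_sig_py3(method):
    """Return e.g. "my_method(first_arg, second_arg=42, third_arg='something')"."""
    # str(Signature) already includes parentheses and default values
    return "%s%s" % (method.__name__, inspect.signature(method))

def my_method(first_arg, second_arg=42, third_arg='something'):
    pass

assert get_method_sig_py3(my_method) == "my_method(first_arg, second_arg=42, third_arg='something')"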
cgarciae/phi | phi/dsl.py | Expression.Pipe | def Pipe(self, *sequence, **kwargs):
"""
`Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.
**Arguments**
* ***sequence**: any variable number of expressions. All expressions inside of `sequence` will be composed together using `phi.dsl.Expression.Seq`.
* ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info.
The expression
Pipe(*sequence, **kwargs)
is equivalent to
Seq(*sequence, **kwargs)(None)
Normally the first argument of `Pipe` is a value, which is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded.
**Examples**
from phi import P
def add1(x): return x + 1
def mul3(x): return x * 3
x = P.Pipe(
1, #input
add1, #1 + 1 == 2
mul3 #2 * 3 == 6
)
assert x == 6
The previous example rewritten using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions
from phi import P
x = P.Pipe(
1, #input
P + 1, #1 + 1 == 2
P * 3 #2 * 3 == 6
)
assert x == 6
**Also see**
* `phi.builder.Builder.Seq`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
* [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
* [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
"""
state = kwargs.pop("refs", {})
return self.Seq(*sequence, **kwargs)(None, **state) | python | def Pipe(self, *sequence, **kwargs):
"""
`Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.
**Arguments**
* ***sequence**: any variable number of expressions. All expressions inside of `sequence` will be composed together using `phi.dsl.Expression.Seq`.
* ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info.
The expression
Pipe(*sequence, **kwargs)
is equivalent to
Seq(*sequence, **kwargs)(None)
Normally the first argument of `Pipe` is a value, which is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded.
**Examples**
from phi import P
def add1(x): return x + 1
def mul3(x): return x * 3
x = P.Pipe(
1, #input
add1, #1 + 1 == 2
mul3 #2 * 3 == 6
)
assert x == 6
The previous example rewritten using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions
from phi import P
x = P.Pipe(
1, #input
P + 1, #1 + 1 == 2
P * 3 #2 * 3 == 6
)
assert x == 6
**Also see**
* `phi.builder.Builder.Seq`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
* [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
* [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
"""
state = kwargs.pop("refs", {})
return self.Seq(*sequence, **kwargs)(None, **state) | [
"def",
"Pipe",
"(",
"self",
",",
"*",
"sequence",
",",
"*",
"*",
"kwargs",
")",
":",
"state",
"=",
"kwargs",
".",
"pop",
"(",
"\"refs\"",
",",
"{",
"}",
")",
"return",
"self",
".",
"Seq",
"(",
"*",
"sequence",
",",
"*",
"*",
"kwargs",
")",
"(",
"None",
",",
"*",
"*",
"state",
")"
] | `Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.
**Arguments**
* ***sequence**: any variable number of expressions. All expressions inside of `sequence` will be composed together using `phi.dsl.Expression.Seq`.
* ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info.
The expression
Pipe(*sequence, **kwargs)
is equivalent to
Seq(*sequence, **kwargs)(None)
Normally the first argument of `Pipe` is a value, which is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded.
**Examples**
from phi import P
def add1(x): return x + 1
def mul3(x): return x * 3
x = P.Pipe(
1, #input
add1, #1 + 1 == 2
mul3 #2 * 3 == 6
)
assert x == 6
The previous example rewritten using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions
from phi import P
x = P.Pipe(
1, #input
P + 1, #1 + 1 == 2
P * 3 #2 * 3 == 6
)
assert x == 6
**Also see**
* `phi.builder.Builder.Seq`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
* [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
* [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) | [
"Pipe",
"runs",
"any",
"phi",
".",
"dsl",
".",
"Expression",
".",
"Its",
"highly",
"inspired",
"by",
"Elixir",
"s",
"[",
"|",
">",
"(",
"pipe",
")",
"]",
"(",
"https",
":",
"//",
"hexdocs",
".",
"pm",
"/",
"elixir",
"/",
"Kernel",
".",
"html#%7C%3E",
"/",
"2",
")",
"operator",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L468-L522 |
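A small sketch of the `Pipe`/`Seq` equivalence stated in the docstring, assuming phi expressions are callable as the implementation above suggests:
from phi import P

f = P.Seq(P + 1, P * 3)  # reusable function: x -> (x + 1) * 3
assert f(1) == 6
assert P.Pipe(1, P + 1, P * 3) == 6  # Pipe(x, ...) == Seq(...)(x)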
cgarciae/phi | phi/dsl.py | Expression.ThenAt | def ThenAt(self, n, f, *_args, **kwargs):
"""
`ThenAt` enables you to partially apply many arguments to a function; the returned partial expects a single argument which will be applied at the `n`th position of the original function.
**Arguments**
* **n**: position at which the created partial will apply its awaited argument on the original function.
* **f**: function from which the partial will be created.
* **_args & kwargs**: all `*_args` and `**kwargs` will be passed to the function `f`.
* `_return_type = None`: type of the returned `builder`, if `None` it will return the same type of the current `builder`. This special kwarg will NOT be passed to `f`.
You can think of `n` as the position at which the value being piped down will be passed to `f`. Say you have the following expression
D == fun(A, B, C)
all the following are equivalent
from phi import P, Pipe, ThenAt
D == Pipe(A, ThenAt(1, fun, B, C))
D == Pipe(B, ThenAt(2, fun, A, C))
D == Pipe(C, ThenAt(3, fun, A, B))
you could also use the shortcuts `Then`, `Then2`,..., `Then5`, which are more readable
from phi import P, Pipe
D == Pipe(A, P.Then(fun, B, C))
D == Pipe(B, P.Then2(fun, A, C))
D == Pipe(C, P.Then3(fun, A, B))
There is a special case not discussed above: `n = 0`. When this happens only the arguments given will be applied to `f`; this method will return a partial that expects a single argument but completely ignores it
from phi import P
D == Pipe(None, P.ThenAt(0, fun, A, B, C))
D == Pipe(None, P.Then0(fun, A, B, C))
**Examples**
Max of 6 and the argument:
from phi import P
assert 6 == P.Pipe(
2,
P.Then(max, 6)
)
Previous is equivalent to
assert 6 == max(2, 6)
Open a file in read mode (`'r'`)
from phi import P
f = P.Pipe(
"file.txt",
P.Then(open, 'r')
)
Previous is equivalent to
f = open("file.txt", 'r')
Split a string by whitespace and then get the length of each word
from phi import P
assert [5, 5, 5] == P.Pipe(
"Again hello world",
P.Then(str.split, ' ')
.Then2(map, len)
)
Previous is equivalent to
x = "Again hello world"
x = str.split(x, ' ')
x = map(len, x)
assert [5, 5, 5] == x
As you see, `Then2` was very useful because `map` accepts an `iterable` as its `2nd` parameter. You can rewrite the previous using the [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html) and the `phi.builder.Builder.Obj` object
from phi import P, Obj
assert [5, 5, 5] == P.Pipe(
"Again hello world",
Obj.split(' '),
P.map(len)
)
**Also see**
* `phi.builder.Builder.Obj`
* [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html)
* `phi.builder.Builder.RegisterAt`
"""
_return_type = None
n_args = n - 1
if '_return_type' in kwargs:
_return_type = kwargs['_return_type']
del kwargs['_return_type']
@utils.lift
def g(x):
new_args = _args[0:n_args] + (x,) + _args[n_args:] if n_args >= 0 else _args
return f(*new_args, **kwargs)
return self.__then__(g, _return_type=_return_type) | python | def ThenAt(self, n, f, *_args, **kwargs):
"""
`ThenAt` enables you to partially apply many arguments to a function; the returned partial expects a single argument which will be applied at the `n`th position of the original function.
**Arguments**
* **n**: position at which the created partial will apply its awaited argument on the original function.
* **f**: function from which the partial will be created.
* **_args & kwargs**: all `*_args` and `**kwargs` will be passed to the function `f`.
* `_return_type = None`: type of the returned `builder`, if `None` it will return the same type of the current `builder`. This special kwarg will NOT be passed to `f`.
You can think of `n` as the position at which the value being piped down will be passed to `f`. Say you have the following expression
D == fun(A, B, C)
all the following are equivalent
from phi import P, Pipe, ThenAt
D == Pipe(A, ThenAt(1, fun, B, C))
D == Pipe(B, ThenAt(2, fun, A, C))
D == Pipe(C, ThenAt(3, fun, A, B))
you could also use the shortcuts `Then`, `Then2`,..., `Then5`, which are more readable
from phi import P, Pipe
D == Pipe(A, P.Then(fun, B, C))
D == Pipe(B, P.Then2(fun, A, C))
D == Pipe(C, P.Then3(fun, A, B))
There is a special case not discussed above: `n = 0`. When this happens only the arguments given will be applied to `f`; this method will return a partial that expects a single argument but completely ignores it
from phi import P
D == Pipe(None, P.ThenAt(0, fun, A, B, C))
D == Pipe(None, P.Then0(fun, A, B, C))
**Examples**
Max of 6 and the argument:
from phi import P
assert 6 == P.Pipe(
2,
P.Then(max, 6)
)
Previous is equivalent to
assert 6 == max(2, 6)
Open a file in read mode (`'r'`)
from phi import P
f = P.Pipe(
"file.txt",
P.Then(open, 'r')
)
Previous is equivalent to
f = open("file.txt", 'r')
Split a string by whitespace and then get the length of each word
from phi import P
assert [5, 5, 5] == P.Pipe(
"Again hello world",
P.Then(str.split, ' ')
.Then2(map, len)
)
Previous is equivalent to
x = "Again hello world"
x = str.split(x, ' ')
x = map(len, x)
assert [5, 5, 5] == x
As you see, `Then2` was very useful because `map` accepts an `iterable` as its `2nd` parameter. You can rewrite the previous using the [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html) and the `phi.builder.Builder.Obj` object
from phi import P, Obj
assert [5, 5, 5] == P.Pipe(
"Again hello world",
Obj.split(' '),
P.map(len)
)
**Also see**
* `phi.builder.Builder.Obj`
* [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html)
* `phi.builder.Builder.RegisterAt`
"""
_return_type = None
n_args = n - 1
if '_return_type' in kwargs:
_return_type = kwargs['_return_type']
del kwargs['_return_type']
@utils.lift
def g(x):
new_args = _args[0:n_args] + (x,) + _args[n_args:] if n_args >= 0 else _args
return f(*new_args, **kwargs)
return self.__then__(g, _return_type=_return_type) | [
"def",
"ThenAt",
"(",
"self",
",",
"n",
",",
"f",
",",
"*",
"_args",
",",
"*",
"*",
"kwargs",
")",
":",
"_return_type",
"=",
"None",
"n_args",
"=",
"n",
"-",
"1",
"if",
"'_return_type'",
"in",
"kwargs",
":",
"_return_type",
"=",
"kwargs",
"[",
"'_return_type'",
"]",
"del",
"kwargs",
"[",
"'_return_type'",
"]",
"@",
"utils",
".",
"lift",
"def",
"g",
"(",
"x",
")",
":",
"new_args",
"=",
"_args",
"[",
"0",
":",
"n_args",
"]",
"+",
"(",
"x",
",",
")",
"+",
"_args",
"[",
"n_args",
":",
"]",
"if",
"n_args",
">=",
"0",
"else",
"_args",
"return",
"f",
"(",
"*",
"new_args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"__then__",
"(",
"g",
",",
"_return_type",
"=",
"_return_type",
")"
] | `ThenAt` enables you to partially apply many arguments to a function; the returned partial expects a single argument which will be applied at the `n`th position of the original function.
**Arguments**
* **n**: position at which the created partial will apply its awaited argument on the original function.
* **f**: function from which the partial will be created.
* **_args & kwargs**: all `*_args` and `**kwargs` will be passed to the function `f`.
* `_return_type = None`: type of the returned `builder`, if `None` it will return the same type of the current `builder`. This special kwarg will NOT be passed to `f`.
You can think of `n` as the position at which the value being piped down will be passed to `f`. Say you have the following expression
D == fun(A, B, C)
all the following are equivalent
from phi import P, Pipe, ThenAt
D == Pipe(A, ThenAt(1, fun, B, C))
D == Pipe(B, ThenAt(2, fun, A, C))
D == Pipe(C, ThenAt(3, fun, A, B))
you could also use the shortcuts `Then`, `Then2`,..., `Then5`, which are more readable
from phi import P, Pipe
D == Pipe(A, P.Then(fun, B, C))
D == Pipe(B, P.Then2(fun, A, C))
D == Pipe(C, P.Then3(fun, A, B))
There is a special case not discussed above: `n = 0`. When this happens only the arguments given will be applied to `f`; this method will return a partial that expects a single argument but completely ignores it
from phi import P
D == Pipe(None, P.ThenAt(0, fun, A, B, C))
D == Pipe(None, P.Then0(fun, A, B, C))
**Examples**
Max of 6 and the argument:
from phi import P
assert 6 == P.Pipe(
2,
P.Then(max, 6)
)
Previous is equivalent to
assert 6 == max(2, 6)
Open a file in read mode (`'r'`)
from phi import P
f = P.Pipe(
"file.txt",
P.Then(open, 'r')
)
Previous is equivalent to
f = open("file.txt", 'r')
Split a string by whitespace and then get the length of each word
from phi import P
assert [5, 5, 5] == P.Pipe(
"Again hello world",
P.Then(str.split, ' ')
.Then2(map, len)
)
Previous is equivalent to
x = "Again hello world"
x = str.split(x, ' ')
x = map(len, x)
assert [5, 5, 5] == x
As you see, `Then2` was very useful because `map` accepts an `iterable` as its `2nd` parameter. You can rewrite the previous using the [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html) and the `phi.builder.Builder.Obj` object
from phi import P, Obj
assert [5, 5, 5] == P.Pipe(
"Again hello world",
Obj.split(' '),
P.map(len)
)
**Also see**
* `phi.builder.Builder.Obj`
* [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html)
* `phi.builder.Builder.RegisterAt` | [
"ThenAt",
"enables",
"you",
"to",
"create",
"a",
"partially",
"apply",
"many",
"arguments",
"to",
"a",
"function",
"the",
"returned",
"partial",
"expects",
"a",
"single",
"arguments",
"which",
"will",
"be",
"applied",
"at",
"the",
"n",
"th",
"position",
"of",
"the",
"original",
"function",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L524-L638 |
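A compact sketch of the positional behaviour of `Then`/`Then2` described above; `div` is an illustrative function:
from phi import P

def div(a, b):
    return a / b

assert P.Pipe(10.0, P.Then(div, 2.0)) == 5.0   # piped value -> 1st arg: div(10.0, 2.0)
assert P.Pipe(2.0, P.Then2(div, 10.0)) == 5.0  # piped value -> 2nd arg: div(10.0, 2.0)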
cgarciae/phi | phi/dsl.py | Expression.Then0 | def Then0(self, f, *args, **kwargs):
"""
`Then0(f, ...)` is equivalent to `ThenAt(0, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
return self.ThenAt(0, f, *args, **kwargs) | python | def Then0(self, f, *args, **kwargs):
"""
`Then0(f, ...)` is equivalent to `ThenAt(0, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
return self.ThenAt(0, f, *args, **kwargs) | [
"def",
"Then0",
"(",
"self",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"ThenAt",
"(",
"0",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | `Then0(f, ...)` is equivalent to `ThenAt(0, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information. | [
"Then0",
"(",
"f",
"...",
")",
"is",
"equivalent",
"to",
"ThenAt",
"(",
"0",
"f",
"...",
")",
".",
"Checkout",
"phi",
".",
"builder",
".",
"Builder",
".",
"ThenAt",
"for",
"more",
"information",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L640-L644 |
cgarciae/phi | phi/dsl.py | Expression.Then | def Then(self, f, *args, **kwargs):
"""
`Then(f, ...)` is equivalent to `ThenAt(1, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
return self.ThenAt(1, f, *args, **kwargs) | python | def Then(self, f, *args, **kwargs):
"""
`Then(f, ...)` is equivalent to `ThenAt(1, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
return self.ThenAt(1, f, *args, **kwargs) | [
"def",
"Then",
"(",
"self",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"ThenAt",
"(",
"1",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | `Then(f, ...)` is equivalent to `ThenAt(1, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information. | [
"Then",
"(",
"f",
"...",
")",
"is",
"equivalent",
"to",
"ThenAt",
"(",
"1",
"f",
"...",
")",
".",
"Checkout",
"phi",
".",
"builder",
".",
"Builder",
".",
"ThenAt",
"for",
"more",
"information",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L646-L650 |
cgarciae/phi | phi/dsl.py | Expression.Then2 | def Then2(self, f, arg1, *args, **kwargs):
"""
`Then2(f, ...)` is equivalent to `ThenAt(2, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1,) + args
return self.ThenAt(2, f, *args, **kwargs) | python | def Then2(self, f, arg1, *args, **kwargs):
"""
`Then2(f, ...)` is equivalent to `ThenAt(2, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1,) + args
return self.ThenAt(2, f, *args, **kwargs) | [
"def",
"Then2",
"(",
"self",
",",
"f",
",",
"arg1",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"arg1",
",",
")",
"+",
"args",
"return",
"self",
".",
"ThenAt",
"(",
"2",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | `Then2(f, ...)` is equivalent to `ThenAt(2, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information. | [
"Then2",
"(",
"f",
"...",
")",
"is",
"equivalent",
"to",
"ThenAt",
"(",
"2",
"f",
"...",
")",
".",
"Checkout",
"phi",
".",
"builder",
".",
"Builder",
".",
"ThenAt",
"for",
"more",
"information",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L654-L659 |
cgarciae/phi | phi/dsl.py | Expression.Then3 | def Then3(self, f, arg1, arg2, *args, **kwargs):
"""
`Then3(f, ...)` is equivalent to `ThenAt(3, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2) + args
return self.ThenAt(3, f, *args, **kwargs) | python | def Then3(self, f, arg1, arg2, *args, **kwargs):
"""
`Then3(f, ...)` is equivalent to `ThenAt(3, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2) + args
return self.ThenAt(3, f, *args, **kwargs) | [
"def",
"Then3",
"(",
"self",
",",
"f",
",",
"arg1",
",",
"arg2",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"arg1",
",",
"arg2",
")",
"+",
"args",
"return",
"self",
".",
"ThenAt",
"(",
"3",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | `Then3(f, ...)` is equivalent to `ThenAt(3, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information. | [
"Then3",
"(",
"f",
"...",
")",
"is",
"equivalent",
"to",
"ThenAt",
"(",
"3",
"f",
"...",
")",
".",
"Checkout",
"phi",
".",
"builder",
".",
"Builder",
".",
"ThenAt",
"for",
"more",
"information",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L661-L666 |
cgarciae/phi | phi/dsl.py | Expression.Then4 | def Then4(self, f, arg1, arg2, arg3, *args, **kwargs):
"""
`Then4(f, ...)` is equivalent to `ThenAt(4, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2, arg3) + args
return self.ThenAt(4, f, *args, **kwargs) | python | def Then4(self, f, arg1, arg2, arg3, *args, **kwargs):
"""
`Then4(f, ...)` is equivalent to `ThenAt(4, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2, arg3) + args
return self.ThenAt(4, f, *args, **kwargs) | [
"def",
"Then4",
"(",
"self",
",",
"f",
",",
"arg1",
",",
"arg2",
",",
"arg3",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"arg1",
",",
"arg2",
",",
"arg3",
")",
"+",
"args",
"return",
"self",
".",
"ThenAt",
"(",
"4",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | `Then4(f, ...)` is equivalent to `ThenAt(4, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information. | [
"Then4",
"(",
"f",
"...",
")",
"is",
"equivalent",
"to",
"ThenAt",
"(",
"4",
"f",
"...",
")",
".",
"Checkout",
"phi",
".",
"builder",
".",
"Builder",
".",
"ThenAt",
"for",
"more",
"information",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L668-L673 |
cgarciae/phi | phi/dsl.py | Expression.Then5 | def Then5(self, f, arg1, arg2, arg3, arg4, *args, **kwargs):
"""
`Then5(f, ...)` is equivalent to `ThenAt(5, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2, arg3, arg4) + args
return self.ThenAt(5, f, *args, **kwargs) | python | def Then5(self, f, arg1, arg2, arg3, arg4, *args, **kwargs):
"""
`Then5(f, ...)` is equivalent to `ThenAt(5, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information.
"""
args = (arg1, arg2, arg3, arg4) + args
return self.ThenAt(5, f, *args, **kwargs) | [
"def",
"Then5",
"(",
"self",
",",
"f",
",",
"arg1",
",",
"arg2",
",",
"arg3",
",",
"arg4",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"arg1",
",",
"arg2",
",",
"arg3",
",",
"arg4",
")",
"+",
"args",
"return",
"self",
".",
"ThenAt",
"(",
"5",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | `Then5(f, ...)` is equivalent to `ThenAt(5, f, ...)`. Check out `phi.builder.Builder.ThenAt` for more information. | [
"Then5",
"(",
"f",
"...",
")",
"is",
"equivalent",
"to",
"ThenAt",
"(",
"5",
"f",
"...",
")",
".",
"Checkout",
"phi",
".",
"builder",
".",
"Builder",
".",
"ThenAt",
"for",
"more",
"information",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L675-L680 |
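Since `Then0` applies only the arguments given and ignores the piped value (per the `ThenAt` docs above), a minimal sketch of that special case:
from phi import P

assert P.Pipe(None, P.Then0(max, 1, 7, 3)) == 7  # the piped None is discarded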
cgarciae/phi | phi/dsl.py | Expression.List | def List(self, *branches, **kwargs):
"""
While `Seq` is sequential, `phi.dsl.Expression.List` allows you to split the computation and get back a list with the result of each path. While the list literal should be the most common incarnation of this expression, it can actually be any iterable (implements `__iter__`) that is not a tuple and yields a valid expression.
The expression
k = List(f, g)
is equivalent to
k = lambda x: [ f(x), g(x) ]
In general, the following rules apply after compilation:
**General Branching**
List(f0, f1, ..., fn)
is equivalent to
lambda x: [ f0(x), f1(x), ..., fn(x) ]
**Composing & Branching**
It is interesting to see how branching interacts with composing. The expression
Seq(f, List(g, h))
is *almost* equivalent to
List( Seq(f, g), Seq(f, h) )
As you see it's as if `f` were distributed over the List. We say *almost* because their implementations are different
def _lambda(x):
x = f(x)
return [ g(x), h(x) ]
vs
lambda x: [ g(f(x)), h(f(x)) ]
As you see `f` is only executed once in the first one. Both should yield the same result if `f` is a pure function.
### Examples
from phi import P, List
avg_word_length = P.Pipe(
"1 22 333",
lambda s: s.split(' '), # ['1', '22', '333']
lambda l: map(len, l), # [1, 2, 3]
List(
sum # 1 + 2 + 3 == 6
,
len # len([1, 2, 3]) == 3
),
lambda l: l[0] / l[1] # sum / len == 6 / 3 == 2
)
assert avg_word_length == 2
The previous could also be done more briefly like this
from phi import P, Obj, List
avg_word_length = P.Pipe(
"1 22 333", Obj
.split(' ') # ['1', '22', '333']
.map(len) # [1, 2, 3]
.List(
sum #sum([1, 2, 3]) == 6
,
len #len([1, 2, 3]) == 3
),
P[0] / P[1] #6 / 3 == 2
)
assert avg_word_length == 2
In the example above the last expression
P[0] / P[1]
works for a couple of reasons
1. The previous expression returns a list
2. In general the expression `P[x]` compiles to a function with the form `lambda obj: obj[x]`
3. The class `Expression` (the class from which the object `P` inherits) overrides most operators to create functions easily. For example, the expression
(P * 2) / (P + 1)
compiles to a function of the form
lambda x: (x * 2) / (x + 1)
Check out the documentation for Phi [lambdas](https://cgarciae.github.io/phi/lambdas.m.html).
"""
gs = [ _parse(code)._f for code in branches ]
def h(x, state):
ys = []
for g in gs:
y, state = g(x, state)
ys.append(y)
return (ys, state)
return self.__then__(h, **kwargs) | python | def List(self, *branches, **kwargs):
"""
While `Seq` is sequential, `phi.dsl.Expression.List` allows you to split the computation and get back a list with the result of each path. While the list literal should be the most common incarnation of this expression, it can actually be any iterable (implements `__iter__`) that is not a tuple and yields a valid expression.
The expression
k = List(f, g)
is equivalent to
k = lambda x: [ f(x), g(x) ]
In general, the following rules apply after compilation:
**General Branching**
List(f0, f1, ..., fn)
is equivalent to
lambda x: [ f0(x), f1(x), ..., fn(x) ]
**Composing & Branching**
It is interesting to see how branching interacts with composing. The expression
Seq(f, List(g, h))
is *almost* equivalent to
List( Seq(f, g), Seq(f, h) )
As you see it's as if `f` were distributed over the List. We say *almost* because their implementations are different
def _lambda(x):
x = f(x)
return [ g(x), h(x) ]
vs
lambda x: [ g(f(x)), h(f(x)) ]
As you see `f` is only executed once in the first one. Both should yield the same result if `f` is a pure function.
### Examples
from phi import P, List
avg_word_length = P.Pipe(
"1 22 333",
lambda s: s.split(' '), # ['1', '22', '333']
lambda l: map(len, l), # [1, 2, 3]
List(
sum # 1 + 2 + 3 == 6
,
len # len([1, 2, 3]) == 3
),
lambda l: l[0] / l[1] # sum / len == 6 / 3 == 2
)
assert avg_word_length == 2
The previous could also be done more briefly like this
from phi import P, Obj, List
avg_word_length = P.Pipe(
"1 22 333", Obj
.split(' ') # ['1', '22', '333']
.map(len) # [1, 2, 3]
.List(
sum #sum([1, 2, 3]) == 6
,
len #len([1, 2, 3]) == 3
),
P[0] / P[1] #6 / 3 == 2
)
assert avg_word_length == 2
In the example above the last expression
P[0] / P[1]
works for a couple of reasons
1. The previous expression returns a list
2. In general the expression `P[x]` compiles to a function with the form `lambda obj: obj[x]`
3. The class `Expression` (the class from which the object `P` inherits) overrides most operators to create functions easily. For example, the expression
(P * 2) / (P + 1)
compiles to a function of the form
lambda x: (x * 2) / (x + 1)
Check out the documentation for Phi [lambdas](https://cgarciae.github.io/phi/lambdas.m.html).
"""
gs = [ _parse(code)._f for code in branches ]
def h(x, state):
ys = []
for g in gs:
y, state = g(x, state)
ys.append(y)
return (ys, state)
return self.__then__(h, **kwargs) | [
"def",
"List",
"(",
"self",
",",
"*",
"branches",
",",
"*",
"*",
"kwargs",
")",
":",
"gs",
"=",
"[",
"_parse",
"(",
"code",
")",
".",
"_f",
"for",
"code",
"in",
"branches",
"]",
"def",
"h",
"(",
"x",
",",
"state",
")",
":",
"ys",
"=",
"[",
"]",
"for",
"g",
"in",
"gs",
":",
"y",
",",
"state",
"=",
"g",
"(",
"x",
",",
"state",
")",
"ys",
".",
"append",
"(",
"y",
")",
"return",
"(",
"ys",
",",
"state",
")",
"return",
"self",
".",
"__then__",
"(",
"h",
",",
"*",
"*",
"kwargs",
")"
] | While `Seq` is sequential, `phi.dsl.Expression.List` allows you to split the computation and get back a list with the result of each path. While the list literal should be the most common incarnation of this expression, it can actually be any iterable (implements `__iter__`) that is not a tuple and yields a valid expression.
The expression
k = List(f, g)
is equivalent to
k = lambda x: [ f(x), g(x) ]
In general, the following rules apply after compilation:
**General Branching**
List(f0, f1, ..., fn)
is equivalent to
lambda x: [ f0(x), f1(x), ..., fn(x) ]
**Composing & Branching**
It is interesting to see how branching interacts with composing. The expression
Seq(f, List(g, h))
is *almost* equivalent to
List( Seq(f, g), Seq(f, h) )
As you see it's as if `f` were distributed over the List. We say *almost* because their implementations are different
def _lambda(x):
x = f(x)
return [ g(x), h(x) ]
vs
lambda x: [ g(f(x)), h(f(x)) ]
As you see `f` is only executed once in the first one. Both should yield the same result if `f` is a pure function.
### Examples
from phi import P, List
avg_word_length = P.Pipe(
"1 22 333",
lambda s: s.split(' '), # ['1', '22', '333']
lambda l: map(len, l), # [1, 2, 3]
List(
sum # 1 + 2 + 3 == 6
,
len # len([1, 2, 3]) == 3
),
lambda l: l[0] / l[1] # sum / len == 6 / 3 == 2
)
assert avg_word_length == 2
The previous could also be done more briefly like this
from phi import P, Obj, List
avg_word_length = P.Pipe(
"1 22 333", Obj
.split(' ') # ['1', '22', '333']
.map(len) # [1, 2, 3]
.List(
sum #sum([1, 2, 3]) == 6
,
len #len([1, 2, 3]) == 3
),
P[0] / P[1] #6 / 3 == 2
)
assert avg_word_length == 2
In the example above the last expression
P[0] / P[1]
works for a couple of reasons
1. The previous expression returns a list
2. In general the expression `P[x]` compiles to a function with the form `lambda obj: obj[x]`
3. The class `Expression` (the class from which the object `P` inherits) overrides most operators to create functions easily. For example, the expression
(P * 2) / (P + 1)
compiles to a function of the form
lambda x: (x * 2) / (x + 1)
Check out the documentation for Phi [lambdas](https://cgarciae.github.io/phi/lambdas.m.html). | [
"While",
"Seq",
"is",
"sequential",
"phi",
".",
"dsl",
".",
"Expression",
".",
"List",
"allows",
"you",
"to",
"split",
"the",
"computation",
"and",
"get",
"back",
"a",
"list",
"with",
"the",
"result",
"of",
"each",
"path",
".",
"While",
"the",
"list",
"literal",
"should",
"be",
"the",
"most",
"incarnation",
"of",
"this",
"expresion",
"it",
"can",
"actually",
"be",
"any",
"iterable",
"(",
"implements",
"__iter__",
")",
"that",
"is",
"not",
"a",
"tuple",
"and",
"yields",
"a",
"valid",
"expresion",
"."
] | train | https://github.com/cgarciae/phi/blob/87fd7100a76f823232f4fd8360498b4b80675265/phi/dsl.py#L682-L793 |
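A runnable Python 3 form of the branching example from the `List` docstring above; note the `map` result must be wrapped in `list`, since on Python 3 `len` does not work on the iterator that `map` returns:
from phi import P, List

avg_word_length = P.Pipe(
    "1 22 333",
    lambda s: s.split(' '),       # ['1', '22', '333']
    lambda l: list(map(len, l)),  # [1, 2, 3]
    List(sum, len),               # [6, 3]
    lambda l: l[0] / l[1],        # 6 / 3 == 2.0
)
assert avg_word_length == 2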