Dataset schema (field name, dtype, observed size range):

    repository_name             string     7-55 chars
    func_path_in_repository     string     4-223 chars
    func_name                   string     1-134 chars
    whole_func_string           string     75-104k chars
    language                    string     1 distinct value
    func_code_string            string     75-104k chars
    func_code_tokens            sequence   19-28.4k tokens
    func_documentation_string   string     1-46.9k chars
    func_documentation_tokens   sequence   1-1.97k tokens
    split_name                  string     1 distinct value
    func_code_url               string     87-315 chars

Each record below gives the repository, the file path, the function name, the function source, the language, the split, and the source URL.
awacha/sastool
sastool/io/credo_saxsctrl/header.py
Header.date
def date(self) -> datetime.datetime:
    """Date of the experiment (start of exposure)"""
    return self._data['Date'] - datetime.timedelta(0, float(self.exposuretime), 0)
python
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_saxsctrl/header.py#L211-L213
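For orientation, a minimal standalone sketch of the same start-time arithmetic (plain datetime only, no sastool; the header values are invented for illustration):

import datetime

exposure_end = datetime.datetime(2024, 1, 15, 12, 30, 0)  # hypothetical 'Date' field (end of exposure)
exposuretime = 300.0                                       # hypothetical exposure time in seconds
# timedelta(days, seconds, microseconds): the same positional form as above
start = exposure_end - datetime.timedelta(0, exposuretime, 0)
print(start)  # 2024-01-15 12:25:00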
awacha/sastool
sastool/io/credo_saxsctrl/header.py
Header.flux
def flux(self) -> ErrorValue:
    """X-ray flux in photons/sec."""
    try:
        return ErrorValue(self._data['Flux'],
                          self._data.setdefault('FluxError', 0.0))
    except KeyError:
        return 1 / self.pixelsizex / self.pixelsizey / ErrorValue(
            self._data['NormFactor'],
            self._data.setdefault('NormFactorError', 0.0))
python
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_saxsctrl/header.py#L276-L282
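A rough standalone illustration of the fallback branch above, with plain floats instead of ErrorValue (so no error propagation; all numbers are invented, and the photons/sec reading follows the docstring):

pixelsizex = pixelsizey = 0.0172  # hypothetical detector pixel size
normfactor = 3.2e-7               # hypothetical absolute-intensity normalization factor
flux = 1 / pixelsizex / pixelsizey / normfactor  # photons/sec per the docstring
print('%.4g' % flux)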
awacha/sastool
sastool/misc/easylsq.py
nonlinear_leastsquares
def nonlinear_leastsquares(x: np.ndarray, y: np.ndarray, dy: np.ndarray,
                           func: Callable, params_init: np.ndarray,
                           verbose: bool = False, **kwargs):
    """Perform a non-linear least squares fit, return the results as
    ErrorValue() instances.

    Inputs:
        x: one-dimensional numpy array of the independent variable
        y: one-dimensional numpy array of the dependent variable
        dy: absolute error (square root of the variance) of the dependent
            variable. Either a one-dimensional numpy array or None. In the
            array case, if any of its elements is NaN, the whole array is
            treated as NaN (= no weighting)
        func: a callable with the signature
            func(x, par1, par2, par3, ...)
        params_init: list or tuple of the first estimates of the parameters
            par1, par2, par3 etc. to be fitted
        `verbose`: if various messages useful for debugging should be printed
            on stdout.
        other optional keyword arguments will be passed to leastsq().

    Outputs: par1, par2, par3, ..., statdict
        par1, par2, par3, ...: fitted values of par1, par2, par3 etc. as
            instances of ErrorValue.
        statdict: dictionary of various statistical parameters:
            'DoF': Degrees of freedom
            'Chi2': Chi-squared
            'Chi2_reduced': Reduced Chi-squared
            'R2': Coefficient of determination
            'num_func_eval': number of function evaluations during fit.
            'func_value': the function evaluated in the best fitting parameters
            'message': status message from leastsq()
            'error_flag': integer status flag from leastsq() ('ier')
            'Covariance': covariance matrix (variances in the diagonal)
            'Correlation_coeffs': Pearson's correlation coefficients (usually
                denoted by 'r') in a matrix. The diagonal is unity.

    Notes:
        for the actual fitting, nlsq_fit() is used, which in turn delegates
        the job to scipy.optimize.leastsq().
    """
    newfunc, newparinit = hide_fixedparams(func, params_init)
    p, dp, statdict = nlsq_fit(x, y, dy, newfunc, newparinit, verbose, **kwargs)
    p, statdict['Covariance'] = resubstitute_fixedparams(
        p, params_init, statdict['Covariance'])
    dp, statdict['Correlation_coeffs'] = resubstitute_fixedparams(
        dp, [type(p_)(0) for p_ in params_init], statdict['Correlation_coeffs'])

    def convert(p_, dp_):
        if isinstance(p_, FixedParameter) or isinstance(dp_, FixedParameter):
            return p_
        else:
            return ErrorValue(p_, dp_)

    return tuple([convert(p_, dp_) for (p_, dp_) in zip(p, dp)] + [statdict])
python
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L64-L114
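A hedged usage sketch of nonlinear_leastsquares() (assumes sastool is installed; the import path is taken from the record above, the model and data are invented):

import numpy as np
from sastool.misc.easylsq import nonlinear_leastsquares

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
y = 2.5 * np.exp(-0.3 * x) + rng.normal(0, 0.02, x.size)
dy = np.full_like(x, 0.02)

def model(x, a, b):
    return a * np.exp(-b * x)

a, b, stat = nonlinear_leastsquares(x, y, dy, model, [1.0, 0.1])
print(a, b)  # ErrorValue instances
print(stat['Chi2_reduced'], stat['R2'])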
awacha/sastool
sastool/misc/easylsq.py
nonlinear_odr
def nonlinear_odr(x, y, dx, dy, func, params_init, **kwargs):
    """Perform a non-linear orthogonal distance regression, return the results
    as ErrorValue() instances.

    Inputs:
        x: one-dimensional numpy array of the independent variable
        y: one-dimensional numpy array of the dependent variable
        dx: absolute error (square root of the variance) of the independent
            variable. Either a one-dimensional numpy array or None. If None,
            weighting is disabled. Non-finite (NaN or inf) elements signify
            that the corresponding element in x is to be treated as fixed by
            ODRPACK.
        dy: absolute error (square root of the variance) of the dependent
            variable. Either a one-dimensional numpy array or None. If None,
            weighting is disabled.
        func: a callable with the signature
            func(x, par1, par2, par3, ...)
        params_init: list or tuple of the first estimates of the parameters
            par1, par2, par3 etc. to be fitted
        other optional keyword arguments will be passed to leastsq().

    Outputs: par1, par2, par3, ..., statdict
        par1, par2, par3, ...: fitted values of par1, par2, par3 etc. as
            instances of ErrorValue.
        statdict: dictionary of various statistical parameters:
            'DoF': Degrees of freedom
            'Chi2': Chi-squared
            'Chi2_reduced': Reduced Chi-squared
            'num_func_eval': number of function evaluations during fit.
            'func_value': the function evaluated in the best fitting parameters
            'message': status message from leastsq()
            'error_flag': integer status flag from leastsq() ('ier')
            'Covariance': covariance matrix (variances in the diagonal)
            'Correlation_coeffs': Pearson's correlation coefficients (usually
                denoted by 'r') in a matrix. The diagonal is unity.

    Notes:
        for the actual fitting, the module scipy.odr is used.
    """
    odrmodel = odr.Model(lambda pars, x: func(x, *pars))
    if dx is not None:
        # treat non-finite values as fixed
        xfixed = np.isfinite(dx)
    else:
        xfixed = None
    odrdata = odr.RealData(x, y, sx=dx, sy=dy, fix=xfixed)
    odrodr = odr.ODR(odrdata, odrmodel, params_init,
                     ifixb=[not isinstance(p, FixedParameter) for p in params_init],
                     **kwargs)
    odroutput = odrodr.run()
    statdict = odroutput.__dict__.copy()
    statdict['Covariance'] = odroutput.cov_beta
    statdict['Correlation_coeffs'] = odroutput.cov_beta / np.outer(
        odroutput.sd_beta, odroutput.sd_beta)
    statdict['DoF'] = len(x) - len(odroutput.beta)
    statdict['Chi2_reduced'] = statdict['res_var']
    statdict['func_value'] = statdict['y']
    statdict['Chi2'] = statdict['sum_square']

    def convert(p_, dp_, pi):
        if isinstance(pi, FixedParameter):
            return FixedParameter(p_)
        else:
            return ErrorValue(p_, dp_)

    return tuple([convert(p_, dp_, pi) for (p_, dp_, pi)
                  in zip(odroutput.beta, odroutput.sd_beta, params_init)] + [statdict])
python
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L116-L179
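A similar hedged sketch for nonlinear_odr(), which differs from the least-squares variant in accepting errors on x as well (assumes sastool is installed; data invented):

import numpy as np
from sastool.misc.easylsq import nonlinear_odr

rng = np.random.default_rng(1)
x = np.linspace(1, 5, 30)
dx = np.full_like(x, 0.05)  # x has errors too: this is what ODR is for
y = 1.8 * x + 0.5 + rng.normal(0, 0.1, x.size)
dy = np.full_like(x, 0.1)

slope, intercept, stat = nonlinear_odr(x, y, dx, dy,
                                       lambda x, a, b: a * x + b, [1.0, 0.0])
print(slope, intercept, stat['Chi2_reduced'])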
awacha/sastool
sastool/misc/easylsq.py
simultaneous_nonlinear_leastsquares
def simultaneous_nonlinear_leastsquares(xs, ys, dys, func, params_inits, verbose=False, **kwargs):
    """Do a simultaneous nonlinear least-squares fit and return the fitted
    parameters as instances of ErrorValue.

    Input:
    ------
    `xs`: tuple of abscissa vectors (1d numpy ndarrays)
    `ys`: tuple of ordinate vectors (1d numpy ndarrays)
    `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones)
    `func`: fitting function (the same for all the datasets)
    `params_inits`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the
        initial values of the parameters to be fitted. The special value
        `None` signifies that the corresponding parameter is the same as in
        the previous dataset. Of course, none of the parameters of the first
        dataset can be None.
    `verbose`: if various messages useful for debugging should be printed on
        stdout.

    additional keyword arguments get forwarded to nlsq_fit()

    Output:
    -------
    `parset1, parset2, ...`: tuples of fitted parameters corresponding to
        curve1, curve2, etc. Each tuple contains the values of the fitted
        parameters as instances of ErrorValue, in the same order as they are
        in `params_inits`.
    `statdict`: statistics dictionary. This is of the same form as in
        `nlsq_fit`, except that func_value is a sequence of one-dimensional
        np.ndarrays containing the best-fitting function values for each curve.
    """
    p, dp, statdict = simultaneous_nlsq_fit(xs, ys, dys, func, params_inits,
                                            verbose, **kwargs)
    params = [[ErrorValue(p_, dp_) for (p_, dp_) in zip(pcurrent, dpcurrent)]
              for (pcurrent, dpcurrent) in zip(p, dp)]
    return tuple(params + [statdict])
python
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L182-L215
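A hedged sketch of tying parameters across curves with the None convention described above (assumes sastool is installed; data invented):

import numpy as np
from sastool.misc.easylsq import simultaneous_nonlinear_leastsquares

def line(x, a, b):
    return a * x + b

xs = [np.linspace(0, 1, 20), np.linspace(0, 1, 20)]
ys = [2.0 * x_ + 1.0 for x_ in xs]
dys = [None, None]
# the second dataset shares the slope (None) but fits its own intercept
inits = [[1.0, 0.0], [None, 0.0]]
pars1, pars2, stat = simultaneous_nonlinear_leastsquares(xs, ys, dys, line, inits)
print(pars1, pars2)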
awacha/sastool
sastool/misc/easylsq.py
nlsq_fit
def nlsq_fit(x, y, dy, func, params_init, verbose=False, **kwargs):
    """Perform a non-linear least squares fit

    Inputs:
        x: one-dimensional numpy array of the independent variable
        y: one-dimensional numpy array of the dependent variable
        dy: absolute error (square root of the variance) of the dependent
            variable. Either a one-dimensional numpy array or None. In the
            array case, if any of its elements is NaN, the whole array is
            treated as NaN (= no weighting)
        func: a callable with the signature
            func(x, par1, par2, par3, ...)
        params_init: list or tuple of the first estimates of the parameters
            par1, par2, par3 etc. to be fitted
        `verbose`: if various messages useful for debugging should be printed
            on stdout.
        other optional keyword arguments will be passed to leastsq().

    Outputs: p, dp, statdict where
        p: list of fitted values of par1, par2 etc.
        dp: list of estimated errors
        statdict: dictionary of various statistical parameters:
            'DoF': Degrees of freedom
            'Chi2': Chi-squared
            'Chi2_reduced': Reduced Chi-squared
            'R2': Coefficient of determination
            'num_func_eval': number of function evaluations during fit.
            'func_value': the function evaluated in the best fitting parameters
            'message': status message from leastsq()
            'error_flag': integer status flag from leastsq() ('ier')
            'Covariance': covariance matrix (variances in the diagonal)
            'Correlation_coeffs': Pearson's correlation coefficients (usually
                denoted by 'r') in a matrix. The diagonal is unity.

    Notes:
        for the actual fitting, scipy.optimize.leastsq() is used.
    """
    if verbose:
        t0 = time.monotonic()
        print("nlsq_fit starting.")
    else:
        t0 = 0
    func_orig = func
    params_init_orig = params_init
    func, params_init = hide_fixedparams(func_orig, params_init_orig)
    # Disable weighting if dy is missing or contains NaN or non-positive
    # elements. (The original test `(dy == np.nan).sum() > 0` is always
    # false, since NaN never compares equal; np.isnan() is the correct test.)
    if (dy is None) or np.isnan(dy).sum() > 0 or (dy <= 0).sum() > 0:
        if verbose:
            print("nlsq_fit: no weighting")
        dy = None

    def objectivefunc(params, x, y, dy):
        """The target function for leastsq()."""
        if dy is None:
            return func(x, *(params.tolist())) - y
        else:
            return (func(x, *(params.tolist())) - y) / dy

    # do the fitting
    if verbose:
        print("nlsq_fit: now doing the fitting...")
        t1 = time.monotonic()
    else:
        t1 = 0
    par, cov, infodict, mesg, ier = leastsq(objectivefunc, np.array(params_init),
                                            (x, y, dy), full_output=True, **kwargs)
    if verbose:
        print("nlsq_fit: fitting done in %.2f seconds." % (time.monotonic() - t1))
        print("nlsq_fit: status from scipy.optimize.leastsq(): %d (%s)" % (ier, mesg))
        print("nlsq_fit: extracting statistics.")
    # test if the covariance was singular (cov is None)
    if cov is None:
        cov = np.ones((len(par), len(par))) * np.nan  # set it to a NaN matrix
    # calculate the Pearson's R^2 parameter (coefficient of determination)
    if dy is None:
        sserr = np.sum((func(x, *(par.tolist())) - y) ** 2)
        sstot = np.sum((y - np.mean(y)) ** 2)
    else:
        sserr = np.sum(((func(x, *(par.tolist())) - y) / dy) ** 2)
        sstot = np.sum((y - np.mean(y)) ** 2 / dy ** 2)
    r2 = 1 - sserr / sstot
    # assemble the statistics dictionary
    statdict = {'DoF': len(x) - len(par),  # degrees of freedom
                'Chi2': (infodict['fvec'] ** 2).sum(),
                'R2': r2,
                'num_func_eval': infodict['nfev'],
                'func_value': func(x, *(par.tolist())),
                'message': mesg,
                'error_flag': ier,
                }
    statdict['Chi2_reduced'] = statdict['Chi2'] / statdict['DoF']
    statdict['Covariance'] = cov * statdict['Chi2_reduced']
    par, statdict['Covariance'] = resubstitute_fixedparams(par, params_init_orig,
                                                           statdict['Covariance'])
    # calculate the estimated errors of the fit parameters
    dpar = np.sqrt(statdict['Covariance'].diagonal())
    # Pearson's correlation coefficients (usually 'r') in a matrix.
    statdict['Correlation_coeffs'] = statdict['Covariance'] / np.outer(dpar, dpar)
    if verbose:
        print("nlsq_fit: returning with results.")
        print("nlsq_fit: total time: %.2f sec." % (time.monotonic() - t0))
    return par, dpar, statdict
python
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L217-L319
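nlsq_fit() is the plain-array workhorse behind nonlinear_leastsquares(); a hedged sketch of calling it directly (assumes sastool is installed; invented data, unweighted via dy=None):

import numpy as np
from sastool.misc.easylsq import nlsq_fit

rng = np.random.default_rng(2)
x = np.linspace(0, np.pi, 40)
y = 3.0 * np.sin(x) + rng.normal(0, 0.05, x.size)

p, dp, stat = nlsq_fit(x, y, None, lambda x, a: a * np.sin(x), [1.0])
print(p[0], '+/-', dp[0])
print(stat['DoF'], stat['error_flag'])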
awacha/sastool
sastool/misc/easylsq.py
simultaneous_nlsq_fit
def simultaneous_nlsq_fit(xs, ys, dys, func, params_inits, verbose=False, **kwargs):
    """Do a simultaneous nonlinear least-squares fit

    Input:
    ------
    `xs`: tuple of abscissa vectors (1d numpy ndarrays)
    `ys`: tuple of ordinate vectors (1d numpy ndarrays)
    `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones)
    `func`: fitting function (the same for all the datasets)
    `params_inits`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the
        initial values of the parameters to be fitted. The special value
        `None` signifies that the corresponding parameter is the same as in
        the previous dataset. Of course, none of the parameters of the first
        dataset can be None.
    `verbose`: if various messages useful for debugging should be printed on
        stdout.

    additional keyword arguments get forwarded to nlsq_fit()

    Output:
    -------
    `p`: tuple of a list of fitted parameters
    `dp`: tuple of a list of errors of the fitted parameters
    `statdict`: statistics dictionary. This is of the same form as in
        `nlsq_fit`, except that func_value is a sequence of one-dimensional
        np.ndarrays containing the best-fitting function values for each curve.
    """
    # collections.abc.Sequence: the bare `collections.Sequence` alias was
    # removed in Python 3.10.
    if not isinstance(xs, collections.abc.Sequence) or \
            not isinstance(ys, collections.abc.Sequence) or \
            not isinstance(dys, collections.abc.Sequence) or \
            not isinstance(params_inits, collections.abc.Sequence):
        raise ValueError('Parameters `xs`, `ys`, `dys` and `params_inits` should be tuples or lists.')
    Ndata = len(xs)
    if len(ys) != Ndata or len(dys) != Ndata or len(params_inits) != Ndata:
        raise ValueError('Parameters `xs`, `ys`, `dys` and `params_inits` should have the same length.')
    if not all([isinstance(x, collections.abc.Sequence) for x in params_inits]):
        raise ValueError('Elements of `params_inits` should be tuples or Python lists.')
    Ns = set([len(x) for x in params_inits])
    if len(Ns) != 1:
        raise ValueError('Elements of `params_inits` should have the same length.')
    Npar = Ns.pop()
    for i in range(Ndata):
        if dys[i] is None:
            dys[i] = np.ones(len(xs[i]), np.double) * np.nan
    # concatenate the x, y and dy vectors
    xcat = np.concatenate(xs)
    ycat = np.concatenate(ys)
    dycat = np.concatenate(dys)
    # find the start and end indices for each dataset in the concatenated datasets.
    lens = [len(x) for x in xs]
    starts = [int(sum(lens[:i])) for i in range(len(lens))]
    ends = [int(sum(lens[:i + 1])) for i in range(len(lens))]
    # flatten the initial parameter list. A single list is needed, where the
    # constrained parameters occur only once. Of course, we have to do some
    # bookkeeping to be able to find the needed parameters for each sub-range
    # later during the fit.
    paramcat = []       # this will be the concatenated list of parameters
    param_indices = []  # same structure as params_inits (i.e. a tuple of
                        # tuples of ints). Each tuple corresponds to a
                        # dataset; each integer holds the index of the
                        # corresponding fit parameter in the concatenated
                        # parameter list.
    for j in range(Ndata):  # for each dataset
        param_indices.append([])
        jorig = j
        for i in range(Npar):
            j = jorig
            # check `j >= 0` first, so a negative j never wraps around and
            # indexes the last dataset (the original tested the conditions in
            # the opposite order)
            while j >= 0 and params_inits[j][i] is None:
                j = j - 1
            if j < 0:
                raise ValueError('None of the parameters in the very first dataset should be `None`.')
            if jorig == j:  # not constrained parameter
                paramcat.append(params_inits[j][i])
                param_indices[jorig].append(len(paramcat) - 1)
            else:
                param_indices[jorig].append(param_indices[j][i])
    if verbose:
        print("Number of datasets for simultaneous fitting:", Ndata)
        print("Total number of data points:", len(xcat))
        print("Number of parameters in each dataset:", Npar)
        print("Total number of parameters:", Ndata * Npar)
        print("Number of independent parameters:", len(paramcat))

    # the flattened function
    def func_flat(x, *params):
        y = []
        for j in range(Ndata):
            if verbose > 1:
                print("Simultaneous fitting: evaluating function for dataset #", j, "/", Ndata)
            pars = [params[i] for i in param_indices[j]]
            y.append(func(x[starts[j]:ends[j]], *pars))
        return np.concatenate(tuple(y))

    # Now we reduced the problem to a single least-squares fit. Carry it out
    # and interpret the results.
    pflat, dpflat, statdictflat = nlsq_fit(xcat, ycat, dycat, func_flat,
                                           paramcat, verbose, **kwargs)
    for n in ['func_value', 'R2', 'Chi2', 'Chi2_reduced', 'DoF', 'Covariance',
              'Correlation_coeffs']:
        statdictflat[n + '_global'] = statdictflat[n]
        statdictflat[n] = []
    p = []
    dp = []
    for j in range(Ndata):  # unpack the results
        p.append([pflat[i] for i in param_indices[j]])
        dp.append([dpflat[i] for i in param_indices[j]])
        statdictflat['func_value'].append(statdictflat['func_value_global'][starts[j]:ends[j]])
        if np.isfinite(dys[j]).all():
            statdictflat['Chi2'].append((((statdictflat['func_value'][-1] - ys[j]) / dys[j]) ** 2).sum())
            sstot = np.sum((ys[j] - np.mean(ys[j])) ** 2 / dys[j] ** 2)
        else:
            statdictflat['Chi2'].append(((statdictflat['func_value'][-1] - ys[j]) ** 2).sum())
            sstot = np.sum((ys[j] - np.mean(ys[j])) ** 2)
        sserr = statdictflat['Chi2'][-1]
        statdictflat['R2'].append(1 - sserr / sstot)
        # fixed a misplaced parenthesis: the original `len(xs[j] - len(p[-1]))`
        # evaluates to just len(xs[j]) instead of subtracting the parameter count
        statdictflat['DoF'].append(len(xs[j]) - len(p[-1]))
        statdictflat['Covariance'].append(slice_covarmatrix(statdictflat['Covariance_global'], param_indices[j]))
        statdictflat['Correlation_coeffs'].append(slice_covarmatrix(statdictflat['Correlation_coeffs_global'], param_indices[j]))
        statdictflat['Chi2_reduced'].append(statdictflat['Chi2'][-1] / statdictflat['DoF'][-1])
    return p, dp, statdictflat
python
def simultaneous_nlsq_fit(xs, ys, dys, func, params_inits, verbose=False, **kwargs): """Do a simultaneous nonlinear least-squares fit Input: ------ `xs`: tuple of abscissa vectors (1d numpy ndarrays) `ys`: tuple of ordinate vectors (1d numpy ndarrays) `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones) `func`: fitting function (the same for all the datasets) `params_init`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the initial values of the parameters to be fitted. The special value `None` signifies that the corresponding parameter is the same as in the previous dataset. Of course, none of the parameters of the first dataset can be None. `verbose`: if various messages useful for debugging should be printed on stdout. additional keyword arguments get forwarded to nlsq_fit() Output: ------- `p`: tuple of a list of fitted parameters `dp`: tuple of a list of errors of the fitted parameters `statdict`: statistics dictionary. This is of the same form as in `nlsq_fit` except that func_value is a sequence of one-dimensional np.ndarrays containing the best-fitting function values for each curve. """ if not isinstance(xs, collections.Sequence) or \ not isinstance(ys, collections.Sequence) or \ not isinstance(dys, collections.Sequence) or \ not isinstance(params_inits, collections.Sequence): raise ValueError('Parameters `xs`, `ys`, `dys` and `params_inits` should be tuples or lists.') Ndata = len(xs) if len(ys) != Ndata or len(dys) != Ndata or len(params_inits) != Ndata: raise ValueError('Parameters `xs`, `ys`, `dys` and `params_inits` should have the same length.') if not all([isinstance(x, collections.Sequence) for x in params_inits]): raise ValueError('Elements of `params_inits` should be tuples or Python lists.') Ns = set([len(x) for x in params_inits]) if len(Ns) != 1: raise ValueError('Elements of `params_inits` should have the same length.') Npar = Ns.pop() for i in range(Ndata): if dys[i] is None: dys[i] = np.ones(len(xs[i]), np.double) * np.nan # concatenate the x, y and dy vectors xcat = np.concatenate(xs) ycat = np.concatenate(ys) dycat = np.concatenate(dys) # find the start and end indices for each dataset in the concatenated datasets. lens = [len(x) for x in xs] starts = [int(sum(lens[:i])) for i in range(len(lens))] ends = [int(sum(lens[:i + 1])) for i in range(len(lens))] # flatten the initial parameter list. A single list is needed, where the # constrained parameters occur only once. Of course, we have to do some # bookkeeping to be able to find the needed parameters for each sub-range # later during the fit. paramcat = [] # this will be the concatenated list of parameters param_indices = [] # this will have the same structure as params_inits (i.e. # a tuple of tuples of ints). Each tuple corresponds to a dataset. # Each integer number in each tuple holds # the index of the corresponding fit parameter in the # concatenated parameter list. 
for j in range(Ndata): # for each dataset param_indices.append([]) jorig = j for i in range(Npar): j = jorig while params_inits[j][i] is None and (j >= 0): j = j - 1 if j < 0: raise ValueError('None of the parameters in the very first dataset should be `None`.') if jorig == j: # not constrained parameter paramcat.append(params_inits[j][i]) param_indices[jorig].append(len(paramcat) - 1) else: param_indices[jorig].append(param_indices[j][i]) if verbose: print("Number of datasets for simultaneous fitting:", Ndata) print("Total number of data points:", len(xcat)) print("Number of parameters in each dataset:", Npar) print("Total number of parameters:", Ndata * Npar) print("Number of independent parameters:", len(paramcat)) # the flattened function def func_flat(x, *params): y = [] for j in range(Ndata): if verbose > 1: print("Simultaneous fitting: evaluating function for dataset #", j, "/", Ndata) pars = [params[i] for i in param_indices[j]] y.append(func(x[starts[j]:ends[j]], *pars)) return np.concatenate(tuple(y)) # Now we reduced the problem to a single least-squares fit. Carry it out and # interpret the results. pflat, dpflat, statdictflat = nlsq_fit(xcat, ycat, dycat, func_flat, paramcat, verbose, **kwargs) for n in ['func_value', 'R2', 'Chi2', 'Chi2_reduced', 'DoF', 'Covariance', 'Correlation_coeffs']: statdictflat[n + '_global'] = statdictflat[n] statdictflat[n] = [] p = [] dp = [] for j in range(Ndata): # unpack the results p.append([pflat[i] for i in param_indices[j]]) dp.append([dpflat[i] for i in param_indices[j]]) statdictflat['func_value'].append(statdictflat['func_value_global'][starts[j]:ends[j]]) if np.isfinite(dys[j]).all(): statdictflat['Chi2'].append((((statdictflat['func_value'][-1] - ys[j]) / dys[j]) ** 2).sum()) sstot = np.sum((ys[j] - np.mean(ys[j])) ** 2 / dys[j] ** 2) else: statdictflat['Chi2'].append(((statdictflat['func_value'][-1] - ys[j]) ** 2).sum()) sstot = np.sum((ys[j] - np.mean(ys[j])) ** 2) sserr = statdictflat['Chi2'][-1] statdictflat['R2'].append(1 - sserr / sstot) statdictflat['DoF'].append(len(xs[j] - len(p[-1]))) statdictflat['Covariance'].append(slice_covarmatrix(statdictflat['Covariance_global'], param_indices[j])) statdictflat['Correlation_coeffs'].append(slice_covarmatrix(statdictflat['Correlation_coeffs_global'], param_indices[j])) statdictflat['Chi2_reduced'].append(statdictflat['Chi2'][-1] / statdictflat['DoF'][-1]) return p, dp, statdictflat
[ "def", "simultaneous_nlsq_fit", "(", "xs", ",", "ys", ",", "dys", ",", "func", ",", "params_inits", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "xs", ",", "collections", ".", "Sequence", ")", "or", "not", "isinstance", "(", "ys", ",", "collections", ".", "Sequence", ")", "or", "not", "isinstance", "(", "dys", ",", "collections", ".", "Sequence", ")", "or", "not", "isinstance", "(", "params_inits", ",", "collections", ".", "Sequence", ")", ":", "raise", "ValueError", "(", "'Parameters `xs`, `ys`, `dys` and `params_inits` should be tuples or lists.'", ")", "Ndata", "=", "len", "(", "xs", ")", "if", "len", "(", "ys", ")", "!=", "Ndata", "or", "len", "(", "dys", ")", "!=", "Ndata", "or", "len", "(", "params_inits", ")", "!=", "Ndata", ":", "raise", "ValueError", "(", "'Parameters `xs`, `ys`, `dys` and `params_inits` should have the same length.'", ")", "if", "not", "all", "(", "[", "isinstance", "(", "x", ",", "collections", ".", "Sequence", ")", "for", "x", "in", "params_inits", "]", ")", ":", "raise", "ValueError", "(", "'Elements of `params_inits` should be tuples or Python lists.'", ")", "Ns", "=", "set", "(", "[", "len", "(", "x", ")", "for", "x", "in", "params_inits", "]", ")", "if", "len", "(", "Ns", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Elements of `params_inits` should have the same length.'", ")", "Npar", "=", "Ns", ".", "pop", "(", ")", "for", "i", "in", "range", "(", "Ndata", ")", ":", "if", "dys", "[", "i", "]", "is", "None", ":", "dys", "[", "i", "]", "=", "np", ".", "ones", "(", "len", "(", "xs", "[", "i", "]", ")", ",", "np", ".", "double", ")", "*", "np", ".", "nan", "# concatenate the x, y and dy vectors", "xcat", "=", "np", ".", "concatenate", "(", "xs", ")", "ycat", "=", "np", ".", "concatenate", "(", "ys", ")", "dycat", "=", "np", ".", "concatenate", "(", "dys", ")", "# find the start and end indices for each dataset in the concatenated datasets.", "lens", "=", "[", "len", "(", "x", ")", "for", "x", "in", "xs", "]", "starts", "=", "[", "int", "(", "sum", "(", "lens", "[", ":", "i", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "lens", ")", ")", "]", "ends", "=", "[", "int", "(", "sum", "(", "lens", "[", ":", "i", "+", "1", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "lens", ")", ")", "]", "# flatten the initial parameter list. A single list is needed, where the", "# constrained parameters occur only once. Of course, we have to do some", "# bookkeeping to be able to find the needed parameters for each sub-range", "# later during the fit.", "paramcat", "=", "[", "]", "# this will be the concatenated list of parameters", "param_indices", "=", "[", "]", "# this will have the same structure as params_inits (i.e.", "# a tuple of tuples of ints). 
Each tuple corresponds to a dataset.", "# Each integer number in each tuple holds", "# the index of the corresponding fit parameter in the ", "# concatenated parameter list.", "for", "j", "in", "range", "(", "Ndata", ")", ":", "# for each dataset", "param_indices", ".", "append", "(", "[", "]", ")", "jorig", "=", "j", "for", "i", "in", "range", "(", "Npar", ")", ":", "j", "=", "jorig", "while", "params_inits", "[", "j", "]", "[", "i", "]", "is", "None", "and", "(", "j", ">=", "0", ")", ":", "j", "=", "j", "-", "1", "if", "j", "<", "0", ":", "raise", "ValueError", "(", "'None of the parameters in the very first dataset should be `None`.'", ")", "if", "jorig", "==", "j", ":", "# not constrained parameter", "paramcat", ".", "append", "(", "params_inits", "[", "j", "]", "[", "i", "]", ")", "param_indices", "[", "jorig", "]", ".", "append", "(", "len", "(", "paramcat", ")", "-", "1", ")", "else", ":", "param_indices", "[", "jorig", "]", ".", "append", "(", "param_indices", "[", "j", "]", "[", "i", "]", ")", "if", "verbose", ":", "print", "(", "\"Number of datasets for simultaneous fitting:\"", ",", "Ndata", ")", "print", "(", "\"Total number of data points:\"", ",", "len", "(", "xcat", ")", ")", "print", "(", "\"Number of parameters in each dataset:\"", ",", "Npar", ")", "print", "(", "\"Total number of parameters:\"", ",", "Ndata", "*", "Npar", ")", "print", "(", "\"Number of independent parameters:\"", ",", "len", "(", "paramcat", ")", ")", "# the flattened function", "def", "func_flat", "(", "x", ",", "*", "params", ")", ":", "y", "=", "[", "]", "for", "j", "in", "range", "(", "Ndata", ")", ":", "if", "verbose", ">", "1", ":", "print", "(", "\"Simultaneous fitting: evaluating function for dataset #\"", ",", "j", ",", "\"/\"", ",", "Ndata", ")", "pars", "=", "[", "params", "[", "i", "]", "for", "i", "in", "param_indices", "[", "j", "]", "]", "y", ".", "append", "(", "func", "(", "x", "[", "starts", "[", "j", "]", ":", "ends", "[", "j", "]", "]", ",", "*", "pars", ")", ")", "return", "np", ".", "concatenate", "(", "tuple", "(", "y", ")", ")", "# Now we reduced the problem to a single least-squares fit. 
Carry it out and", "# interpret the results.", "pflat", ",", "dpflat", ",", "statdictflat", "=", "nlsq_fit", "(", "xcat", ",", "ycat", ",", "dycat", ",", "func_flat", ",", "paramcat", ",", "verbose", ",", "*", "*", "kwargs", ")", "for", "n", "in", "[", "'func_value'", ",", "'R2'", ",", "'Chi2'", ",", "'Chi2_reduced'", ",", "'DoF'", ",", "'Covariance'", ",", "'Correlation_coeffs'", "]", ":", "statdictflat", "[", "n", "+", "'_global'", "]", "=", "statdictflat", "[", "n", "]", "statdictflat", "[", "n", "]", "=", "[", "]", "p", "=", "[", "]", "dp", "=", "[", "]", "for", "j", "in", "range", "(", "Ndata", ")", ":", "# unpack the results", "p", ".", "append", "(", "[", "pflat", "[", "i", "]", "for", "i", "in", "param_indices", "[", "j", "]", "]", ")", "dp", ".", "append", "(", "[", "dpflat", "[", "i", "]", "for", "i", "in", "param_indices", "[", "j", "]", "]", ")", "statdictflat", "[", "'func_value'", "]", ".", "append", "(", "statdictflat", "[", "'func_value_global'", "]", "[", "starts", "[", "j", "]", ":", "ends", "[", "j", "]", "]", ")", "if", "np", ".", "isfinite", "(", "dys", "[", "j", "]", ")", ".", "all", "(", ")", ":", "statdictflat", "[", "'Chi2'", "]", ".", "append", "(", "(", "(", "(", "statdictflat", "[", "'func_value'", "]", "[", "-", "1", "]", "-", "ys", "[", "j", "]", ")", "/", "dys", "[", "j", "]", ")", "**", "2", ")", ".", "sum", "(", ")", ")", "sstot", "=", "np", ".", "sum", "(", "(", "ys", "[", "j", "]", "-", "np", ".", "mean", "(", "ys", "[", "j", "]", ")", ")", "**", "2", "/", "dys", "[", "j", "]", "**", "2", ")", "else", ":", "statdictflat", "[", "'Chi2'", "]", ".", "append", "(", "(", "(", "statdictflat", "[", "'func_value'", "]", "[", "-", "1", "]", "-", "ys", "[", "j", "]", ")", "**", "2", ")", ".", "sum", "(", ")", ")", "sstot", "=", "np", ".", "sum", "(", "(", "ys", "[", "j", "]", "-", "np", ".", "mean", "(", "ys", "[", "j", "]", ")", ")", "**", "2", ")", "sserr", "=", "statdictflat", "[", "'Chi2'", "]", "[", "-", "1", "]", "statdictflat", "[", "'R2'", "]", ".", "append", "(", "1", "-", "sserr", "/", "sstot", ")", "statdictflat", "[", "'DoF'", "]", ".", "append", "(", "len", "(", "xs", "[", "j", "]", "-", "len", "(", "p", "[", "-", "1", "]", ")", ")", ")", "statdictflat", "[", "'Covariance'", "]", ".", "append", "(", "slice_covarmatrix", "(", "statdictflat", "[", "'Covariance_global'", "]", ",", "param_indices", "[", "j", "]", ")", ")", "statdictflat", "[", "'Correlation_coeffs'", "]", ".", "append", "(", "slice_covarmatrix", "(", "statdictflat", "[", "'Correlation_coeffs_global'", "]", ",", "param_indices", "[", "j", "]", ")", ")", "statdictflat", "[", "'Chi2_reduced'", "]", ".", "append", "(", "statdictflat", "[", "'Chi2'", "]", "[", "-", "1", "]", "/", "statdictflat", "[", "'DoF'", "]", "[", "-", "1", "]", ")", "return", "p", ",", "dp", ",", "statdictflat" ]
Do a simultaneous nonlinear least-squares fit Input: ------ `xs`: tuple of abscissa vectors (1d numpy ndarrays) `ys`: tuple of ordinate vectors (1d numpy ndarrays) `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones) `func`: fitting function (the same for all the datasets) `params_inits`: tuple of *lists* or *tuples* (not numpy ndarrays!) of the initial values of the parameters to be fitted. The special value `None` signifies that the corresponding parameter is the same as in the previous dataset. Of course, none of the parameters of the first dataset can be None. `verbose`: if various messages useful for debugging should be printed on stdout. additional keyword arguments get forwarded to nlsq_fit() Output: ------- `p`: tuple of lists of the fitted parameters (one list per dataset) `dp`: tuple of lists of the errors of the fitted parameters `statdict`: statistics dictionary. This is of the same form as in `nlsq_fit` except that func_value is a sequence of one-dimensional np.ndarrays containing the best-fitting function values for each curve.
[ "Do", "a", "simultaneous", "nonlinear", "least", "-", "squares", "fit" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L329-L449
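A minimal usage sketch for the record above (an illustration, not part of the dataset): it assumes the function is exported as `simultaneous_nonlinear_leastsquares` from `sastool.misc.easylsq` (the name field of this record is not visible in this excerpt) and shows how `None` ties a parameter of one dataset to the previous one.

import numpy as np
from sastool.misc.easylsq import simultaneous_nonlinear_leastsquares  # assumed import path

def line(x, a, b):
    return a * x + b

x1 = np.linspace(0., 10., 50)
x2 = np.linspace(0., 5., 30)
y1 = 2.0 * x1 + 1.0 + np.random.randn(50) * 0.1
y2 = 2.0 * x2 - 3.0 + np.random.randn(30) * 0.1
# the slope of the second dataset is constrained to that of the first via None
p, dp, statdict = simultaneous_nonlinear_leastsquares(
    (x1, x2), (y1, y2), (np.full(50, 0.1), np.full(30, 0.1)),
    line, ((2.1, 0.9), (None, -2.5)))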
awacha/sastool
sastool/misc/errorvalue.py
ErrorValue.tostring
def tostring(self: 'ErrorValue', extra_digits: int = 0, plusminus: str = ' +/- ', fmt: str = None) -> str: """Make a string representation of the value and its uncertainty. Inputs: ------- ``extra_digits``: integer how many extra digits should be shown (plus or minus, zero means that the number of digits should be defined by the magnitude of the uncertainty). ``plusminus``: string the character sequence to be inserted in place of '+/-' including delimiting whitespace. ``fmt``: string or None how to format the output. Currently only strings ending in 'tex' are supported, which render ascii-exponentials (e.g. 3.1415e-2) into a format which is more appropriate to TeX. Outputs: -------- the string representation. """ if isinstance(fmt, str) and fmt.lower().endswith('tex'): return re.subn(r'(\d*)(\.(\d)*)?[eE]([+-]?\d+)', lambda m: (r'$%s%s\cdot 10^{%s}$' % (m.group(1), m.group(2), m.group(4))).replace('None', ''), self.tostring(extra_digits=extra_digits, plusminus=plusminus, fmt=None))[0] if isinstance(self.val, numbers.Real): try: Ndigits = -int(math.floor(math.log10(self.err))) + extra_digits except (OverflowError, ValueError): return str(self.val) + plusminus + str(self.err) else: return str(round(self.val, Ndigits)) + plusminus + str(round(self.err, Ndigits)) return str(self.val) + ' +/- ' + str(self.err)
python
def tostring(self: 'ErrorValue', extra_digits: int = 0, plusminus: str = ' +/- ', fmt: str = None) -> str: """Make a string representation of the value and its uncertainty. Inputs: ------- ``extra_digits``: integer how many extra digits should be shown (plus or minus, zero means that the number of digits should be defined by the magnitude of the uncertainty). ``plusminus``: string the character sequence to be inserted in place of '+/-' including delimiting whitespace. ``fmt``: string or None how to format the output. Currently only strings ending in 'tex' are supported, which render ascii-exponentials (e.g. 3.1415e-2) into a format which is more appropriate to TeX. Outputs: -------- the string representation. """ if isinstance(fmt, str) and fmt.lower().endswith('tex'): return re.subn(r'(\d*)(\.(\d)*)?[eE]([+-]?\d+)', lambda m: (r'$%s%s\cdot 10^{%s}$' % (m.group(1), m.group(2), m.group(4))).replace('None', ''), self.tostring(extra_digits=extra_digits, plusminus=plusminus, fmt=None))[0] if isinstance(self.val, numbers.Real): try: Ndigits = -int(math.floor(math.log10(self.err))) + extra_digits except (OverflowError, ValueError): return str(self.val) + plusminus + str(self.err) else: return str(round(self.val, Ndigits)) + plusminus + str(round(self.err, Ndigits)) return str(self.val) + ' +/- ' + str(self.err)
[ "def", "tostring", "(", "self", ":", "'ErrorValue'", ",", "extra_digits", ":", "int", "=", "0", ",", "plusminus", ":", "str", "=", "' +/- '", ",", "fmt", ":", "str", "=", "None", ")", "->", "str", ":", "if", "isinstance", "(", "fmt", ",", "str", ")", "and", "fmt", ".", "lower", "(", ")", ".", "endswith", "(", "'tex'", ")", ":", "return", "re", ".", "subn", "(", "'(\\d*)(\\.(\\d)*)?[eE]([+-]?\\d+)'", ",", "lambda", "m", ":", "(", "r'$%s%s\\cdot 10^{%s}$'", "%", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ",", "m", ".", "group", "(", "4", ")", ")", ")", ".", "replace", "(", "'None'", ",", "''", ")", ",", "self", ".", "tostring", "(", "extra_digits", "=", "extra_digits", ",", "plusminus", "=", "plusminus", ",", "fmt", "=", "None", ")", ")", "[", "0", "]", "if", "isinstance", "(", "self", ".", "val", ",", "numbers", ".", "Real", ")", ":", "try", ":", "Ndigits", "=", "-", "int", "(", "math", ".", "floor", "(", "math", ".", "log10", "(", "self", ".", "err", ")", ")", ")", "+", "extra_digits", "except", "(", "OverflowError", ",", "ValueError", ")", ":", "return", "str", "(", "self", ".", "val", ")", "+", "plusminus", "+", "str", "(", "self", ".", "err", ")", "else", ":", "return", "str", "(", "round", "(", "self", ".", "val", ",", "Ndigits", ")", ")", "+", "plusminus", "+", "str", "(", "round", "(", "self", ".", "err", ",", "Ndigits", ")", ")", "return", "str", "(", "self", ".", "val", ")", "+", "' +/- '", "+", "str", "(", "self", ".", "err", ")" ]
Make a string representation of the value and its uncertainty. Inputs: ------- ``extra_digits``: integer how many extra digits should be shown (plus or minus, zero means that the number of digits should be defined by the magnitude of the uncertainty). ``plusminus``: string the character sequence to be inserted in place of '+/-' including delimiting whitespace. ``fmt``: string or None how to format the output. Currently only strings ending in 'tex' are supported, which render ascii-exponentials (e.g. 3.1415e-2) into a format which is more appropriate to TeX. Outputs: -------- the string representation.
[ "Make", "a", "string", "representation", "of", "the", "value", "and", "its", "uncertainty", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/errorvalue.py#L157-L190
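A short sketch of how `ErrorValue.tostring` can be called, assuming `ErrorValue` is importable from `sastool.misc.errorvalue`:

from sastool.misc.errorvalue import ErrorValue  # assumed import path

v = ErrorValue(3.14159, 0.0123)
print(v.tostring())                 # value and error rounded to the error's magnitude: 3.14 +/- 0.01
print(v.tostring(extra_digits=1))   # one extra digit: 3.142 +/- 0.012
print(v.tostring(plusminus=' ± '))  # custom separator
print(ErrorValue(3.14159e-05, 1.2e-06).tostring(fmt='tex'))  # exponentials rendered for TeX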
awacha/sastool
sastool/misc/errorvalue.py
ErrorValue.random
def random(self: 'ErrorValue') -> np.ndarray: """Sample a random number (array) from the distribution defined by mean=`self.val` and variance=`self.err`^2. """ if isinstance(self.val, np.ndarray): # IGNORE:E1103 return np.random.randn(*self.val.shape) * self.err + self.val else: return np.random.randn() * self.err + self.val
python
def random(self: 'ErrorValue') -> np.ndarray: """Sample a random number (array) from the distribution defined by mean=`self.val` and variance=`self.err`^2. """ if isinstance(self.val, np.ndarray): # IGNORE:E1103 return np.random.randn(*self.val.shape) * self.err + self.val else: return np.random.randn() * self.err + self.val
[ "def", "random", "(", "self", ":", "'ErrorValue'", ")", "->", "np", ".", "ndarray", ":", "if", "isinstance", "(", "self", ".", "val", ",", "np", ".", "ndarray", ")", ":", "# IGNORE:E1103", "return", "np", ".", "random", ".", "randn", "(", "self", ".", "val", ".", "shape", ")", "*", "self", ".", "err", "+", "self", ".", "val", "else", ":", "return", "np", ".", "random", ".", "randn", "(", ")", "*", "self", ".", "err", "+", "self", ".", "val" ]
Sample a random number (array) from the distribution defined by mean=`self.val` and variance=`self.err`^2.
[ "Sample", "a", "random", "number", "(", "array", ")", "of", "the", "distribution", "defined", "by", "mean", "=", "self", ".", "val", "and", "variance", "=", "self", ".", "err", "^2", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/errorvalue.py#L240-L248
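A tiny sketch of drawing Monte-Carlo samples from an `ErrorValue` (illustrative only, with the same assumed import path as above):

from sastool.misc.errorvalue import ErrorValue  # assumed import path

x = ErrorValue(10.0, 0.5)
samples = [x.random() for _ in range(5)]  # five draws from the normal distribution N(10.0, 0.5**2)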
awacha/sastool
sastool/misc/errorvalue.py
ErrorValue.evalfunc
def evalfunc(cls, func, *args, **kwargs): """Evaluate a function with error propagation. Inputs: ------- ``func``: callable this is the function to be evaluated. Should return either a number or a np.ndarray. ``*args``: other positional arguments of func. Arguments which are not instances of `ErrorValue` are taken as constants. keyword arguments supported: ``NMC``: number of Monte-Carlo steps. If not defined, defaults to 1000 ``exceptions_to_repeat``: list of exception types to ignore: if one of these is raised the given MC step is repeated once again. Notice that this might induce an infinite loop! The exception types in this list should be subclasses of ``Exception``. ``exceptions_to_skip``: list of exception types to skip: if one of these is raised the given MC step is skipped, never to be repeated. The exception types in this list should be subclasses of ``Exception``. Output: ------- ``result``: an `ErrorValue` with the result. The error is estimated via a Monte-Carlo approach to Gaussian error propagation. """ def do_random(x): if isinstance(x, cls): return x.random() else: return x if 'NMC' not in kwargs: kwargs['NMC'] = 1000 if 'exceptions_to_skip' not in kwargs: kwargs['exceptions_to_skip'] = [] if 'exceptions_to_repeat' not in kwargs: kwargs['exceptions_to_repeat'] = [] meanvalue = func(*args) # this way we get either a number or a np.array stdcollector = meanvalue * 0 mciters = 0 while mciters < kwargs['NMC']: try: # IGNORE:W0142 stdcollector += (func(*[do_random(a) for a in args]) - meanvalue) ** 2 mciters += 1 except Exception as e: # IGNORE:W0703 if any(isinstance(e, etype) for etype in kwargs['exceptions_to_skip']): kwargs['NMC'] -= 1 elif any(isinstance(e, etype) for etype in kwargs['exceptions_to_repeat']): pass else: raise return cls(meanvalue, stdcollector ** 0.5 / (kwargs['NMC'] - 1))
python
def evalfunc(cls, func, *args, **kwargs): """Evaluate a function with error propagation. Inputs: ------- ``func``: callable this is the function to be evaluated. Should return either a number or a np.ndarray. ``*args``: other positional arguments of func. Arguments which are not instances of `ErrorValue` are taken as constants. keyword arguments supported: ``NMC``: number of Monte-Carlo steps. If not defined, defaults to 1000 ``exceptions_to_repeat``: list of exception types to ignore: if one of these is raised the given MC step is repeated once again. Notice that this might induce an infinite loop! The exception types in this list should be subclasses of ``Exception``. ``exceptions_to_skip``: list of exception types to skip: if one of these is raised the given MC step is skipped, never to be repeated. The exception types in this list should be subclasses of ``Exception``. Output: ------- ``result``: an `ErrorValue` with the result. The error is estimated via a Monte-Carlo approach to Gaussian error propagation. """ def do_random(x): if isinstance(x, cls): return x.random() else: return x if 'NMC' not in kwargs: kwargs['NMC'] = 1000 if 'exceptions_to_skip' not in kwargs: kwargs['exceptions_to_skip'] = [] if 'exceptions_to_repeat' not in kwargs: kwargs['exceptions_to_repeat'] = [] meanvalue = func(*args) # this way we get either a number or a np.array stdcollector = meanvalue * 0 mciters = 0 while mciters < kwargs['NMC']: try: # IGNORE:W0142 stdcollector += (func(*[do_random(a) for a in args]) - meanvalue) ** 2 mciters += 1 except Exception as e: # IGNORE:W0703 if any(isinstance(e, etype) for etype in kwargs['exceptions_to_skip']): kwargs['NMC'] -= 1 elif any(isinstance(e, etype) for etype in kwargs['exceptions_to_repeat']): pass else: raise return cls(meanvalue, stdcollector ** 0.5 / (kwargs['NMC'] - 1))
[ "def", "evalfunc", "(", "cls", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "do_random", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "cls", ")", ":", "return", "x", ".", "random", "(", ")", "else", ":", "return", "x", "if", "'NMC'", "not", "in", "kwargs", ":", "kwargs", "[", "'NMC'", "]", "=", "1000", "if", "'exceptions_to_skip'", "not", "in", "kwargs", ":", "kwargs", "[", "'exceptions_to_skip'", "]", "=", "[", "]", "if", "'exceptions_to_repeat'", "not", "in", "kwargs", ":", "kwargs", "[", "'exceptions_to_repeat'", "]", "=", "[", "]", "meanvalue", "=", "func", "(", "*", "args", ")", "# this way we get either a number or a np.array", "stdcollector", "=", "meanvalue", "*", "0", "mciters", "=", "0", "while", "mciters", "<", "kwargs", "[", "'NMC'", "]", ":", "try", ":", "# IGNORE:W0142", "stdcollector", "+=", "(", "func", "(", "*", "[", "do_random", "(", "a", ")", "for", "a", "in", "args", "]", ")", "-", "meanvalue", ")", "**", "2", "mciters", "+=", "1", "except", "Exception", "as", "e", ":", "# IGNORE:W0703", "if", "any", "(", "isinstance", "(", "e", ",", "etype", ")", "for", "etype", "in", "kwargs", "[", "'exceptions_to_skip'", "]", ")", ":", "kwargs", "[", "'NMC'", "]", "-=", "1", "elif", "any", "(", "isinstance", "(", "e", ",", "etype", ")", "for", "etype", "in", "kwargs", "[", "'exceptions_to_repeat'", "]", ")", ":", "pass", "else", ":", "raise", "return", "cls", "(", "meanvalue", ",", "stdcollector", "**", "0.5", "/", "(", "kwargs", "[", "'NMC'", "]", "-", "1", ")", ")" ]
Evaluate a function with error propagation. Inputs: ------- ``func``: callable this is the function to be evaluated. Should return either a number or a np.ndarray. ``*args``: other positional arguments of func. Arguments which are not instances of `ErrorValue` are taken as constants. keyword arguments supported: ``NMC``: number of Monte-Carlo steps. If not defined, defaults to 1000 ``exceptions_to_repeat``: list of exception types to ignore: if one of these is raised the given MC step is repeated once again. Notice that this might induce an infinite loop! The exception types in this list should be subclasses of ``Exception``. ``exceptions_to_skip``: list of exception types to skip: if one of these is raised the given MC step is skipped, never to be repeated. The exception types in this list should be subclasses of ``Exception``. Output: ------- ``result``: an `ErrorValue` with the result. The error is estimated via a Monte-Carlo approach to Gaussian error propagation.
[ "Evaluate", "a", "function", "with", "error", "propagation", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/errorvalue.py#L251-L311
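A sketch of propagating uncertainties through an arbitrary expression with the classmethod above; the expression and values are invented for illustration:

import numpy as np
from sastool.misc.errorvalue import ErrorValue  # assumed import path

a = ErrorValue(2.0, 0.1)
b = ErrorValue(0.5, 0.05)
# Monte-Carlo propagation of the errors of a and b through a nonlinear function
result = ErrorValue.evalfunc(lambda a_, b_: a_ * np.exp(-b_), a, b, NMC=2000)
print(result.tostring())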
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
Fsphere
def Fsphere(q, R): """Scattering form-factor amplitude of a sphere normalized to F(q=0)=V Inputs: ------- ``q``: independent variable ``R``: sphere radius Formula: -------- ``4*pi/q^3 * (sin(qR) - qR*cos(qR))`` """ return 4 * np.pi / q ** 3 * (np.sin(q * R) - q * R * np.cos(q * R))
python
def Fsphere(q, R): """Scattering form-factor amplitude of a sphere normalized to F(q=0)=V Inputs: ------- ``q``: independent variable ``R``: sphere radius Formula: -------- ``4*pi/q^3 * (sin(qR) - qR*cos(qR))`` """ return 4 * np.pi / q ** 3 * (np.sin(q * R) - q * R * np.cos(q * R))
[ "def", "Fsphere", "(", "q", ",", "R", ")", ":", "return", "4", "*", "np", ".", "pi", "/", "q", "**", "3", "*", "(", "np", ".", "sin", "(", "q", "*", "R", ")", "-", "q", "*", "R", "*", "np", ".", "cos", "(", "q", "*", "R", ")", ")" ]
Scattering form-factor amplitude of a sphere normalized to F(q=0)=V Inputs: ------- ``q``: independent variable ``R``: sphere radius Formula: -------- ``4*pi/q^3 * (sin(qR) - qR*cos(qR))``
[ "Scattering", "form", "-", "factor", "amplitude", "of", "a", "sphere", "normalized", "to", "F", "(", "q", "=", "0", ")", "=", "V" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L10-L22
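A quick sanity check of the normalization stated in the docstring (F tends to the sphere volume as q goes to 0), assuming the module path shown in the URL above:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import Fsphere  # assumed import path

q = np.logspace(-3, 0, 500)  # momentum transfer
R = 5.0                      # sphere radius
F = Fsphere(q, R)
print(F[0], 4 * np.pi * R ** 3 / 3)  # nearly equal at the smallest q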
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
GeneralGuinier
def GeneralGuinier(q, G, Rg, s): """Generalized Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor ``Rg``: radius of gyration ``s``: dimensionality parameter (can be 1, 2, 3) Formula: -------- ``G/q**(3-s)*exp(-(q^2*Rg^2)/s)`` """ return G / q ** (3 - s) * np.exp(-(q * Rg) ** 2 / s)
python
def GeneralGuinier(q, G, Rg, s): """Generalized Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor ``Rg``: radius of gyration ``s``: dimensionality parameter (can be 1, 2, 3) Formula: -------- ``G/q**(3-s)*exp(-(q^2*Rg^2)/s)`` """ return G / q ** (3 - s) * np.exp(-(q * Rg) ** 2 / s)
[ "def", "GeneralGuinier", "(", "q", ",", "G", ",", "Rg", ",", "s", ")", ":", "return", "G", "/", "q", "**", "(", "3", "-", "s", ")", "*", "np", ".", "exp", "(", "-", "(", "q", "*", "Rg", ")", "**", "2", "/", "s", ")" ]
Generalized Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor ``Rg``: radius of gyration ``s``: dimensionality parameter (can be 1, 2, 3) Formula: -------- ``G/q**(3-s)*exp(-(q^2*Rg^2)/s)``
[ "Generalized", "Guinier", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L39-L53
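A sketch showing how the dimensionality parameter changes the prefactor; with s=3 the expression reduces to the classical Guinier law ``G*exp(-q^2*Rg^2/3)``:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import GeneralGuinier  # assumed import path

q = np.linspace(0.01, 0.5, 100)
I_globular = GeneralGuinier(q, 1.0, 3.0, 3)  # s=3: classical Guinier, no prefactor
I_rod = GeneralGuinier(q, 1.0, 3.0, 2)       # s=2: rod-like, extra 1/q prefactor
I_lamella = GeneralGuinier(q, 1.0, 3.0, 1)   # s=1: lamellar, extra 1/q^2 prefactor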
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
GuinierPorod
def GuinierPorod(q, G, Rg, alpha): """Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return GuinierPorodMulti(q, G, Rg, alpha)
python
def GuinierPorod(q, G, Rg, alpha): """Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return GuinierPorodMulti(q, G, Rg, alpha)
[ "def", "GuinierPorod", "(", "q", ",", "G", ",", "Rg", ",", "alpha", ")", ":", "return", "GuinierPorodMulti", "(", "q", ",", "G", ",", "Rg", ",", "alpha", ")" ]
Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "Guinier", "-", "Porod", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L86-L107
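A usage sketch; from the smoothness conditions in the cited Hammouda paper the crossover point is ``q_sep = sqrt(-3*alpha/2)/Rg``, so the curve below switches branches near q ≈ 0.61:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import GuinierPorod  # assumed import path

q = np.logspace(-2, 1, 400)
I = GuinierPorod(q, G=1.0, Rg=4.0, alpha=-4)
q_sep = np.sqrt(-3 * (-4) / 2.0) / 4.0  # ~0.61 for Rg=4, alpha=-4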
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
PorodGuinier
def PorodGuinier(q, a, alpha, Rg): """Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return PorodGuinierMulti(q, a, alpha, Rg)
python
def PorodGuinier(q, a, alpha, Rg): """Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return PorodGuinierMulti(q, a, alpha, Rg)
[ "def", "PorodGuinier", "(", "q", ",", "a", ",", "alpha", ",", "Rg", ")", ":", "return", "PorodGuinierMulti", "(", "q", ",", "a", ",", "alpha", ",", "Rg", ")" ]
Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "Porod", "-", "Guinier", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L109-L130
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
PorodGuinierPorod
def PorodGuinierPorod(q, a, alpha, Rg, beta): """Empirical Porod-Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``a``: factor of the first power-law branch ``alpha``: exponent of the first power-law branch ``Rg``: radius of gyration ``beta``: exponent of the second power-law branch Formula: -------- ``a*q^alpha`` if ``q<q_sep1``. ``G * exp(-q^2*Rg^2/3)`` if ``q_sep1<q<q_sep2`` and ``b*q^beta`` if ``q_sep2<q``. ``q_sep1``, ``q_sep2``, ``G`` and ``b`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return PorodGuinierMulti(q, a, alpha, Rg, beta)
python
def PorodGuinierPorod(q, a, alpha, Rg, beta): """Empirical Porod-Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``a``: factor of the first power-law branch ``alpha``: exponent of the first power-law branch ``Rg``: radius of gyration ``beta``: exponent of the second power-law branch Formula: -------- ``a*q^alpha`` if ``q<q_sep1``. ``G * exp(-q^2*Rg^2/3)`` if ``q_sep1<q<q_sep2`` and ``b*q^beta`` if ``q_sep2<q``. ``q_sep1``, ``q_sep2``, ``G`` and ``b`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return PorodGuinierMulti(q, a, alpha, Rg, beta)
[ "def", "PorodGuinierPorod", "(", "q", ",", "a", ",", "alpha", ",", "Rg", ",", "beta", ")", ":", "return", "PorodGuinierMulti", "(", "q", ",", "a", ",", "alpha", ",", "Rg", ",", "beta", ")" ]
Empirical Porod-Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``a``: factor of the first power-law branch ``alpha``: exponent of the first power-law branch ``Rg``: radius of gyration ``beta``: exponent of the second power-law branch Formula: -------- ``a*q^alpha`` if ``q<q_sep1``. ``G * exp(-q^2*Rg^2/3)`` if ``q_sep1<q<q_sep2`` and ``b*q^beta`` if ``q_sep2<q``. ``q_sep1``, ``q_sep2``, ``G`` and ``b`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "Porod", "-", "Guinier", "-", "Porod", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L132-L155
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
GuinierPorodGuinier
def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2): """Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)
python
def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2): """Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)
[ "def", "GuinierPorodGuinier", "(", "q", ",", "G", ",", "Rg1", ",", "alpha", ",", "Rg2", ")", ":", "return", "GuinierPorodMulti", "(", "q", ",", "G", ",", "Rg1", ",", "alpha", ",", "Rg2", ")" ]
Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "Guinier", "-", "Porod", "-", "Guinier", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L157-L182
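The three wrappers above (PorodGuinier, PorodGuinierPorod and GuinierPorodGuinier) all delegate to the Multi variants further below; a combined sketch, with parameter choices picked so that the smoothness-derived crossover points come in increasing q order:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import (  # assumed import path
    PorodGuinier, PorodGuinierPorod, GuinierPorodGuinier)

q = np.logspace(-2, 0.5, 500)
I1 = PorodGuinier(q, 1.0, -2, 10.0)              # power law, then Guinier
I2 = PorodGuinierPorod(q, 1.0, -2, 10.0, -4)     # power law, Guinier, power law
I3 = GuinierPorodGuinier(q, 1.0, 20.0, -3, 5.0)  # Guinier, power law, Guinier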
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
DampedPowerlaw
def DampedPowerlaw(q, a, alpha, sigma): """Damped power-law Inputs: ------- ``q``: independent variable ``a``: factor ``alpha``: exponent ``sigma``: standard deviation of the damping Gaussian Formula: -------- ``a*q^alpha*exp(-q^2/(2*sigma^2))`` """ return a * q ** alpha * np.exp(-q ** 2 / (2 * sigma ** 2))
python
def DampedPowerlaw(q, a, alpha, sigma): """Damped power-law Inputs: ------- ``q``: independent variable ``a``: factor ``alpha``: exponent ``sigma``: standard deviation of the damping Gaussian Formula: -------- ``a*q^alpha*exp(-q^2/(2*sigma^2))`` """ return a * q ** alpha * np.exp(-q ** 2 / (2 * sigma ** 2))
[ "def", "DampedPowerlaw", "(", "q", ",", "a", ",", "alpha", ",", "sigma", ")", ":", "return", "a", "*", "q", "**", "alpha", "*", "np", ".", "exp", "(", "-", "q", "**", "2", "/", "(", "2", "*", "sigma", "**", "2", ")", ")" ]
Damped power-law Inputs: ------- ``q``: independent variable ``a``: factor ``alpha``: exponent ``sigma``: standard deviation of the damping Gaussian Formula: -------- ``a*q^alpha*exp(-q^2/(2*sigma^2))``
[ "Damped", "power", "-", "law" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L185-L199
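A one-line example of the damped power-law, again under the assumed import path:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import DampedPowerlaw  # assumed import path

q = np.logspace(-2, 0, 200)
I = DampedPowerlaw(q, a=1.0, alpha=-2, sigma=0.3)  # q^-2 decay with a Gaussian cut-off around q ~ 0.3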
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
LogNormSpheres
def LogNormSpheres(q, A, mu, sigma, N=1000): """Scattering of a population of non-correlated spheres (radii from a log-normal distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``mu``: expectation of ``ln(R)`` ``sigma``: standard deviation of ``ln(R)`` Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a log-normal distribution of the radii. """ Rmin = 0 Rmax = np.exp(mu + 3 * sigma) R = np.linspace(Rmin, Rmax, N + 1)[1:] P = 1 / np.sqrt(2 * np.pi * sigma ** 2 * R ** 2) * np.exp(-(np.log(R) - mu) ** 2 / (2 * sigma ** 2)) def Fsphere_outer(q, R): qR = np.outer(q, R) q1 = np.outer(q, np.ones_like(R)) return 4 * np.pi / q1 ** 3 * (np.sin(qR) - qR * np.cos(qR)) I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P)) return A * I.sum(1) / P.sum()
python
def LogNormSpheres(q, A, mu, sigma, N=1000): """Scattering of a population of non-correlated spheres (radii from a log-normal distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``mu``: expectation of ``ln(R)`` ``sigma``: standard deviation of ``ln(R)`` Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a log-normal distribution of the radii. """ Rmin = 0 Rmax = np.exp(mu + 3 * sigma) R = np.linspace(Rmin, Rmax, N + 1)[1:] P = 1 / np.sqrt(2 * np.pi * sigma ** 2 * R ** 2) * np.exp(-(np.log(R) - mu) ** 2 / (2 * sigma ** 2)) def Fsphere_outer(q, R): qR = np.outer(q, R) q1 = np.outer(q, np.ones_like(R)) return 4 * np.pi / q1 ** 3 * (np.sin(qR) - qR * np.cos(qR)) I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P)) return A * I.sum(1) / P.sum()
[ "def", "LogNormSpheres", "(", "q", ",", "A", ",", "mu", ",", "sigma", ",", "N", "=", "1000", ")", ":", "Rmin", "=", "0", "Rmax", "=", "np", ".", "exp", "(", "mu", "+", "3", "*", "sigma", ")", "R", "=", "np", ".", "linspace", "(", "Rmin", ",", "Rmax", ",", "N", "+", "1", ")", "[", "1", ":", "]", "P", "=", "1", "/", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", "*", "sigma", "**", "2", "*", "R", "**", "2", ")", "*", "np", ".", "exp", "(", "-", "(", "np", ".", "log", "(", "R", ")", "-", "mu", ")", "**", "2", "/", "(", "2", "*", "sigma", "**", "2", ")", ")", "def", "Fsphere_outer", "(", "q", ",", "R", ")", ":", "qR", "=", "np", ".", "outer", "(", "q", ",", "R", ")", "q1", "=", "np", ".", "outer", "(", "q", ",", "np", ".", "ones_like", "(", "R", ")", ")", "return", "4", "*", "np", ".", "pi", "/", "q1", "**", "3", "*", "(", "np", ".", "sin", "(", "qR", ")", "-", "qR", "*", "np", ".", "cos", "(", "qR", ")", ")", "I", "=", "(", "Fsphere_outer", "(", "q", ",", "R", ")", "**", "2", "*", "np", ".", "outer", "(", "np", ".", "ones_like", "(", "q", ")", ",", "P", ")", ")", "return", "A", "*", "I", ".", "sum", "(", "1", ")", "/", "P", ".", "sum", "(", ")" ]
Scattering of a population of non-correlated spheres (radii from a log-normal distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``mu``: expectation of ``ln(R)`` ``sigma``: standard deviation of ``ln(R)`` Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a log-normal distribution of the radii.
[ "Scattering", "of", "a", "population", "of", "non", "-", "correlated", "spheres", "(", "radii", "from", "a", "log", "-", "normal", "distribution", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L201-L230
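A sketch of the polydisperse sphere model; since `mu` is the mean of ln(R), exp(mu) is the median radius of the population:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import LogNormSpheres  # assumed import path

q = np.logspace(-2, 0, 300)
# spheres with median radius exp(mu) = 10 and width sigma = 0.2 in ln(R)
I = LogNormSpheres(q, A=1.0, mu=np.log(10.0), sigma=0.2)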
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
GaussSpheres
def GaussSpheres(q, A, R0, sigma, N=1000, weighting='intensity'): """Scattering of a population of non-correlated spheres (radii from a gaussian distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``R0``: expectation of ``R`` ``sigma``: standard deviation of ``R`` ``weighting``: 'intensity' (default), 'volume' or 'number' Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a gaussian (normal) distribution of the radii. """ Rmin = max(0, R0 - 3 * sigma) Rmax = R0 + 3 * sigma R = np.linspace(Rmin, Rmax, N + 1)[1:] P = 1 / np.sqrt(2 * np.pi * sigma ** 2) * np.exp(-(R - R0) ** 2 / (2 * sigma ** 2)) def Fsphere_outer(q, R): qR = np.outer(q, R) return 3 / qR ** 3 * (np.sin(qR) - qR * np.cos(qR)) V=R**3*4*np.pi/3. if weighting=='intensity': P=P*V*V elif weighting=='volume': P=P*V elif weighting=='number': pass else: raise ValueError('Invalid weighting: '+str(weighting)) I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P)) return A * I.sum(1) / P.sum()
python
def GaussSpheres(q, A, R0, sigma, N=1000, weighting='intensity'): """Scattering of a population of non-correlated spheres (radii from a gaussian distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``R0``: expectation of ``R`` ``sigma``: standard deviation of ``R`` ``weighting``: 'intensity' (default), 'volume' or 'number' Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a gaussian (normal) distribution of the radii. """ Rmin = max(0, R0 - 3 * sigma) Rmax = R0 + 3 * sigma R = np.linspace(Rmin, Rmax, N + 1)[1:] P = 1 / np.sqrt(2 * np.pi * sigma ** 2) * np.exp(-(R - R0) ** 2 / (2 * sigma ** 2)) def Fsphere_outer(q, R): qR = np.outer(q, R) return 3 / qR ** 3 * (np.sin(qR) - qR * np.cos(qR)) V=R**3*4*np.pi/3. if weighting=='intensity': P=P*V*V elif weighting=='volume': P=P*V elif weighting=='number': pass else: raise ValueError('Invalid weighting: '+str(weighting)) I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P)) return A * I.sum(1) / P.sum()
[ "def", "GaussSpheres", "(", "q", ",", "A", ",", "R0", ",", "sigma", ",", "N", "=", "1000", ",", "weighting", "=", "'intensity'", ")", ":", "Rmin", "=", "max", "(", "0", ",", "R0", "-", "3", "*", "sigma", ")", "Rmax", "=", "R0", "+", "3", "*", "sigma", "R", "=", "np", ".", "linspace", "(", "Rmin", ",", "Rmax", ",", "N", "+", "1", ")", "[", "1", ":", "]", "P", "=", "1", "/", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", "*", "sigma", "**", "2", ")", "*", "np", ".", "exp", "(", "-", "(", "R", "-", "R0", ")", "**", "2", "/", "(", "2", "*", "sigma", "**", "2", ")", ")", "def", "Fsphere_outer", "(", "q", ",", "R", ")", ":", "qR", "=", "np", ".", "outer", "(", "q", ",", "R", ")", "return", "3", "/", "qR", "**", "3", "*", "(", "np", ".", "sin", "(", "qR", ")", "-", "qR", "*", "np", ".", "cos", "(", "qR", ")", ")", "V", "=", "R", "**", "3", "*", "4", "*", "np", ".", "pi", "/", "3.", "if", "weighting", "==", "'intensity'", ":", "P", "=", "P", "*", "V", "*", "V", "elif", "weighting", "==", "'volume'", ":", "P", "=", "P", "*", "V", "elif", "weighting", "==", "'number'", ":", "pass", "else", ":", "raise", "ValueError", "(", "'Invalid weighting: '", "+", "str", "(", "weighting", ")", ")", "I", "=", "(", "Fsphere_outer", "(", "q", ",", "R", ")", "**", "2", "*", "np", ".", "outer", "(", "np", ".", "ones_like", "(", "q", ")", ",", "P", ")", ")", "return", "A", "*", "I", ".", "sum", "(", "1", ")", "/", "P", ".", "sum", "(", ")" ]
Scattering of a population of non-correlated spheres (radii from a gaussian distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``R0``: expectation of ``R`` ``sigma``: standard deviation of ``R`` ``weighting``: 'intensity' (default), 'volume' or 'number' Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a gaussian (normal) distribution of the radii.
[ "Scattering", "of", "a", "population", "of", "non", "-", "correlated", "spheres", "(", "radii", "from", "a", "gaussian", "distribution", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L232-L270
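A sketch comparing the three weighting schemes of the Gaussian size distribution:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import GaussSpheres  # assumed import path

q = np.logspace(-2, 0, 300)
I_int = GaussSpheres(q, 1.0, 10.0, 1.0)                      # default: intensity (V^2) weighting
I_vol = GaussSpheres(q, 1.0, 10.0, 1.0, weighting='volume')  # volume weighting
I_num = GaussSpheres(q, 1.0, 10.0, 1.0, weighting='number')  # plain number average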
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
PowerlawGuinierPorodConst
def PowerlawGuinierPorodConst(q, A, alpha, G, Rg, beta, C): """Sum of a Power-law, a Guinier-Porod curve and a constant. Inputs: ------- ``q``: independent variable (momentum transfer) ``A``: scaling factor of the power-law ``alpha``: power-law exponent ``G``: scaling factor of the Guinier-Porod curve ``Rg``: Radius of gyration ``beta``: power-law exponent of the Guinier-Porod curve ``C``: additive constant Formula: -------- ``A*q^alpha + GuinierPorod(q,G,Rg,beta) + C`` """ return PowerlawPlusConstant(q, A, alpha, C) + GuinierPorod(q, G, Rg, beta)
python
def PowerlawGuinierPorodConst(q, A, alpha, G, Rg, beta, C): """Sum of a Power-law, a Guinier-Porod curve and a constant. Inputs: ------- ``q``: independent variable (momentum transfer) ``A``: scaling factor of the power-law ``alpha``: power-law exponent ``G``: scaling factor of the Guinier-Porod curve ``Rg``: Radius of gyration ``beta``: power-law exponent of the Guinier-Porod curve ``C``: additive constant Formula: -------- ``A*q^alpha + GuinierPorod(q,G,Rg,beta) + C`` """ return PowerlawPlusConstant(q, A, alpha, C) + GuinierPorod(q, G, Rg, beta)
[ "def", "PowerlawGuinierPorodConst", "(", "q", ",", "A", ",", "alpha", ",", "G", ",", "Rg", ",", "beta", ",", "C", ")", ":", "return", "PowerlawPlusConstant", "(", "q", ",", "A", ",", "alpha", ",", "C", ")", "+", "GuinierPorod", "(", "q", ",", "G", ",", "Rg", ",", "beta", ")" ]
Sum of a Power-law, a Guinier-Porod curve and a constant. Inputs: ------- ``q``: independent variable (momentum transfer) ``A``: scaling factor of the power-law ``alpha``: power-law exponent ``G``: scaling factor of the Guinier-Porod curve ``Rg``: Radius of gyration ``beta``: power-law exponent of the Guinier-Porod curve ``C``: additive constant Formula: -------- ``A*q^alpha + GuinierPorod(q,G,Rg,beta) + C``
[ "Sum", "of", "a", "Power", "-", "law", "a", "Guinier", "-", "Porod", "curve", "and", "a", "constant", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L273-L290
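A sketch of the composite model, typical of a curve with a low-q power-law upturn, a particle hump and a flat background; the parameter values are illustrative only:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import PowerlawGuinierPorodConst  # assumed import path

q = np.logspace(-2, 0, 300)
I = PowerlawGuinierPorodConst(q, A=1e-4, alpha=-3, G=1.0, Rg=5.0, beta=-4, C=0.01)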
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
GuinierPorodMulti
def GuinierPorodMulti(q, G, *Rgsalphas): """Empirical multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch other arguments: [Rg1, alpha1, Rg2, alpha2, Rg3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ scalefactor = G funcs = [lambda q: Guinier(q, G, Rgsalphas[0])] indices = np.ones_like(q, dtype=np.bool) constraints = [] for i in range(1, len(Rgsalphas)): if i % 2: # Rgsalphas[i] is an exponent, Rgsalphas[i-1] is a radius of gyration qsep = _PGgen_qsep(Rgsalphas[i], Rgsalphas[i - 1], 3) scalefactor = _PGgen_A(Rgsalphas[i], Rgsalphas[i - 1], 3, scalefactor) funcs.append(lambda q, a=scalefactor, alpha=Rgsalphas[i]: Powerlaw(q, a, alpha)) else: # Rgsalphas[i] is a radius of gyration, Rgsalphas[i-1] is a power-law exponent qsep = _PGgen_qsep(Rgsalphas[i - 1], Rgsalphas[i], 3) scalefactor = _PGgen_G(Rgsalphas[i - 1], Rgsalphas[i], 3, scalefactor) funcs.append(lambda q, G=scalefactor, Rg=Rgsalphas[i]: Guinier(q, G, Rg)) # this belongs to the previous constraints.append(indices & (q < qsep)) indices[q < qsep] = False constraints.append(indices) return np.piecewise(q, constraints, funcs)
python
def GuinierPorodMulti(q, G, *Rgsalphas): """Empirical multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch other arguments: [Rg1, alpha1, Rg2, alpha2, Rg3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ scalefactor = G funcs = [lambda q: Guinier(q, G, Rgsalphas[0])] indices = np.ones_like(q, dtype=np.bool) constraints = [] for i in range(1, len(Rgsalphas)): if i % 2: # Rgsalphas[i] is an exponent, Rgsalphas[i-1] is a radius of gyration qsep = _PGgen_qsep(Rgsalphas[i], Rgsalphas[i - 1], 3) scalefactor = _PGgen_A(Rgsalphas[i], Rgsalphas[i - 1], 3, scalefactor) funcs.append(lambda q, a=scalefactor, alpha=Rgsalphas[i]: Powerlaw(q, a, alpha)) else: # Rgsalphas[i] is a radius of gyration, Rgsalphas[i-1] is a power-law exponent qsep = _PGgen_qsep(Rgsalphas[i - 1], Rgsalphas[i], 3) scalefactor = _PGgen_G(Rgsalphas[i - 1], Rgsalphas[i], 3, scalefactor) funcs.append(lambda q, G=scalefactor, Rg=Rgsalphas[i]: Guinier(q, G, Rg)) # this belongs to the previous constraints.append(indices & (q < qsep)) indices[q < qsep] = False constraints.append(indices) return np.piecewise(q, constraints, funcs)
[ "def", "GuinierPorodMulti", "(", "q", ",", "G", ",", "*", "Rgsalphas", ")", ":", "scalefactor", "=", "G", "funcs", "=", "[", "lambda", "q", ":", "Guinier", "(", "q", ",", "G", ",", "Rgsalphas", "[", "0", "]", ")", "]", "indices", "=", "np", ".", "ones_like", "(", "q", ",", "dtype", "=", "np", ".", "bool", ")", "constraints", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "Rgsalphas", ")", ")", ":", "if", "i", "%", "2", ":", "# Rgsalphas[i] is an exponent, Rgsalphas[i-1] is a radius of gyration", "qsep", "=", "_PGgen_qsep", "(", "Rgsalphas", "[", "i", "]", ",", "Rgsalphas", "[", "i", "-", "1", "]", ",", "3", ")", "scalefactor", "=", "_PGgen_A", "(", "Rgsalphas", "[", "i", "]", ",", "Rgsalphas", "[", "i", "-", "1", "]", ",", "3", ",", "scalefactor", ")", "funcs", ".", "append", "(", "lambda", "q", ",", "a", "=", "scalefactor", ",", "alpha", "=", "Rgsalphas", "[", "i", "]", ":", "Powerlaw", "(", "q", ",", "a", ",", "alpha", ")", ")", "else", ":", "# Rgsalphas[i] is a radius of gyration, Rgsalphas[i-1] is a power-law exponent", "qsep", "=", "_PGgen_qsep", "(", "Rgsalphas", "[", "i", "-", "1", "]", ",", "Rgsalphas", "[", "i", "]", ",", "3", ")", "scalefactor", "=", "_PGgen_G", "(", "Rgsalphas", "[", "i", "-", "1", "]", ",", "Rgsalphas", "[", "i", "]", ",", "3", ",", "scalefactor", ")", "funcs", ".", "append", "(", "lambda", "q", ",", "G", "=", "scalefactor", ",", "Rg", "=", "Rgsalphas", "[", "i", "]", ":", "Guinier", "(", "q", ",", "G", ",", "Rg", ")", ")", "# this belongs to the previous", "constraints", ".", "append", "(", "indices", "&", "(", "q", "<", "qsep", ")", ")", "indices", "[", "q", "<", "qsep", "]", "=", "False", "constraints", ".", "append", "(", "indices", ")", "return", "np", ".", "piecewise", "(", "q", ",", "constraints", ",", "funcs", ")" ]
Empirical multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch other arguments: [Rg1, alpha1, Rg2, alpha2, Rg3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "multi", "-", "part", "Guinier", "-", "Porod", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L320-L364
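A sketch of a four-part curve (Guinier, power law, Guinier, power law). The Rg/alpha values are chosen so the smoothness-derived separation points increase monotonically in q; note that on recent NumPy releases the `np.bool` alias used inside the function may need to be replaced by the builtin `bool`:

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import GuinierPorodMulti  # assumed import path

q = np.logspace(-3, 0, 1000)
# G=1; Guinier (Rg=30), power law (alpha=-2.5), Guinier (Rg=8), power law (alpha=-4)
I = GuinierPorodMulti(q, 1.0, 30.0, -2.5, 8.0, -4)

PorodGuinierMulti below works the same way, with the argument sequence starting from a power-law branch instead.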
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
PorodGuinierMulti
def PorodGuinierMulti(q, A, *alphasRgs): """Empirical multi-part Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``A``: factor for the first Power-law-branch other arguments: [alpha1, Rg1, alpha2, Rg2, alpha3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ scalefactor = A funcs = [lambda q: Powerlaw(q, A, alphasRgs[0])] indices = np.ones_like(q, dtype=np.bool) constraints = [] for i in range(1, len(alphasRgs)): if i % 2: # alphasRgs[i] is a radius of gyration, alphasRgs[i-1] is a power-law exponent qsep = _PGgen_qsep(alphasRgs[i - 1], alphasRgs[i], 3) scalefactor = _PGgen_G(alphasRgs[i - 1], alphasRgs[i], 3, scalefactor) funcs.append(lambda q, G=scalefactor, Rg=alphasRgs[i]: Guinier(q, G, Rg)) else: # alphasRgs[i] is an exponent, alphasRgs[i-1] is a radius of gyration qsep = _PGgen_qsep(alphasRgs[i], alphasRgs[i - 1], 3) scalefactor = _PGgen_A(alphasRgs[i], alphasRgs[i - 1], 3, scalefactor) funcs.append(lambda q, a=scalefactor, alpha=alphasRgs[i]: a * q ** alpha) # this belongs to the previous constraints.append(indices & (q < qsep)) indices[q < qsep] = False constraints.append(indices) return np.piecewise(q, constraints, funcs)
python
def PorodGuinierMulti(q, A, *alphasRgs): """Empirical multi-part Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``A``: factor for the first Power-law-branch other arguments: [alpha1, Rg1, alpha2, Rg2, alpha3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ scalefactor = A funcs = [lambda q: Powerlaw(q, A, alphasRgs[0])] indices = np.ones_like(q, dtype=np.bool) constraints = [] for i in range(1, len(alphasRgs)): if i % 2: # alphasRgs[i] is a radius of gyration, alphasRgs[i-1] is a power-law exponent qsep = _PGgen_qsep(alphasRgs[i - 1], alphasRgs[i], 3) scalefactor = _PGgen_G(alphasRgs[i - 1], alphasRgs[i], 3, scalefactor) funcs.append(lambda q, G=scalefactor, Rg=alphasRgs[i]: Guinier(q, G, Rg)) else: # alphasRgs[i] is an exponent, alphasRgs[i-1] is a radius of gyration qsep = _PGgen_qsep(alphasRgs[i], alphasRgs[i - 1], 3) scalefactor = _PGgen_A(alphasRgs[i], alphasRgs[i - 1], 3, scalefactor) funcs.append(lambda q, a=scalefactor, alpha=alphasRgs[i]: a * q ** alpha) # this belongs to the previous constraints.append(indices & (q < qsep)) indices[q < qsep] = False constraints.append(indices) return np.piecewise(q, constraints, funcs)
[ "def", "PorodGuinierMulti", "(", "q", ",", "A", ",", "*", "alphasRgs", ")", ":", "scalefactor", "=", "A", "funcs", "=", "[", "lambda", "q", ":", "Powerlaw", "(", "q", ",", "A", ",", "alphasRgs", "[", "0", "]", ")", "]", "indices", "=", "np", ".", "ones_like", "(", "q", ",", "dtype", "=", "np", ".", "bool", ")", "constraints", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "alphasRgs", ")", ")", ":", "if", "i", "%", "2", ":", "# alphasRgs[i] is a radius of gyration, alphasRgs[i-1] is a power-law exponent", "qsep", "=", "_PGgen_qsep", "(", "alphasRgs", "[", "i", "-", "1", "]", ",", "alphasRgs", "[", "i", "]", ",", "3", ")", "scalefactor", "=", "_PGgen_G", "(", "alphasRgs", "[", "i", "-", "1", "]", ",", "alphasRgs", "[", "i", "]", ",", "3", ",", "scalefactor", ")", "funcs", ".", "append", "(", "lambda", "q", ",", "G", "=", "scalefactor", ",", "Rg", "=", "alphasRgs", "[", "i", "]", ":", "Guinier", "(", "q", ",", "G", ",", "Rg", ")", ")", "else", ":", "# alphasRgs[i] is an exponent, alphasRgs[i-1] is a radius of gyration", "qsep", "=", "_PGgen_qsep", "(", "alphasRgs", "[", "i", "]", ",", "alphasRgs", "[", "i", "-", "1", "]", ",", "3", ")", "scalefactor", "=", "_PGgen_A", "(", "alphasRgs", "[", "i", "]", ",", "alphasRgs", "[", "i", "-", "1", "]", ",", "3", ",", "scalefactor", ")", "funcs", ".", "append", "(", "lambda", "q", ",", "a", "=", "scalefactor", ",", "alpha", "=", "alphasRgs", "[", "i", "]", ":", "a", "*", "q", "**", "alpha", ")", "# this belongs to the previous", "constraints", ".", "append", "(", "indices", "&", "(", "q", "<", "qsep", ")", ")", "indices", "[", "q", "<", "qsep", "]", "=", "False", "constraints", ".", "append", "(", "indices", ")", "return", "np", ".", "piecewise", "(", "q", ",", "constraints", ",", "funcs", ")" ]
Empirical multi-part Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``A``: factor for the first Power-law-branch other arguments: [alpha1, Rg1, alpha2, Rg2, alpha3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "multi", "-", "part", "Porod", "-", "Guinier", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L366-L410
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
GeneralGuinierPorod
def GeneralGuinierPorod(q, factor, *args, **kwargs): """Empirical generalized multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``factor``: factor for the first branch other arguments (*args): the defining arguments of the consecutive parts: radius of gyration (``Rg``) and dimensionality parameter (``s``) for Guinier and exponent (``alpha``) for power-law parts. supported keyword arguments: ``startswithguinier``: True if the first segment is a Guinier-type scattering (this is the default) or False if it is a power-law Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. The exact number of parts is determined from the number of positional arguments (*args). Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ if kwargs.get('startswithguinier', True): funcs = [lambda q, A = factor:GeneralGuinier(q, A, args[0], args[1])] i = 2 guiniernext = False else: funcs = [lambda q, A = factor: Powerlaw(q, A, args[0])] i = 1 guiniernext = True indices = np.ones_like(q, dtype=np.bool) constraints = [] while i < len(args): if guiniernext: # args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent qsep = _PGgen_qsep(args[i - 1], args[i], args[i + 1]) factor = _PGgen_G(args[i - 1], args[i], args[i + 1], factor) funcs.append(lambda q, G=factor, Rg=args[i], s=args[i + 1]: GeneralGuinier(q, G, Rg, s)) guiniernext = False i += 2 else: # args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter qsep = _PGgen_qsep(args[i], args[i - 2], args[i - 1]) factor = _PGgen_A(args[i], args[i - 2], args[i - 1], factor) funcs.append(lambda q, a=factor, alpha=args[i]: a * q ** alpha) guiniernext = True i += 1 # this belongs to the previous constraints.append(indices & (q < qsep)) indices[q < qsep] = False constraints.append(indices) return np.piecewise(q, constraints, funcs)
python
def GeneralGuinierPorod(q, factor, *args, **kwargs): """Empirical generalized multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``factor``: factor for the first branch other arguments (*args): the defining arguments of the consecutive parts: radius of gyration (``Rg``) and dimensionality parameter (``s``) for Guinier and exponent (``alpha``) for power-law parts. supported keyword arguments: ``startswithguinier``: True if the first segment is a Guinier-type scattering (this is the default) or False if it is a power-law Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. The exact number of parts is determined from the number of positional arguments (*args). Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ if kwargs.get('startswithguinier', True): funcs = [lambda q, A = factor:GeneralGuinier(q, A, args[0], args[1])] i = 2 guiniernext = False else: funcs = [lambda q, A = factor: Powerlaw(q, A, args[0])] i = 1 guiniernext = True indices = np.ones_like(q, dtype=np.bool) constraints = [] while i < len(args): if guiniernext: # args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent qsep = _PGgen_qsep(args[i - 1], args[i], args[i + 1]) factor = _PGgen_G(args[i - 1], args[i], args[i + 1], factor) funcs.append(lambda q, G=factor, Rg=args[i], s=args[i + 1]: GeneralGuinier(q, G, Rg, s)) guiniernext = False i += 2 else: # args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter qsep = _PGgen_qsep(args[i], args[i - 2], args[i - 1]) factor = _PGgen_A(args[i], args[i - 2], args[i - 1], factor) funcs.append(lambda q, a=factor, alpha=args[i]: a * q ** alpha) guiniernext = True i += 1 # this belongs to the previous constraints.append(indices & (q < qsep)) indices[q < qsep] = False constraints.append(indices) return np.piecewise(q, constraints, funcs)
[ "def", "GeneralGuinierPorod", "(", "q", ",", "factor", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'startswithguinier'", ",", "True", ")", ":", "funcs", "=", "[", "lambda", "q", ",", "A", "=", "factor", ":", "GeneralGuinier", "(", "q", ",", "A", ",", "args", "[", "0", "]", ",", "args", "[", "1", "]", ")", "]", "i", "=", "2", "guiniernext", "=", "False", "else", ":", "funcs", "=", "[", "lambda", "q", ",", "A", "=", "factor", ":", "Powerlaw", "(", "q", ",", "A", ",", "args", "[", "0", "]", ")", "]", "i", "=", "1", "guiniernext", "=", "True", "indices", "=", "np", ".", "ones_like", "(", "q", ",", "dtype", "=", "np", ".", "bool", ")", "constraints", "=", "[", "]", "while", "i", "<", "len", "(", "args", ")", ":", "if", "guiniernext", ":", "# args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent", "qsep", "=", "_PGgen_qsep", "(", "args", "[", "i", "-", "1", "]", ",", "args", "[", "i", "]", ",", "args", "[", "i", "+", "1", "]", ")", "factor", "=", "_PGgen_G", "(", "args", "[", "i", "-", "1", "]", ",", "args", "[", "i", "]", ",", "args", "[", "i", "+", "1", "]", ",", "factor", ")", "funcs", ".", "append", "(", "lambda", "q", ",", "G", "=", "factor", ",", "Rg", "=", "args", "[", "i", "]", ",", "s", "=", "args", "[", "i", "+", "1", "]", ":", "GeneralGuinier", "(", "q", ",", "G", ",", "Rg", ",", "s", ")", ")", "guiniernext", "=", "False", "i", "+=", "2", "else", ":", "# args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter", "qsep", "=", "_PGgen_qsep", "(", "args", "[", "i", "]", ",", "args", "[", "i", "-", "2", "]", ",", "args", "[", "i", "-", "1", "]", ")", "factor", "=", "_PGgen_A", "(", "args", "[", "i", "]", ",", "args", "[", "i", "-", "2", "]", ",", "args", "[", "i", "-", "1", "]", ",", "factor", ")", "funcs", ".", "append", "(", "lambda", "q", ",", "a", "=", "factor", ",", "alpha", "=", "args", "[", "i", "]", ":", "a", "*", "q", "**", "alpha", ")", "guiniernext", "=", "True", "i", "+=", "1", "# this belongs to the previous", "constraints", ".", "append", "(", "indices", "&", "(", "q", "<", "qsep", ")", ")", "indices", "[", "q", "<", "qsep", "]", "=", "False", "constraints", ".", "append", "(", "indices", ")", "return", "np", ".", "piecewise", "(", "q", ",", "constraints", ",", "funcs", ")" ]
Empirical generalized multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``factor``: factor for the first branch other arguments (*args): the defining arguments of the consecutive parts: radius of gyration (``Rg``) and dimensionality parameter (``s``) for Guinier and exponent (``alpha``) for power-law parts. supported keyword arguments: ``startswithguinier``: True if the first segment is a Guinier-type scattering (this is the default) or False if it is a power-law Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. The exact number of parts is determined from the number of positional arguments (*args). Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "generalized", "multi", "-", "part", "Guinier", "-", "Porod", "scattering" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L412-L472
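A hedged usage sketch for the record above. The import path is inferred from the record's URL and the parameter values are illustrative only; note also that recent NumPy releases removed the np.bool alias used in the function body, so running it on current NumPy may require dtype=bool instead.

import numpy as np
from sastool.fitting.fitfunctions.sasbasic import GeneralGuinierPorod  # path assumed from the URL above

q = np.logspace(-2, 0, 200)   # scattering variable
# One Guinier segment (Rg=10, s=0) followed by one power-law segment (alpha=-4);
# the cross-over point and the power-law amplitude are derived internally from smoothness.
intensity = GeneralGuinierPorod(q, 1.0, 10.0, 0.0, -4.0)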
awacha/sastool
sastool/fitting/fitfunctions/saspolymer.py
DebyeChain
def DebyeChain(q, Rg): """Scattering form-factor intensity of a Gaussian chain (Debye) Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration Formula: -------- ``2*(exp(-a)-1+a)/a^2`` where ``a=(q*Rg)^2`` """ a = (q * Rg) ** 2 return 2 * (np.exp(-a) - 1 + a) / a ** 2
python
def DebyeChain(q, Rg): """Scattering form-factor intensity of a Gaussian chain (Debye) Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration Formula: -------- ``2*(exp(-a)-1+a)/a^2`` where ``a=(q*Rg)^2`` """ a = (q * Rg) ** 2 return 2 * (np.exp(-a) - 1 + a) / a ** 2
[ "def", "DebyeChain", "(", "q", ",", "Rg", ")", ":", "a", "=", "(", "q", "*", "Rg", ")", "**", "2", "return", "2", "*", "(", "np", ".", "exp", "(", "-", "a", ")", "-", "1", "+", "a", ")", "/", "a", "**", "2" ]
Scattering form-factor intensity of a Gaussian chain (Debye) Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration Formula: -------- ``2*(exp(-a)-1+a)/a^2`` where ``a=(q*Rg)^2``
[ "Scattering", "form", "-", "factor", "intensity", "of", "a", "Gaussian", "chain", "(", "Debye", ")" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/saspolymer.py#L6-L19
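A self-contained numerical check of the Debye formula above; the function is re-implemented locally so the snippet runs without sastool:

import numpy as np

def debye(q, Rg):
    # Debye form factor: 2*(exp(-a) - 1 + a)/a**2 with a = (q*Rg)**2
    a = (q * Rg) ** 2
    return 2 * (np.exp(-a) - 1 + a) / a ** 2

q = np.logspace(-3, 0, 4)
print(debye(q, Rg=5.0))  # approaches 1 as q -> 0 and decays like 2/(q*Rg)**2 for q*Rg >> 1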
awacha/sastool
sastool/fitting/fitfunctions/saspolymer.py
ExcludedVolumeChain
def ExcludedVolumeChain(q, Rg, nu): """Scattering intensity of a generalized excluded-volume Gaussian chain Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration ``nu``: excluded volume exponent Formula: -------- ``(u^(1/nu)*gamma(0.5/nu)*gammainc_lower(0.5/nu,u)- gamma(1/nu)*gammainc_lower(1/nu,u)) / (nu*u^(1/nu))`` where ``u = q^2*Rg^2*(2*nu+1)*(2*nu+2)/6`` is the reduced scattering variable, ``gamma(x)`` is the gamma function and ``gammainc_lower(x,t)`` is the lower incomplete gamma function. Literature: ----------- SASFit manual 6. nov. 2010. Equation (3.60b) """ u = (q * Rg) ** 2 * (2 * nu + 1) * (2 * nu + 2) / 6. return (u ** (0.5 / nu) * gamma(0.5 / nu) * gammainc(0.5 / nu, u) - gamma(1. / nu) * gammainc(1. / nu, u)) / (nu * u ** (1. / nu))
python
def ExcludedVolumeChain(q, Rg, nu): """Scattering intensity of a generalized excluded-volume Gaussian chain Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration ``nu``: excluded volume exponent Formula: -------- ``(u^(1/nu)*gamma(0.5/nu)*gammainc_lower(0.5/nu,u)- gamma(1/nu)*gammainc_lower(1/nu,u)) / (nu*u^(1/nu))`` where ``u = q^2*Rg^2*(2*nu+1)*(2*nu+2)/6`` is the reduced scattering variable, ``gamma(x)`` is the gamma function and ``gammainc_lower(x,t)`` is the lower incomplete gamma function. Literature: ----------- SASFit manual 6. nov. 2010. Equation (3.60b) """ u = (q * Rg) ** 2 * (2 * nu + 1) * (2 * nu + 2) / 6. return (u ** (0.5 / nu) * gamma(0.5 / nu) * gammainc(0.5 / nu, u) - gamma(1. / nu) * gammainc(1. / nu, u)) / (nu * u ** (1. / nu))
[ "def", "ExcludedVolumeChain", "(", "q", ",", "Rg", ",", "nu", ")", ":", "u", "=", "(", "q", "*", "Rg", ")", "**", "2", "*", "(", "2", "*", "nu", "+", "1", ")", "*", "(", "2", "*", "nu", "+", "2", ")", "/", "6.", "return", "(", "u", "**", "(", "0.5", "/", "nu", ")", "*", "gamma", "(", "0.5", "/", "nu", ")", "*", "gammainc", "(", "0.5", "/", "nu", ",", "u", ")", "-", "gamma", "(", "1.", "/", "nu", ")", "*", "gammainc", "(", "1.", "/", "nu", ",", "u", ")", ")", "/", "(", "nu", "*", "u", "**", "(", "1.", "/", "nu", ")", ")" ]
Scattering intensity of a generalized excluded-volume Gaussian chain Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration ``nu``: excluded volume exponent Formula: -------- ``(u^(1/nu)*gamma(0.5/nu)*gammainc_lower(0.5/nu,u)- gamma(1/nu)*gammainc_lower(1/nu,u)) / (nu*u^(1/nu))`` where ``u = q^2*Rg^2*(2*nu+1)*(2*nu+2)/6`` is the reduced scattering variable, ``gamma(x)`` is the gamma function and ``gammainc_lower(x,t)`` is the lower incomplete gamma function. Literature: ----------- SASFit manual 6. nov. 2010. Equation (3.60b)
[ "Scattering", "intensity", "of", "a", "generalized", "excluded", "-", "volume", "Gaussian", "chain" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/saspolymer.py#L21-L44
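A quick consistency check of the formula above. scipy.special.gammainc is the *regularized* lower incomplete gamma, so gamma(a)*gammainc(a, u) is the unregularized gammainc_lower(a, u) of the docstring, and for nu = 1/2 the expression reduces exactly to the Debye function:

import numpy as np
from scipy.special import gamma, gammainc

def excluded_volume_chain(q, Rg, nu):
    u = (q * Rg) ** 2 * (2 * nu + 1) * (2 * nu + 2) / 6.0
    return (u ** (0.5 / nu) * gamma(0.5 / nu) * gammainc(0.5 / nu, u)
            - gamma(1.0 / nu) * gammainc(1.0 / nu, u)) / (nu * u ** (1.0 / nu))

q, Rg = np.logspace(-2, 0, 50), 5.0
a = (q * Rg) ** 2
debye = 2 * (np.exp(-a) - 1 + a) / a ** 2
assert np.allclose(excluded_volume_chain(q, Rg, 0.5), debye)  # Gaussian-chain limit at nu = 1/2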
awacha/sastool
sastool/fitting/fitfunctions/saspolymer.py
BorueErukhimovich
def BorueErukhimovich(q, C, r0, s, t):
    """Borue-Erukhimovich model of microphase separation in polyelectrolytes

    Inputs:
    -------
        ``q``: independent variable
        ``C``: scaling factor
        ``r0``: typical electrostatic screening length
        ``s``: dimensionless charge concentration
        ``t``: dimensionless temperature

    Formula:
    --------
        ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0``

    Literature:
    -----------
        o Borue and Erukhimovich. Macromolecules (1988) 21 (11) 3240-3249
        o Shibayama and Tanaka. J. Chem. Phys. (1995) 102 (23) 9392
        o Moussaid et al. J. Phys. II (France) (1993) 3 (4) 573-594
        o Ermi and Amis. Macromolecules (1997) 30 (22) 6937-6942
    """
    x = q * r0
    return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1)
python
def BorueErukhimovich(q, C, r0, s, t):
    """Borue-Erukhimovich model of microphase separation in polyelectrolytes

    Inputs:
    -------
        ``q``: independent variable
        ``C``: scaling factor
        ``r0``: typical electrostatic screening length
        ``s``: dimensionless charge concentration
        ``t``: dimensionless temperature

    Formula:
    --------
        ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0``

    Literature:
    -----------
        o Borue and Erukhimovich. Macromolecules (1988) 21 (11) 3240-3249
        o Shibayama and Tanaka. J. Chem. Phys. (1995) 102 (23) 9392
        o Moussaid et al. J. Phys. II (France) (1993) 3 (4) 573-594
        o Ermi and Amis. Macromolecules (1997) 30 (22) 6937-6942
    """
    x = q * r0
    return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1)
[ "def", "BorueErukhimovich", "(", "q", ",", "C", ",", "r0", ",", "s", ",", "t", ")", ":", "x", "=", "q", "*", "r0", "return", "C", "*", "(", "x", "**", "2", "+", "s", ")", "/", "(", "(", "x", "**", "2", "+", "s", ")", "*", "(", "x", "**", "2", "+", "t", ")", "+", "1", ")" ]
Borue-Erukhimovich model of microphase separation in polyelectrolytes

    Inputs:
    -------
        ``q``: independent variable
        ``C``: scaling factor
        ``r0``: typical electrostatic screening length
        ``s``: dimensionless charge concentration
        ``t``: dimensionless temperature

    Formula:
    --------
        ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0``

    Literature:
    -----------
        o Borue and Erukhimovich. Macromolecules (1988) 21 (11) 3240-3249
        o Shibayama and Tanaka. J. Chem. Phys. (1995) 102 (23) 9392
        o Moussaid et al. J. Phys. II (France) (1993) 3 (4) 573-594
        o Ermi and Amis. Macromolecules (1997) 30 (22) 6937-6942
[ "Borue", "-", "Erukhimovich", "model", "of", "microphase", "separation", "in", "polyelectrolytes" ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/saspolymer.py#L46-L69
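Setting the derivative of the formula above to zero gives a closed form for the structure-factor peak: with y = x^2, d/dy[(y+s)/((y+s)(y+t)+1)] is proportional to 1-(y+s)^2, so for s < 1 the maximum sits at x = sqrt(1-s), i.e. q* = sqrt(1-s)/r0. A numerical check with a local re-implementation:

import numpy as np

def borue_erukhimovich(q, C, r0, s, t):
    x = q * r0
    return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1)

C, r0, s, t = 1.0, 2.0, 0.2, 0.5
q = np.linspace(1e-4, 2.0, 200001)
q_peak = q[np.argmax(borue_erukhimovich(q, C, r0, s, t))]
print(q_peak, np.sqrt(1 - s) / r0)  # should agree to grid resolution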
awacha/sastool
sastool/fitting/fitfunctions/saspolymer.py
BorueErukhimovich_Powerlaw
def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu):
    """Borue-Erukhimovich model ending in a power-law.

    Inputs:
    -------
        ``q``: independent variable
        ``C``: scaling factor
        ``r0``: typical electrostatic screening length
        ``s``: dimensionless charge concentration
        ``t``: dimensionless temperature
        ``nu``: excluded volume parameter

    Formula:
    --------
        ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep``
        ``A*q^(-1/nu)`` if ``q>qsep``
        ``A`` and ``qsep`` are determined from conditions of smoothness at
        the cross-over.
    """
    def get_xsep(alpha, s, t):
        A = alpha + 2
        B = 2 * s * alpha + t * alpha + 4 * s
        C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2
        D = alpha * s ** 2 * t + alpha * s
        r = np.roots([A, B, C, D])
        #print "get_xsep: ", alpha, s, t, r
        return r[r > 0][0] ** 0.5
    get_B = lambda C, xsep, s, t, nu:C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu)
    x = q * r0
    xsep = np.real_if_close(get_xsep(-1.0 / nu, s, t))
    A = get_B(C, xsep, s, t, nu)
    return np.piecewise(q, (x < xsep, x >= xsep), (lambda a:BorueErukhimovich(a, C, r0, s, t), lambda a:A * (a * r0) ** (-1.0 / nu)))
python
def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu):
    """Borue-Erukhimovich model ending in a power-law.

    Inputs:
    -------
        ``q``: independent variable
        ``C``: scaling factor
        ``r0``: typical electrostatic screening length
        ``s``: dimensionless charge concentration
        ``t``: dimensionless temperature
        ``nu``: excluded volume parameter

    Formula:
    --------
        ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep``
        ``A*q^(-1/nu)`` if ``q>qsep``
        ``A`` and ``qsep`` are determined from conditions of smoothness at
        the cross-over.
    """
    def get_xsep(alpha, s, t):
        A = alpha + 2
        B = 2 * s * alpha + t * alpha + 4 * s
        C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2
        D = alpha * s ** 2 * t + alpha * s
        r = np.roots([A, B, C, D])
        #print "get_xsep: ", alpha, s, t, r
        return r[r > 0][0] ** 0.5
    get_B = lambda C, xsep, s, t, nu:C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu)
    x = q * r0
    xsep = np.real_if_close(get_xsep(-1.0 / nu, s, t))
    A = get_B(C, xsep, s, t, nu)
    return np.piecewise(q, (x < xsep, x >= xsep), (lambda a:BorueErukhimovich(a, C, r0, s, t), lambda a:A * (a * r0) ** (-1.0 / nu)))
[ "def", "BorueErukhimovich_Powerlaw", "(", "q", ",", "C", ",", "r0", ",", "s", ",", "t", ",", "nu", ")", ":", "def", "get_xsep", "(", "alpha", ",", "s", ",", "t", ")", ":", "A", "=", "alpha", "+", "2", "B", "=", "2", "*", "s", "*", "alpha", "+", "t", "*", "alpha", "+", "4", "*", "s", "C", "=", "s", "*", "t", "*", "alpha", "+", "alpha", "+", "alpha", "*", "s", "**", "2", "+", "alpha", "*", "s", "*", "t", "-", "2", "+", "2", "*", "s", "**", "2", "D", "=", "alpha", "*", "s", "**", "2", "*", "t", "+", "alpha", "*", "s", "r", "=", "np", ".", "roots", "(", "[", "A", ",", "B", ",", "C", ",", "D", "]", ")", "#print \"get_xsep: \", alpha, s, t, r", "return", "r", "[", "r", ">", "0", "]", "[", "0", "]", "**", "0.5", "get_B", "=", "lambda", "C", ",", "xsep", ",", "s", ",", "t", ",", "nu", ":", "C", "*", "(", "xsep", "**", "2", "+", "s", ")", "/", "(", "(", "xsep", "**", "2", "+", "s", ")", "*", "(", "xsep", "**", "2", "+", "t", ")", "+", "1", ")", "*", "xsep", "**", "(", "1.0", "/", "nu", ")", "x", "=", "q", "*", "r0", "xsep", "=", "np", ".", "real_if_close", "(", "get_xsep", "(", "-", "1.0", "/", "nu", ",", "s", ",", "t", ")", ")", "A", "=", "get_B", "(", "C", ",", "xsep", ",", "s", ",", "t", ",", "nu", ")", "return", "np", ".", "piecewise", "(", "q", ",", "(", "x", "<", "xsep", ",", "x", ">=", "xsep", ")", ",", "(", "lambda", "a", ":", "BorueErukhimovich", "(", "a", ",", "C", ",", "r0", ",", "s", ",", "t", ")", ",", "lambda", "a", ":", "A", "*", "(", "a", "*", "r0", ")", "**", "(", "-", "1.0", "/", "nu", ")", ")", ")" ]
Borue-Erukhimovich model ending in a power-law.

    Inputs:
    -------
        ``q``: independent variable
        ``C``: scaling factor
        ``r0``: typical electrostatic screening length
        ``s``: dimensionless charge concentration
        ``t``: dimensionless temperature
        ``nu``: excluded volume parameter

    Formula:
    --------
        ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep``
        ``A*q^(-1/nu)`` if ``q>qsep``
        ``A`` and ``qsep`` are determined from conditions of smoothness at
        the cross-over.
[ "Borue", "-", "Erukhimovich", "model", "ending", "in", "a", "power", "-", "law", "." ]
train
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/saspolymer.py#L71-L104
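The cubic solved in get_xsep above is exactly the slope-matching condition 2*y*(1-(y+s)^2) = alpha*(y+s)*((y+s)*(y+t)+1) with y = x^2 and alpha = -1/nu. A self-contained sketch (parameter values illustrative) confirming first-derivative continuity at the cross-over:

import numpy as np

def be(x, C, s, t):
    return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1)

def get_xsep(alpha, s, t):
    coeffs = [alpha + 2,
              2 * s * alpha + t * alpha + 4 * s,
              s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2,
              alpha * s ** 2 * t + alpha * s]
    r = np.roots(coeffs)
    r = r[np.abs(r.imag) < 1e-12].real   # keep the real roots
    return r[r > 0][0] ** 0.5

C, s, t, nu = 1.0, 0.2, 0.5, 0.6
xs = get_xsep(-1.0 / nu, s, t)
A = be(xs, C, s, t) * xs ** (1.0 / nu)        # amplitude from value continuity
eps = 1e-6
slope_be = (be(xs + eps, C, s, t) - be(xs - eps, C, s, t)) / (2 * eps)
slope_pl = -A / nu * xs ** (-1.0 / nu - 1.0)  # d/dx of A*x**(-1/nu)
print(slope_be, slope_pl)                     # agreement confirms the smoothness construction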
bmcfee/pumpp
pumpp/sampler.py
Sampler.sample
def sample(self, data, interval): '''Sample a patch from the data object Parameters ---------- data : dict A data dict as produced by pumpp.Pump.transform interval : slice The time interval to sample Returns ------- data_slice : dict `data` restricted to `interval`. ''' data_slice = dict() for key in data: if '_valid' in key: continue index = [slice(None)] * data[key].ndim # if we have multiple observations for this key, pick one index[0] = self.rng.randint(0, data[key].shape[0]) index[0] = slice(index[0], index[0] + 1) for tdim in self._time[key]: index[tdim] = interval data_slice[key] = data[key][tuple(index)] return data_slice
python
def sample(self, data, interval): '''Sample a patch from the data object Parameters ---------- data : dict A data dict as produced by pumpp.Pump.transform interval : slice The time interval to sample Returns ------- data_slice : dict `data` restricted to `interval`. ''' data_slice = dict() for key in data: if '_valid' in key: continue index = [slice(None)] * data[key].ndim # if we have multiple observations for this key, pick one index[0] = self.rng.randint(0, data[key].shape[0]) index[0] = slice(index[0], index[0] + 1) for tdim in self._time[key]: index[tdim] = interval data_slice[key] = data[key][tuple(index)] return data_slice
[ "def", "sample", "(", "self", ",", "data", ",", "interval", ")", ":", "data_slice", "=", "dict", "(", ")", "for", "key", "in", "data", ":", "if", "'_valid'", "in", "key", ":", "continue", "index", "=", "[", "slice", "(", "None", ")", "]", "*", "data", "[", "key", "]", ".", "ndim", "# if we have multiple observations for this key, pick one", "index", "[", "0", "]", "=", "self", ".", "rng", ".", "randint", "(", "0", ",", "data", "[", "key", "]", ".", "shape", "[", "0", "]", ")", "index", "[", "0", "]", "=", "slice", "(", "index", "[", "0", "]", ",", "index", "[", "0", "]", "+", "1", ")", "for", "tdim", "in", "self", ".", "_time", "[", "key", "]", ":", "index", "[", "tdim", "]", "=", "interval", "data_slice", "[", "key", "]", "=", "data", "[", "key", "]", "[", "tuple", "(", "index", ")", "]", "return", "data_slice" ]
Sample a patch from the data object Parameters ---------- data : dict A data dict as produced by pumpp.Pump.transform interval : slice The time interval to sample Returns ------- data_slice : dict `data` restricted to `interval`.
[ "Sample", "a", "patch", "from", "the", "data", "object" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/sampler.py#L87-L120
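A plain-NumPy illustration of what the index construction above does: keep one observation on the leading (batch) axis and crop every time-like axis to the requested interval. The field name, shapes, and the fixed observation index are made up for illustration:

import numpy as np

data = {'mel/mag': np.random.randn(1, 1000, 128)}  # (observation, time, feature) -- hypothetical field
interval = slice(100, 132)

index = [slice(None)] * data['mel/mag'].ndim
index[0] = slice(0, 1)      # pick one observation but keep the axis
index[1] = interval         # crop the time-like axis (axis 1 here)
patch = data['mel/mag'][tuple(index)]
print(patch.shape)          # (1, 32, 128)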
bmcfee/pumpp
pumpp/sampler.py
Sampler.indices
def indices(self, data): '''Generate patch indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) if self.duration > duration: raise DataError('Data duration={} is less than ' 'sample duration={}'.format(duration, self.duration)) while True: # Generate a sampling interval yield self.rng.randint(0, duration - self.duration + 1)
python
def indices(self, data): '''Generate patch indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) if self.duration > duration: raise DataError('Data duration={} is less than ' 'sample duration={}'.format(duration, self.duration)) while True: # Generate a sampling interval yield self.rng.randint(0, duration - self.duration + 1)
[ "def", "indices", "(", "self", ",", "data", ")", ":", "duration", "=", "self", ".", "data_duration", "(", "data", ")", "if", "self", ".", "duration", ">", "duration", ":", "raise", "DataError", "(", "'Data duration={} is less than '", "'sample duration={}'", ".", "format", "(", "duration", ",", "self", ".", "duration", ")", ")", "while", "True", ":", "# Generate a sampling interval", "yield", "self", ".", "rng", ".", "randint", "(", "0", ",", "duration", "-", "self", ".", "duration", "+", "1", ")" ]
Generate patch indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch
[ "Generate", "patch", "indices" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/sampler.py#L122-L143
bmcfee/pumpp
pumpp/sampler.py
SequentialSampler.indices
def indices(self, data): '''Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) for start in range(0, duration - self.duration, self.stride): yield start
python
def indices(self, data): '''Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) for start in range(0, duration - self.duration, self.stride): yield start
[ "def", "indices", "(", "self", ",", "data", ")", ":", "duration", "=", "self", ".", "data_duration", "(", "data", ")", "for", "start", "in", "range", "(", "0", ",", "duration", "-", "self", ".", "duration", ",", "self", ".", "stride", ")", ":", "yield", "start" ]
Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch
[ "Generate", "patch", "start", "indices" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/sampler.py#L210-L226
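The start indices generated above can be checked with plain Python. Note the exclusive stop of range: the final full patch is never emitted, whereas the random Sampler earlier draws from range(0, duration - patch + 1) and can return it:

duration, patch, stride = 100, 20, 10
print(list(range(0, duration - patch, stride)))  # [0, 10, 20, 30, 40, 50, 60, 70]
# start=80 would still yield a full 20-frame patch but is excluded by the open interval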
bmcfee/pumpp
pumpp/sampler.py
VariableLengthSampler.indices
def indices(self, data): '''Generate patch indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) while True: # Generate a sampling interval yield self.rng.randint(0, duration - self.min_duration + 1)
python
def indices(self, data): '''Generate patch indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) while True: # Generate a sampling interval yield self.rng.randint(0, duration - self.min_duration + 1)
[ "def", "indices", "(", "self", ",", "data", ")", ":", "duration", "=", "self", ".", "data_duration", "(", "data", ")", "while", "True", ":", "# Generate a sampling interval", "yield", "self", ".", "rng", ".", "randint", "(", "0", ",", "duration", "-", "self", ".", "min_duration", "+", "1", ")" ]
Generate patch indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch
[ "Generate", "patch", "indices" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/sampler.py#L279-L296
bmcfee/pumpp
pumpp/base.py
Scope.scope
def scope(self, key): '''Apply the name scope to a key Parameters ---------- key : string Returns ------- `name/key` if `name` is not `None`; otherwise, `key`. ''' if self.name is None: return key return '{:s}/{:s}'.format(self.name, key)
python
def scope(self, key): '''Apply the name scope to a key Parameters ---------- key : string Returns ------- `name/key` if `name` is not `None`; otherwise, `key`. ''' if self.name is None: return key return '{:s}/{:s}'.format(self.name, key)
[ "def", "scope", "(", "self", ",", "key", ")", ":", "if", "self", ".", "name", "is", "None", ":", "return", "key", "return", "'{:s}/{:s}'", ".", "format", "(", "self", ".", "name", ",", "key", ")" ]
Apply the name scope to a key Parameters ---------- key : string Returns ------- `name/key` if `name` is not `None`; otherwise, `key`.
[ "Apply", "the", "name", "scope", "to", "a", "key" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L36-L50
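The scoping rule above in two lines, using a pumpp feature extractor as the concrete Scope (the constructor arguments here are assumptions based on the records in this section):

import pumpp

mel = pumpp.feature.Mel(name='mel', sr=22050, hop_length=512)
print(mel.scope('mag'))   # -> 'mel/mag'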
bmcfee/pumpp
pumpp/base.py
Scope.register
def register(self, field, shape, dtype): '''Register a field as a tensor with specified shape and type. A `Tensor` of the given shape and type will be registered in this object's `fields` dict. Parameters ---------- field : str The name of the field shape : iterable of `int` or `None` The shape of the output variable. This does not include a dimension for multiple outputs. `None` may be used to indicate variable-length outputs dtype : type The data type of the field Raises ------ ParameterError If dtype or shape are improperly specified ''' if not isinstance(dtype, type): raise ParameterError('dtype={} must be a type'.format(dtype)) if not (isinstance(shape, Iterable) and all([s is None or isinstance(s, int) for s in shape])): raise ParameterError('shape={} must be an iterable of integers'.format(shape)) self.fields[self.scope(field)] = Tensor(tuple(shape), dtype)
python
def register(self, field, shape, dtype): '''Register a field as a tensor with specified shape and type. A `Tensor` of the given shape and type will be registered in this object's `fields` dict. Parameters ---------- field : str The name of the field shape : iterable of `int` or `None` The shape of the output variable. This does not include a dimension for multiple outputs. `None` may be used to indicate variable-length outputs dtype : type The data type of the field Raises ------ ParameterError If dtype or shape are improperly specified ''' if not isinstance(dtype, type): raise ParameterError('dtype={} must be a type'.format(dtype)) if not (isinstance(shape, Iterable) and all([s is None or isinstance(s, int) for s in shape])): raise ParameterError('shape={} must be an iterable of integers'.format(shape)) self.fields[self.scope(field)] = Tensor(tuple(shape), dtype)
[ "def", "register", "(", "self", ",", "field", ",", "shape", ",", "dtype", ")", ":", "if", "not", "isinstance", "(", "dtype", ",", "type", ")", ":", "raise", "ParameterError", "(", "'dtype={} must be a type'", ".", "format", "(", "dtype", ")", ")", "if", "not", "(", "isinstance", "(", "shape", ",", "Iterable", ")", "and", "all", "(", "[", "s", "is", "None", "or", "isinstance", "(", "s", ",", "int", ")", "for", "s", "in", "shape", "]", ")", ")", ":", "raise", "ParameterError", "(", "'shape={} must be an iterable of integers'", ".", "format", "(", "shape", ")", ")", "self", ".", "fields", "[", "self", ".", "scope", "(", "field", ")", "]", "=", "Tensor", "(", "tuple", "(", "shape", ")", ",", "dtype", ")" ]
Register a field as a tensor with specified shape and type. A `Tensor` of the given shape and type will be registered in this object's `fields` dict. Parameters ---------- field : str The name of the field shape : iterable of `int` or `None` The shape of the output variable. This does not include a dimension for multiple outputs. `None` may be used to indicate variable-length outputs dtype : type The data type of the field Raises ------ ParameterError If dtype or shape are improperly specified
[ "Register", "a", "field", "as", "a", "tensor", "with", "specified", "shape", "and", "type", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L52-L84
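A sketch of the bookkeeping register performs, assuming Scope can be instantiated directly with just a name; None marks the variable-length (time) dimension:

import numpy as np
from pumpp.base import Scope

scope = Scope(name='mel')
scope.register('mag', [None, 128], np.float32)   # None marks the variable-length time axis
print(scope.fields)   # {'mel/mag': Tensor(shape=(None, 128), dtype=<class 'numpy.float32'>)}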
bmcfee/pumpp
pumpp/base.py
Scope.merge
def merge(self, data): '''Merge an array of output dictionaries into a single dictionary with properly scoped names. Parameters ---------- data : list of dict Output dicts as produced by `pumpp.task.BaseTaskTransformer.transform` or `pumpp.feature.FeatureExtractor.transform`. Returns ------- data_out : dict All elements of the input dicts are stacked along the 0 axis, and keys are re-mapped by `scope`. ''' data_out = dict() # Iterate over all keys in data for key in set().union(*data): data_out[self.scope(key)] = np.stack([np.asarray(d[key]) for d in data], axis=0) return data_out
python
def merge(self, data): '''Merge an array of output dictionaries into a single dictionary with properly scoped names. Parameters ---------- data : list of dict Output dicts as produced by `pumpp.task.BaseTaskTransformer.transform` or `pumpp.feature.FeatureExtractor.transform`. Returns ------- data_out : dict All elements of the input dicts are stacked along the 0 axis, and keys are re-mapped by `scope`. ''' data_out = dict() # Iterate over all keys in data for key in set().union(*data): data_out[self.scope(key)] = np.stack([np.asarray(d[key]) for d in data], axis=0) return data_out
[ "def", "merge", "(", "self", ",", "data", ")", ":", "data_out", "=", "dict", "(", ")", "# Iterate over all keys in data", "for", "key", "in", "set", "(", ")", ".", "union", "(", "*", "data", ")", ":", "data_out", "[", "self", ".", "scope", "(", "key", ")", "]", "=", "np", ".", "stack", "(", "[", "np", ".", "asarray", "(", "d", "[", "key", "]", ")", "for", "d", "in", "data", "]", ",", "axis", "=", "0", ")", "return", "data_out" ]
Merge an array of output dictionaries into a single dictionary with properly scoped names. Parameters ---------- data : list of dict Output dicts as produced by `pumpp.task.BaseTaskTransformer.transform` or `pumpp.feature.FeatureExtractor.transform`. Returns ------- data_out : dict All elements of the input dicts are stacked along the 0 axis, and keys are re-mapped by `scope`.
[ "Merge", "an", "array", "of", "output", "dictionaries", "into", "a", "single", "dictionary", "with", "properly", "scoped", "names", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L89-L111
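merge stacks per-example dicts along a new leading axis and rescopes the keys; a minimal sketch, again assuming a directly constructed Scope:

import numpy as np
from pumpp.base import Scope

scope = Scope(name='mel')
batch = [{'mag': np.zeros((5, 128))}, {'mag': np.ones((5, 128))}]
merged = scope.merge(batch)
print(list(merged), merged['mel/mag'].shape)   # ['mel/mag'] (2, 5, 128)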
bmcfee/pumpp
pumpp/base.py
Slicer.add
def add(self, operator): '''Add an operator to the Slicer Parameters ---------- operator : Scope (TaskTransformer or FeatureExtractor) The new operator to add ''' if not isinstance(operator, Scope): raise ParameterError('Operator {} must be a TaskTransformer ' 'or FeatureExtractor'.format(operator)) for key in operator.fields: self._time[key] = [] # We add 1 to the dimension here to account for batching for tdim, idx in enumerate(operator.fields[key].shape, 1): if idx is None: self._time[key].append(tdim)
python
def add(self, operator): '''Add an operator to the Slicer Parameters ---------- operator : Scope (TaskTransformer or FeatureExtractor) The new operator to add ''' if not isinstance(operator, Scope): raise ParameterError('Operator {} must be a TaskTransformer ' 'or FeatureExtractor'.format(operator)) for key in operator.fields: self._time[key] = [] # We add 1 to the dimension here to account for batching for tdim, idx in enumerate(operator.fields[key].shape, 1): if idx is None: self._time[key].append(tdim)
[ "def", "add", "(", "self", ",", "operator", ")", ":", "if", "not", "isinstance", "(", "operator", ",", "Scope", ")", ":", "raise", "ParameterError", "(", "'Operator {} must be a TaskTransformer '", "'or FeatureExtractor'", ".", "format", "(", "operator", ")", ")", "for", "key", "in", "operator", ".", "fields", ":", "self", ".", "_time", "[", "key", "]", "=", "[", "]", "# We add 1 to the dimension here to account for batching", "for", "tdim", ",", "idx", "in", "enumerate", "(", "operator", ".", "fields", "[", "key", "]", ".", "shape", ",", "1", ")", ":", "if", "idx", "is", "None", ":", "self", ".", "_time", "[", "key", "]", ".", "append", "(", "tdim", ")" ]
Add an operator to the Slicer Parameters ---------- operator : Scope (TaskTransformer or FeatureExtractor) The new operator to add
[ "Add", "an", "operator", "to", "the", "Slicer" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L132-L148
bmcfee/pumpp
pumpp/base.py
Slicer.data_duration
def data_duration(self, data): '''Compute the valid data duration of a dict Parameters ---------- data : dict As produced by pumpp.transform Returns ------- length : int The minimum temporal extent of a dynamic observation in data ''' # Find all the time-like indices of the data lengths = [] for key in self._time: for idx in self._time.get(key, []): lengths.append(data[key].shape[idx]) return min(lengths)
python
def data_duration(self, data): '''Compute the valid data duration of a dict Parameters ---------- data : dict As produced by pumpp.transform Returns ------- length : int The minimum temporal extent of a dynamic observation in data ''' # Find all the time-like indices of the data lengths = [] for key in self._time: for idx in self._time.get(key, []): lengths.append(data[key].shape[idx]) return min(lengths)
[ "def", "data_duration", "(", "self", ",", "data", ")", ":", "# Find all the time-like indices of the data", "lengths", "=", "[", "]", "for", "key", "in", "self", ".", "_time", ":", "for", "idx", "in", "self", ".", "_time", ".", "get", "(", "key", ",", "[", "]", ")", ":", "lengths", ".", "append", "(", "data", "[", "key", "]", ".", "shape", "[", "idx", "]", ")", "return", "min", "(", "lengths", ")" ]
Compute the valid data duration of a dict Parameters ---------- data : dict As produced by pumpp.transform Returns ------- length : int The minimum temporal extent of a dynamic observation in data
[ "Compute", "the", "valid", "data", "duration", "of", "a", "dict" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L150-L169
bmcfee/pumpp
pumpp/base.py
Slicer.crop
def crop(self, data): '''Crop a data dictionary down to its common time Parameters ---------- data : dict As produced by pumpp.transform Returns ------- data_cropped : dict Like `data` but with all time-like axes truncated to the minimum common duration ''' duration = self.data_duration(data) data_out = dict() for key in data: idx = [slice(None)] * data[key].ndim for tdim in self._time.get(key, []): idx[tdim] = slice(duration) data_out[key] = data[key][tuple(idx)] return data_out
python
def crop(self, data): '''Crop a data dictionary down to its common time Parameters ---------- data : dict As produced by pumpp.transform Returns ------- data_cropped : dict Like `data` but with all time-like axes truncated to the minimum common duration ''' duration = self.data_duration(data) data_out = dict() for key in data: idx = [slice(None)] * data[key].ndim for tdim in self._time.get(key, []): idx[tdim] = slice(duration) data_out[key] = data[key][tuple(idx)] return data_out
[ "def", "crop", "(", "self", ",", "data", ")", ":", "duration", "=", "self", ".", "data_duration", "(", "data", ")", "data_out", "=", "dict", "(", ")", "for", "key", "in", "data", ":", "idx", "=", "[", "slice", "(", "None", ")", "]", "*", "data", "[", "key", "]", ".", "ndim", "for", "tdim", "in", "self", ".", "_time", ".", "get", "(", "key", ",", "[", "]", ")", ":", "idx", "[", "tdim", "]", "=", "slice", "(", "duration", ")", "data_out", "[", "key", "]", "=", "data", "[", "key", "]", "[", "tuple", "(", "idx", ")", "]", "return", "data_out" ]
Crop a data dictionary down to its common time Parameters ---------- data : dict As produced by pumpp.transform Returns ------- data_cropped : dict Like `data` but with all time-like axes truncated to the minimum common duration
[ "Crop", "a", "data", "dictionary", "down", "to", "its", "common", "time" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L171-L194
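data_duration and crop together align fields whose time axes disagree by a few frames. A self-contained sketch of the same logic; the Slicer itself needs registered operators, so here the time-axis map that Slicer.add would build is written out by hand:

import numpy as np

data = {'mel/mag': np.zeros((1, 102, 128)),      # 102 frames on axis 1
        'chord/pitch': np.zeros((1, 100, 12))}   # 100 frames on axis 1
time_axes = {'mel/mag': [1], 'chord/pitch': [1]}

duration = min(data[k].shape[ax] for k in time_axes for ax in time_axes[k])  # -> 100
cropped = {}
for key, array in data.items():
    idx = [slice(None)] * array.ndim
    for ax in time_axes.get(key, []):
        idx[ax] = slice(duration)
    cropped[key] = array[tuple(idx)]
print({k: v.shape for k, v in cropped.items()})  # both fields now 100 frames long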
bmcfee/pumpp
pumpp/feature/mel.py
Mel.transform_audio
def transform_audio(self, y): '''Compute the Mel spectrogram Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_mels) The Mel spectrogram ''' n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) mel = np.sqrt(melspectrogram(y=y, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels, fmax=self.fmax)).astype(np.float32) mel = fix_length(mel, n_frames) if self.log: mel = amplitude_to_db(mel, ref=np.max) return {'mag': mel.T[self.idx]}
python
def transform_audio(self, y): '''Compute the Mel spectrogram Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_mels) The Mel spectrogram ''' n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) mel = np.sqrt(melspectrogram(y=y, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels, fmax=self.fmax)).astype(np.float32) mel = fix_length(mel, n_frames) if self.log: mel = amplitude_to_db(mel, ref=np.max) return {'mag': mel.T[self.idx]}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "n_frames", "=", "self", ".", "n_frames", "(", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", ")", "mel", "=", "np", ".", "sqrt", "(", "melspectrogram", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ",", "n_fft", "=", "self", ".", "n_fft", ",", "hop_length", "=", "self", ".", "hop_length", ",", "n_mels", "=", "self", ".", "n_mels", ",", "fmax", "=", "self", ".", "fmax", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "mel", "=", "fix_length", "(", "mel", ",", "n_frames", ")", "if", "self", ".", "log", ":", "mel", "=", "amplitude_to_db", "(", "mel", ",", "ref", "=", "np", ".", "max", ")", "return", "{", "'mag'", ":", "mel", ".", "T", "[", "self", ".", "idx", "]", "}" ]
Compute the Mel spectrogram Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_mels) The Mel spectrogram
[ "Compute", "the", "Mel", "spectrogram" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/mel.py#L54-L81
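The feature itself is plain librosa; a hedged standalone equivalent of the 'mag' computation, without the frame-count fixing and channel indexing of the record (librosa.ex is assumed available, librosa >= 0.8):

import numpy as np
import librosa

y, sr = librosa.load(librosa.ex('trumpet'), sr=22050)  # librosa.ex fetches a demo clip on first use
S = np.sqrt(librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048,
                                           hop_length=512, n_mels=128))
print(S.T.shape)  # (n_frames, n_mels), the layout of data['mag'] above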
bmcfee/pumpp
pumpp/task/regression.py
VectorTransformer.empty
def empty(self, duration): '''Empty vector annotations. This returns an annotation with a single observation vector consisting of all-zeroes. Parameters ---------- duration : number >0 Length of the track Returns ------- ann : jams.Annotation The empty annotation ''' ann = super(VectorTransformer, self).empty(duration) ann.append(time=0, duration=duration, confidence=0, value=np.zeros(self.dimension, dtype=np.float32)) return ann
python
def empty(self, duration): '''Empty vector annotations. This returns an annotation with a single observation vector consisting of all-zeroes. Parameters ---------- duration : number >0 Length of the track Returns ------- ann : jams.Annotation The empty annotation ''' ann = super(VectorTransformer, self).empty(duration) ann.append(time=0, duration=duration, confidence=0, value=np.zeros(self.dimension, dtype=np.float32)) return ann
[ "def", "empty", "(", "self", ",", "duration", ")", ":", "ann", "=", "super", "(", "VectorTransformer", ",", "self", ")", ".", "empty", "(", "duration", ")", "ann", ".", "append", "(", "time", "=", "0", ",", "duration", "=", "duration", ",", "confidence", "=", "0", ",", "value", "=", "np", ".", "zeros", "(", "self", ".", "dimension", ",", "dtype", "=", "np", ".", "float32", ")", ")", "return", "ann" ]
Empty vector annotations. This returns an annotation with a single observation vector consisting of all-zeroes. Parameters ---------- duration : number >0 Length of the track Returns ------- ann : jams.Annotation The empty annotation
[ "Empty", "vector", "annotations", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/regression.py#L42-L62
bmcfee/pumpp
pumpp/task/regression.py
VectorTransformer.transform_annotation
def transform_annotation(self, ann, duration): '''Apply the vector transformation. Parameters ---------- ann : jams.Annotation The input annotation duration : number > 0 The duration of the track Returns ------- data : dict data['vector'] : np.ndarray, shape=(dimension,) Raises ------ DataError If the input dimension does not match ''' _, values = ann.to_interval_values() vector = np.asarray(values[0], dtype=self.dtype) if len(vector) != self.dimension: raise DataError('vector dimension({:0}) ' '!= self.dimension({:1})' .format(len(vector), self.dimension)) return {'vector': vector}
python
def transform_annotation(self, ann, duration): '''Apply the vector transformation. Parameters ---------- ann : jams.Annotation The input annotation duration : number > 0 The duration of the track Returns ------- data : dict data['vector'] : np.ndarray, shape=(dimension,) Raises ------ DataError If the input dimension does not match ''' _, values = ann.to_interval_values() vector = np.asarray(values[0], dtype=self.dtype) if len(vector) != self.dimension: raise DataError('vector dimension({:0}) ' '!= self.dimension({:1})' .format(len(vector), self.dimension)) return {'vector': vector}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "_", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "vector", "=", "np", ".", "asarray", "(", "values", "[", "0", "]", ",", "dtype", "=", "self", ".", "dtype", ")", "if", "len", "(", "vector", ")", "!=", "self", ".", "dimension", ":", "raise", "DataError", "(", "'vector dimension({:0}) '", "'!= self.dimension({:1})'", ".", "format", "(", "len", "(", "vector", ")", ",", "self", ".", "dimension", ")", ")", "return", "{", "'vector'", ":", "vector", "}" ]
Apply the vector transformation. Parameters ---------- ann : jams.Annotation The input annotation duration : number > 0 The duration of the track Returns ------- data : dict data['vector'] : np.ndarray, shape=(dimension,) Raises ------ DataError If the input dimension does not match
[ "Apply", "the", "vector", "transformation", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/regression.py#L64-L92
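A sketch of the annotation layout the transformer above consumes, assuming the 'vector' namespace shipped with jams:

import numpy as np
import jams

ann = jams.Annotation(namespace='vector', duration=5.0)
ann.append(time=0, duration=5.0, confidence=1.0, value=[0.0, 1.0, 2.0])
intervals, values = ann.to_interval_values()
vector = np.asarray(values[0], dtype=np.float32)
print(vector)   # what transform_annotation returns as data['vector'] when dimension == 3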
bmcfee/pumpp
pumpp/task/regression.py
VectorTransformer.inverse
def inverse(self, vector, duration=None): '''Inverse vector transformer''' ann = jams.Annotation(namespace=self.namespace, duration=duration) if duration is None: duration = 0 ann.append(time=0, duration=duration, value=vector) return ann
python
def inverse(self, vector, duration=None): '''Inverse vector transformer''' ann = jams.Annotation(namespace=self.namespace, duration=duration) if duration is None: duration = 0 ann.append(time=0, duration=duration, value=vector) return ann
[ "def", "inverse", "(", "self", ",", "vector", ",", "duration", "=", "None", ")", ":", "ann", "=", "jams", ".", "Annotation", "(", "namespace", "=", "self", ".", "namespace", ",", "duration", "=", "duration", ")", "if", "duration", "is", "None", ":", "duration", "=", "0", "ann", ".", "append", "(", "time", "=", "0", ",", "duration", "=", "duration", ",", "value", "=", "vector", ")", "return", "ann" ]
Inverse vector transformer
[ "Inverse", "vector", "transformer" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/regression.py#L94-L103
bmcfee/pumpp
pumpp/task/tags.py
DynamicLabelTransformer.set_transition
def set_transition(self, p_self): '''Set the transition matrix according to self-loop probabilities. Parameters ---------- p_self : None, float in (0, 1), or np.ndarray [shape=(n_labels,)] Optional self-loop probability(ies), used for Viterbi decoding ''' if p_self is None: self.transition = None else: self.transition = np.empty((len(self._classes), 2, 2)) if np.isscalar(p_self): self.transition = transition_loop(2, p_self) elif len(p_self) != len(self._classes): raise ParameterError('Invalid p_self.shape={} for vocabulary size={}'.format(p_self.shape, len(self._classes))) else: for i in range(len(self._classes)): self.transition[i] = transition_loop(2, p_self[i])
python
def set_transition(self, p_self): '''Set the transition matrix according to self-loop probabilities. Parameters ---------- p_self : None, float in (0, 1), or np.ndarray [shape=(n_labels,)] Optional self-loop probability(ies), used for Viterbi decoding ''' if p_self is None: self.transition = None else: self.transition = np.empty((len(self._classes), 2, 2)) if np.isscalar(p_self): self.transition = transition_loop(2, p_self) elif len(p_self) != len(self._classes): raise ParameterError('Invalid p_self.shape={} for vocabulary size={}'.format(p_self.shape, len(self._classes))) else: for i in range(len(self._classes)): self.transition[i] = transition_loop(2, p_self[i])
[ "def", "set_transition", "(", "self", ",", "p_self", ")", ":", "if", "p_self", "is", "None", ":", "self", ".", "transition", "=", "None", "else", ":", "self", ".", "transition", "=", "np", ".", "empty", "(", "(", "len", "(", "self", ".", "_classes", ")", ",", "2", ",", "2", ")", ")", "if", "np", ".", "isscalar", "(", "p_self", ")", ":", "self", ".", "transition", "=", "transition_loop", "(", "2", ",", "p_self", ")", "elif", "len", "(", "p_self", ")", "!=", "len", "(", "self", ".", "_classes", ")", ":", "raise", "ParameterError", "(", "'Invalid p_self.shape={} for vocabulary size={}'", ".", "format", "(", "p_self", ".", "shape", ",", "len", "(", "self", ".", "_classes", ")", ")", ")", "else", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_classes", ")", ")", ":", "self", ".", "transition", "[", "i", "]", "=", "transition_loop", "(", "2", ",", "p_self", "[", "i", "]", ")" ]
Set the transition matrix according to self-loop probabilities. Parameters ---------- p_self : None, float in (0, 1), or np.ndarray [shape=(n_labels,)] Optional self-loop probability(ies), used for Viterbi decoding
[ "Set", "the", "transition", "matrix", "according", "to", "self", "-", "loop", "probabilities", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/tags.py#L86-L104
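The transition matrices come from librosa; for a scalar self-loop probability the whole vocabulary shares one 2x2 matrix:

from librosa.sequence import transition_loop

print(transition_loop(2, 0.9))
# [[0.9 0.1]
#  [0.1 0.9]] -- rows are the (off, on) states; the diagonal is the self-loop probability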
bmcfee/pumpp
pumpp/task/tags.py
DynamicLabelTransformer.empty
def empty(self, duration): '''Empty label annotations. Constructs a single observation with an empty value (None). Parameters ---------- duration : number > 0 The duration of the annotation ''' ann = super(DynamicLabelTransformer, self).empty(duration) ann.append(time=0, duration=duration, value=None) return ann
python
def empty(self, duration): '''Empty label annotations. Constructs a single observation with an empty value (None). Parameters ---------- duration : number > 0 The duration of the annotation ''' ann = super(DynamicLabelTransformer, self).empty(duration) ann.append(time=0, duration=duration, value=None) return ann
[ "def", "empty", "(", "self", ",", "duration", ")", ":", "ann", "=", "super", "(", "DynamicLabelTransformer", ",", "self", ")", ".", "empty", "(", "duration", ")", "ann", ".", "append", "(", "time", "=", "0", ",", "duration", "=", "duration", ",", "value", "=", "None", ")", "return", "ann" ]
Empty label annotations. Constructs a single observation with an empty value (None). Parameters ---------- duration : number > 0 The duration of the annotation
[ "Empty", "label", "annotations", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/tags.py#L106-L118
bmcfee/pumpp
pumpp/task/tags.py
DynamicLabelTransformer.transform_annotation
def transform_annotation(self, ann, duration): '''Transform an annotation to dynamic label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n, n_labels) A time-varying binary encoding of the labels ''' intervals, values = ann.to_interval_values() # Suppress all intervals not in the encoder tags = [] for v in values: if v in self._classes: tags.extend(self.encoder.transform([[v]])) else: tags.extend(self.encoder.transform([[]])) tags = np.asarray(tags) target = self.encode_intervals(duration, intervals, tags) return {'tags': target}
python
def transform_annotation(self, ann, duration): '''Transform an annotation to dynamic label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n, n_labels) A time-varying binary encoding of the labels ''' intervals, values = ann.to_interval_values() # Suppress all intervals not in the encoder tags = [] for v in values: if v in self._classes: tags.extend(self.encoder.transform([[v]])) else: tags.extend(self.encoder.transform([[]])) tags = np.asarray(tags) target = self.encode_intervals(duration, intervals, tags) return {'tags': target}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "intervals", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "# Suppress all intervals not in the encoder", "tags", "=", "[", "]", "for", "v", "in", "values", ":", "if", "v", "in", "self", ".", "_classes", ":", "tags", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "[", "v", "]", "]", ")", ")", "else", ":", "tags", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "[", "]", "]", ")", ")", "tags", "=", "np", ".", "asarray", "(", "tags", ")", "target", "=", "self", ".", "encode_intervals", "(", "duration", ",", "intervals", ",", "tags", ")", "return", "{", "'tags'", ":", "target", "}" ]
Transform an annotation to dynamic label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n, n_labels) A time-varying binary encoding of the labels
[ "Transform", "an", "annotation", "to", "dynamic", "label", "encoding", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/tags.py#L120-L150
bmcfee/pumpp
pumpp/task/tags.py
DynamicLabelTransformer.inverse
def inverse(self, encoded, duration=None): '''Inverse transformation''' ann = jams.Annotation(namespace=self.namespace, duration=duration) for start, end, value in self.decode_intervals(encoded, duration=duration, transition=self.transition, p_init=self.p_init, p_state=self.p_state): # Map start:end to frames f_start, f_end = time_to_frames([start, end], sr=self.sr, hop_length=self.hop_length) confidence = np.mean(encoded[f_start:f_end+1, value]) value_dec = self.encoder.inverse_transform(np.atleast_2d(value))[0] for vd in value_dec: ann.append(time=start, duration=end-start, value=vd, confidence=confidence) return ann
python
def inverse(self, encoded, duration=None): '''Inverse transformation''' ann = jams.Annotation(namespace=self.namespace, duration=duration) for start, end, value in self.decode_intervals(encoded, duration=duration, transition=self.transition, p_init=self.p_init, p_state=self.p_state): # Map start:end to frames f_start, f_end = time_to_frames([start, end], sr=self.sr, hop_length=self.hop_length) confidence = np.mean(encoded[f_start:f_end+1, value]) value_dec = self.encoder.inverse_transform(np.atleast_2d(value))[0] for vd in value_dec: ann.append(time=start, duration=end-start, value=vd, confidence=confidence) return ann
[ "def", "inverse", "(", "self", ",", "encoded", ",", "duration", "=", "None", ")", ":", "ann", "=", "jams", ".", "Annotation", "(", "namespace", "=", "self", ".", "namespace", ",", "duration", "=", "duration", ")", "for", "start", ",", "end", ",", "value", "in", "self", ".", "decode_intervals", "(", "encoded", ",", "duration", "=", "duration", ",", "transition", "=", "self", ".", "transition", ",", "p_init", "=", "self", ".", "p_init", ",", "p_state", "=", "self", ".", "p_state", ")", ":", "# Map start:end to frames", "f_start", ",", "f_end", "=", "time_to_frames", "(", "[", "start", ",", "end", "]", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "confidence", "=", "np", ".", "mean", "(", "encoded", "[", "f_start", ":", "f_end", "+", "1", ",", "value", "]", ")", "value_dec", "=", "self", ".", "encoder", ".", "inverse_transform", "(", "np", ".", "atleast_2d", "(", "value", ")", ")", "[", "0", "]", "for", "vd", "in", "value_dec", ":", "ann", ".", "append", "(", "time", "=", "start", ",", "duration", "=", "end", "-", "start", ",", "value", "=", "vd", ",", "confidence", "=", "confidence", ")", "return", "ann" ]
Inverse transformation
[ "Inverse", "transformation" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/tags.py#L152-L176
bmcfee/pumpp
pumpp/task/tags.py
StaticLabelTransformer.transform_annotation
def transform_annotation(self, ann, duration): '''Transform an annotation to static label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n_labels,) A static binary encoding of the labels ''' intervals = np.asarray([[0, 1]]) values = list([obs.value for obs in ann]) intervals = np.tile(intervals, [len(values), 1]) # Suppress all intervals not in the encoder tags = [v for v in values if v in self._classes] if len(tags): target = self.encoder.transform([tags]).astype(np.bool).max(axis=0) else: target = np.zeros(len(self._classes), dtype=np.bool) return {'tags': target}
python
def transform_annotation(self, ann, duration): '''Transform an annotation to static label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n_labels,) A static binary encoding of the labels ''' intervals = np.asarray([[0, 1]]) values = list([obs.value for obs in ann]) intervals = np.tile(intervals, [len(values), 1]) # Suppress all intervals not in the encoder tags = [v for v in values if v in self._classes] if len(tags): target = self.encoder.transform([tags]).astype(np.bool).max(axis=0) else: target = np.zeros(len(self._classes), dtype=np.bool) return {'tags': target}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "intervals", "=", "np", ".", "asarray", "(", "[", "[", "0", ",", "1", "]", "]", ")", "values", "=", "list", "(", "[", "obs", ".", "value", "for", "obs", "in", "ann", "]", ")", "intervals", "=", "np", ".", "tile", "(", "intervals", ",", "[", "len", "(", "values", ")", ",", "1", "]", ")", "# Suppress all intervals not in the encoder", "tags", "=", "[", "v", "for", "v", "in", "values", "if", "v", "in", "self", ".", "_classes", "]", "if", "len", "(", "tags", ")", ":", "target", "=", "self", ".", "encoder", ".", "transform", "(", "[", "tags", "]", ")", ".", "astype", "(", "np", ".", "bool", ")", ".", "max", "(", "axis", "=", "0", ")", "else", ":", "target", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "_classes", ")", ",", "dtype", "=", "np", ".", "bool", ")", "return", "{", "'tags'", ":", "target", "}" ]
Transform an annotation to static label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n_labels,) A static binary encoding of the labels
[ "Transform", "an", "annotation", "to", "static", "label", "encoding", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/tags.py#L214-L242
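The multi-hot construction above matches scikit-learn's MultiLabelBinarizer, which pumpp appears to use for self.encoder; a self-contained sketch with the deprecated np.bool alias (removed in NumPy 1.24) replaced by the builtin bool:

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer

enc = MultiLabelBinarizer()
enc.fit([['jazz', 'pop', 'rock']])
target = enc.transform([['rock', 'jazz']]).astype(bool).max(axis=0)
print(enc.classes_, target)   # ['jazz' 'pop' 'rock'] [ True False  True]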
bmcfee/pumpp
pumpp/task/tags.py
StaticLabelTransformer.inverse
def inverse(self, encoded, duration=None): '''Inverse static tag transformation''' ann = jams.Annotation(namespace=self.namespace, duration=duration) if np.isrealobj(encoded): detected = (encoded >= 0.5) else: detected = encoded for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]: vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd))) ann.append(time=0, duration=duration, value=vd, confidence=encoded[vid]) return ann
python
def inverse(self, encoded, duration=None): '''Inverse static tag transformation''' ann = jams.Annotation(namespace=self.namespace, duration=duration) if np.isrealobj(encoded): detected = (encoded >= 0.5) else: detected = encoded for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]: vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd))) ann.append(time=0, duration=duration, value=vd, confidence=encoded[vid]) return ann
[ "def", "inverse", "(", "self", ",", "encoded", ",", "duration", "=", "None", ")", ":", "ann", "=", "jams", ".", "Annotation", "(", "namespace", "=", "self", ".", "namespace", ",", "duration", "=", "duration", ")", "if", "np", ".", "isrealobj", "(", "encoded", ")", ":", "detected", "=", "(", "encoded", ">=", "0.5", ")", "else", ":", "detected", "=", "encoded", "for", "vd", "in", "self", ".", "encoder", ".", "inverse_transform", "(", "np", ".", "atleast_2d", "(", "detected", ")", ")", "[", "0", "]", ":", "vid", "=", "np", ".", "flatnonzero", "(", "self", ".", "encoder", ".", "transform", "(", "np", ".", "atleast_2d", "(", "vd", ")", ")", ")", "ann", ".", "append", "(", "time", "=", "0", ",", "duration", "=", "duration", ",", "value", "=", "vd", ",", "confidence", "=", "encoded", "[", "vid", "]", ")", "return", "ann" ]
Inverse static tag transformation
[ "Inverse", "static", "tag", "transformation" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/tags.py#L244-L260
bmcfee/pumpp
pumpp/feature/time.py
TimePosition.transform_audio
def transform_audio(self, y): '''Compute the time position encoding Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['relative'] = np.ndarray, shape=(n_frames, 2) data['absolute'] = np.ndarray, shape=(n_frames, 2) Relative and absolute time positional encodings. ''' duration = get_duration(y=y, sr=self.sr) n_frames = self.n_frames(duration) relative = np.zeros((n_frames, 2), dtype=np.float32) relative[:, 0] = np.cos(np.pi * np.linspace(0, 1, num=n_frames)) relative[:, 1] = np.sin(np.pi * np.linspace(0, 1, num=n_frames)) absolute = relative * np.sqrt(duration) return {'relative': relative[self.idx], 'absolute': absolute[self.idx]}
python
def transform_audio(self, y): '''Compute the time position encoding Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['relative'] = np.ndarray, shape=(n_frames, 2) data['absolute'] = np.ndarray, shape=(n_frames, 2) Relative and absolute time positional encodings. ''' duration = get_duration(y=y, sr=self.sr) n_frames = self.n_frames(duration) relative = np.zeros((n_frames, 2), dtype=np.float32) relative[:, 0] = np.cos(np.pi * np.linspace(0, 1, num=n_frames)) relative[:, 1] = np.sin(np.pi * np.linspace(0, 1, num=n_frames)) absolute = relative * np.sqrt(duration) return {'relative': relative[self.idx], 'absolute': absolute[self.idx]}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "duration", "=", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", "n_frames", "=", "self", ".", "n_frames", "(", "duration", ")", "relative", "=", "np", ".", "zeros", "(", "(", "n_frames", ",", "2", ")", ",", "dtype", "=", "np", ".", "float32", ")", "relative", "[", ":", ",", "0", "]", "=", "np", ".", "cos", "(", "np", ".", "pi", "*", "np", ".", "linspace", "(", "0", ",", "1", ",", "num", "=", "n_frames", ")", ")", "relative", "[", ":", ",", "1", "]", "=", "np", ".", "sin", "(", "np", ".", "pi", "*", "np", ".", "linspace", "(", "0", ",", "1", ",", "num", "=", "n_frames", ")", ")", "absolute", "=", "relative", "*", "np", ".", "sqrt", "(", "duration", ")", "return", "{", "'relative'", ":", "relative", "[", "self", ".", "idx", "]", ",", "'absolute'", ":", "absolute", "[", "self", ".", "idx", "]", "}" ]
Compute the time position encoding Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['relative'] = np.ndarray, shape=(n_frames, 2) data['absolute'] = np.ndarray, shape=(n_frames, 2) Relative and absolute time positional encodings.
[ "Compute", "the", "time", "position", "encoding" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/time.py#L34-L61
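The positional code above, reproduced standalone: the relative encoding walks half the unit circle from (1, 0) to (-1, 0), and the absolute one scales it by sqrt(duration):

import numpy as np

n_frames, duration = 5, 4.0
phase = np.pi * np.linspace(0, 1, num=n_frames)
relative = np.stack([np.cos(phase), np.sin(phase)], axis=1).astype(np.float32)
absolute = relative * np.sqrt(duration)
print(relative[0], relative[-1])   # [1. 0.] and [-1. ~0.]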
bmcfee/pumpp
pumpp/core.py
Pump.add
def add(self, operator):
    '''Add an operation to this pump.

    Parameters
    ----------
    operator : BaseTaskTransformer, FeatureExtractor
        The operation to add

    Raises
    ------
    ParameterError
        if `operator` is not of a correct type
    '''
    if not isinstance(operator, (BaseTaskTransformer, FeatureExtractor)):
        raise ParameterError('operator={} must be one of '
                             '(BaseTaskTransformer, FeatureExtractor)'
                             .format(operator))

    if operator.name in self.opmap:
        raise ParameterError('Duplicate operator name detected: '
                             '{}'.format(operator))

    super(Pump, self).add(operator)
    self.opmap[operator.name] = operator
    self.ops.append(operator)
python
def add(self, operator):
        '''Add an operation to this pump.

        Parameters
        ----------
        operator : BaseTaskTransformer, FeatureExtractor
            The operation to add

        Raises
        ------
        ParameterError
            if `op` is not of a correct type
        '''
        if not isinstance(operator, (BaseTaskTransformer, FeatureExtractor)):
            raise ParameterError('operator={} must be one of '
                                 '(BaseTaskTransformer, FeatureExtractor)'
                                 .format(operator))

        if operator.name in self.opmap:
            raise ParameterError('Duplicate operator name detected: '
                                 '{}'.format(operator))

        super(Pump, self).add(operator)
        self.opmap[operator.name] = operator
        self.ops.append(operator)
[ "def", "add", "(", "self", ",", "operator", ")", ":", "if", "not", "isinstance", "(", "operator", ",", "(", "BaseTaskTransformer", ",", "FeatureExtractor", ")", ")", ":", "raise", "ParameterError", "(", "'operator={} must be one of '", "'(BaseTaskTransformer, FeatureExtractor)'", ".", "format", "(", "operator", ")", ")", "if", "operator", ".", "name", "in", "self", ".", "opmap", ":", "raise", "ParameterError", "(", "'Duplicate operator name detected: '", "'{}'", ".", "format", "(", "operator", ")", ")", "super", "(", "Pump", ",", "self", ")", ".", "add", "(", "operator", ")", "self", ".", "opmap", "[", "operator", ".", "name", "]", "=", "operator", "self", ".", "ops", ".", "append", "(", "operator", ")" ]
Add an operation to this pump.

Parameters
----------
operator : BaseTaskTransformer, FeatureExtractor
    The operation to add

Raises
------
ParameterError
    if `op` is not of a correct type
[ "Add", "an", "operation", "to", "this", "pump", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/core.py#L72-L96
bmcfee/pumpp
pumpp/core.py
Pump.transform
def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
        '''Apply the transformations to an audio file, and optionally JAMS object.

        Parameters
        ----------
        audio_f : str
            Path to audio file

        jam : optional, `jams.JAMS`, str or file-like
            Optional JAMS object/path to JAMS file/open file descriptor.

            If provided, this will provide data for task transformers.

        y : np.ndarray
        sr : number > 0
            If provided, operate directly on an existing audio buffer `y`
            at sampling rate `sr` rather than load from `audio_f`.

        crop : bool
            If `True`, then data are cropped to a common time index across all
            fields.  Otherwise, data may have different time extents.

        Returns
        -------
        data : dict
            Data dictionary containing the transformed audio (and annotations)

        Raises
        ------
        ParameterError
            At least one of `audio_f` or `(y, sr)` must be provided.
        '''

        if y is None:
            if audio_f is None:
                raise ParameterError('At least one of `y` or `audio_f` '
                                     'must be provided')

            # Load the audio
            y, sr = librosa.load(audio_f, sr=sr, mono=True)

        if sr is None:
            raise ParameterError('If audio is provided as `y`, you must '
                                 'specify the sampling rate as sr=')

        if jam is None:
            jam = jams.JAMS()
            jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)

        # Load the jams
        if not isinstance(jam, jams.JAMS):
            jam = jams.load(jam)

        data = dict()

        for operator in self.ops:
            if isinstance(operator, BaseTaskTransformer):
                data.update(operator.transform(jam))
            elif isinstance(operator, FeatureExtractor):
                data.update(operator.transform(y, sr))

        if crop:
            data = self.crop(data)

        return data
python
def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
        '''Apply the transformations to an audio file, and optionally JAMS object.

        Parameters
        ----------
        audio_f : str
            Path to audio file

        jam : optional, `jams.JAMS`, str or file-like
            Optional JAMS object/path to JAMS file/open file descriptor.

            If provided, this will provide data for task transformers.

        y : np.ndarray
        sr : number > 0
            If provided, operate directly on an existing audio buffer `y`
            at sampling rate `sr` rather than load from `audio_f`.

        crop : bool
            If `True`, then data are cropped to a common time index across all
            fields.  Otherwise, data may have different time extents.

        Returns
        -------
        data : dict
            Data dictionary containing the transformed audio (and annotations)

        Raises
        ------
        ParameterError
            At least one of `audio_f` or `(y, sr)` must be provided.
        '''

        if y is None:
            if audio_f is None:
                raise ParameterError('At least one of `y` or `audio_f` '
                                     'must be provided')

            # Load the audio
            y, sr = librosa.load(audio_f, sr=sr, mono=True)

        if sr is None:
            raise ParameterError('If audio is provided as `y`, you must '
                                 'specify the sampling rate as sr=')

        if jam is None:
            jam = jams.JAMS()
            jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)

        # Load the jams
        if not isinstance(jam, jams.JAMS):
            jam = jams.load(jam)

        data = dict()

        for operator in self.ops:
            if isinstance(operator, BaseTaskTransformer):
                data.update(operator.transform(jam))
            elif isinstance(operator, FeatureExtractor):
                data.update(operator.transform(y, sr))

        if crop:
            data = self.crop(data)

        return data
[ "def", "transform", "(", "self", ",", "audio_f", "=", "None", ",", "jam", "=", "None", ",", "y", "=", "None", ",", "sr", "=", "None", ",", "crop", "=", "False", ")", ":", "if", "y", "is", "None", ":", "if", "audio_f", "is", "None", ":", "raise", "ParameterError", "(", "'At least one of `y` or `audio_f` '", "'must be provided'", ")", "# Load the audio", "y", ",", "sr", "=", "librosa", ".", "load", "(", "audio_f", ",", "sr", "=", "sr", ",", "mono", "=", "True", ")", "if", "sr", "is", "None", ":", "raise", "ParameterError", "(", "'If audio is provided as `y`, you must '", "'specify the sampling rate as sr='", ")", "if", "jam", "is", "None", ":", "jam", "=", "jams", ".", "JAMS", "(", ")", "jam", ".", "file_metadata", ".", "duration", "=", "librosa", ".", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "sr", ")", "# Load the jams", "if", "not", "isinstance", "(", "jam", ",", "jams", ".", "JAMS", ")", ":", "jam", "=", "jams", ".", "load", "(", "jam", ")", "data", "=", "dict", "(", ")", "for", "operator", "in", "self", ".", "ops", ":", "if", "isinstance", "(", "operator", ",", "BaseTaskTransformer", ")", ":", "data", ".", "update", "(", "operator", ".", "transform", "(", "jam", ")", ")", "elif", "isinstance", "(", "operator", ",", "FeatureExtractor", ")", ":", "data", ".", "update", "(", "operator", ".", "transform", "(", "y", ",", "sr", ")", ")", "if", "crop", ":", "data", "=", "self", ".", "crop", "(", "data", ")", "return", "data" ]
Apply the transformations to an audio file, and optionally JAMS object.

Parameters
----------
audio_f : str
    Path to audio file

jam : optional, `jams.JAMS`, str or file-like
    Optional JAMS object/path to JAMS file/open file descriptor.

    If provided, this will provide data for task transformers.

y : np.ndarray
sr : number > 0
    If provided, operate directly on an existing audio buffer `y`
    at sampling rate `sr` rather than load from `audio_f`.

crop : bool
    If `True`, then data are cropped to a common time index across all
    fields.  Otherwise, data may have different time extents.

Returns
-------
data : dict
    Data dictionary containing the transformed audio (and annotations)

Raises
------
ParameterError
    At least one of `audio_f` or `(y, sr)` must be provided.
[ "Apply", "the", "transformations", "to", "an", "audio", "file", "and", "optionally", "JAMS", "object", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/core.py#L98-L161
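Taken together, the `Pump` records above suggest the following usage. This is a hedged sketch, not verified against the library: the feature-extractor constructor arguments (name, sr, hop_length, n_fft), the file name, and the output key naming are assumptions.

import pumpp

pump = pumpp.Pump()
# Pump.add (above) accepts BaseTaskTransformer or FeatureExtractor instances
pump.add(pumpp.feature.STFTMag(name='stft', sr=22050,
                               hop_length=512, n_fft=1024))
# Pump.transform (above) loads audio when given a path; crop=True aligns
# all fields to a common time index
data = pump.transform(audio_f='example.ogg', crop=True)
print(sorted(data.keys()))  # e.g. ['stft/mag'] -- key naming is an assumption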
bmcfee/pumpp
pumpp/core.py
Pump.sampler
def sampler(self, n_samples, duration, random_state=None):
        '''Construct a sampler object for this pump's operators.

        Parameters
        ----------
        n_samples : None or int > 0
            The number of samples to generate

        duration : int > 0
            The duration (in frames) of each sample patch

        random_state : None, int, or np.random.RandomState
            If int, random_state is the seed used by the random number
            generator;

            If RandomState instance, random_state is the random number
            generator;

            If None, the random number generator is the RandomState instance
            used by np.random.

        Returns
        -------
        sampler : pumpp.Sampler
            The sampler object

        See Also
        --------
        pumpp.sampler.Sampler
        '''

        return Sampler(n_samples, duration,
                       random_state=random_state,
                       *self.ops)
python
def sampler(self, n_samples, duration, random_state=None):
        '''Construct a sampler object for this pump's operators.

        Parameters
        ----------
        n_samples : None or int > 0
            The number of samples to generate

        duration : int > 0
            The duration (in frames) of each sample patch

        random_state : None, int, or np.random.RandomState
            If int, random_state is the seed used by the random number
            generator;

            If RandomState instance, random_state is the random number
            generator;

            If None, the random number generator is the RandomState instance
            used by np.random.

        Returns
        -------
        sampler : pumpp.Sampler
            The sampler object

        See Also
        --------
        pumpp.sampler.Sampler
        '''

        return Sampler(n_samples, duration,
                       random_state=random_state,
                       *self.ops)
[ "def", "sampler", "(", "self", ",", "n_samples", ",", "duration", ",", "random_state", "=", "None", ")", ":", "return", "Sampler", "(", "n_samples", ",", "duration", ",", "random_state", "=", "random_state", ",", "*", "self", ".", "ops", ")" ]
Construct a sampler object for this pump's operators.

Parameters
----------
n_samples : None or int > 0
    The number of samples to generate

duration : int > 0
    The duration (in frames) of each sample patch

random_state : None, int, or np.random.RandomState
    If int, random_state is the seed used by the random number
    generator;

    If RandomState instance, random_state is the random number
    generator;

    If None, the random number generator is the RandomState instance
    used by np.random.

Returns
-------
sampler : pumpp.Sampler
    The sampler object

See Also
--------
pumpp.sampler.Sampler
[ "Construct", "a", "sampler", "object", "for", "this", "pump", "s", "operators", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/core.py#L163-L196
bmcfee/pumpp
pumpp/core.py
Pump.fields
def fields(self):
        '''A dictionary of fields constructed by this pump'''
        out = dict()
        for operator in self.ops:
            out.update(**operator.fields)

        return out
python
def fields(self):
        '''A dictionary of fields constructed by this pump'''
        out = dict()
        for operator in self.ops:
            out.update(**operator.fields)

        return out
[ "def", "fields", "(", "self", ")", ":", "out", "=", "dict", "(", ")", "for", "operator", "in", "self", ".", "ops", ":", "out", ".", "update", "(", "*", "*", "operator", ".", "fields", ")", "return", "out" ]
A dictionary of fields constructed by this pump
[ "A", "dictionary", "of", "fields", "constructed", "by", "this", "pump" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/core.py#L199-L205
bmcfee/pumpp
pumpp/core.py
Pump.layers
def layers(self):
        '''Construct Keras input layers for all feature transformers
        in the pump.

        Returns
        -------
        layers : {field: keras.layers.Input}
            A dictionary of keras input layers, keyed by the corresponding
            fields.
        '''

        layermap = dict()

        for operator in self.ops:
            if hasattr(operator, 'layers'):
                layermap.update(operator.layers())

        return layermap
python
def layers(self):
        '''Construct Keras input layers for all feature transformers
        in the pump.

        Returns
        -------
        layers : {field: keras.layers.Input}
            A dictionary of keras input layers, keyed by the corresponding
            fields.
        '''

        layermap = dict()

        for operator in self.ops:
            if hasattr(operator, 'layers'):
                layermap.update(operator.layers())

        return layermap
[ "def", "layers", "(", "self", ")", ":", "layermap", "=", "dict", "(", ")", "for", "operator", "in", "self", ".", "ops", ":", "if", "hasattr", "(", "operator", ",", "'layers'", ")", ":", "layermap", ".", "update", "(", "operator", ".", "layers", "(", ")", ")", "return", "layermap" ]
Construct Keras input layers for all feature transformers
in the pump.

Returns
-------
layers : {field: keras.layers.Input}
    A dictionary of keras input layers, keyed by the corresponding
    fields.
[ "Construct", "Keras", "input", "layers", "for", "all", "feature", "transformers", "in", "the", "pump", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/core.py#L207-L222
bmcfee/pumpp
pumpp/task/beat.py
BeatTransformer.set_transition_beat
def set_transition_beat(self, p_self):
        '''Set the beat-tracking transition matrix according to
        self-loop probabilities.

        Parameters
        ----------
        p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
            Optional self-loop probability(ies), used for Viterbi decoding
        '''
        if p_self is None:
            self.beat_transition = None
        else:
            self.beat_transition = transition_loop(2, p_self)
python
def set_transition_beat(self, p_self):
        '''Set the beat-tracking transition matrix according to
        self-loop probabilities.

        Parameters
        ----------
        p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
            Optional self-loop probability(ies), used for Viterbi decoding
        '''
        if p_self is None:
            self.beat_transition = None
        else:
            self.beat_transition = transition_loop(2, p_self)
[ "def", "set_transition_beat", "(", "self", ",", "p_self", ")", ":", "if", "p_self", "is", "None", ":", "self", ".", "beat_transition", "=", "None", "else", ":", "self", ".", "beat_transition", "=", "transition_loop", "(", "2", ",", "p_self", ")" ]
Set the beat-tracking transition matrix according to
self-loop probabilities.

Parameters
----------
p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
    Optional self-loop probability(ies), used for Viterbi decoding
[ "Set", "the", "beat", "-", "tracking", "transition", "matrix", "according", "to", "self", "-", "loop", "probabilities", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/beat.py#L92-L104
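For context, a 2-state self-loop transition matrix like the one built by transition_loop(2, p_self) can be written directly in numpy. This sketch assumes transition_loop follows librosa.sequence.transition_loop semantics (diagonal = self-loop probability, remainder assigned to the other state).

import numpy as np

def self_loop_transition(p_self):
    # Broadcast a scalar self-loop probability to both states
    p = np.ones(2) * np.asarray(p_self, dtype=float)
    return np.array([[p[0], 1 - p[0]],
                     [1 - p[1], p[1]]])

print(self_loop_transition(0.9))
# [[0.9 0.1]
#  [0.1 0.9]]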
bmcfee/pumpp
pumpp/task/beat.py
BeatTransformer.set_transition_down
def set_transition_down(self, p_self):
        '''Set the downbeat-tracking transition matrix according to
        self-loop probabilities.

        Parameters
        ----------
        p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
            Optional self-loop probability(ies), used for Viterbi decoding
        '''
        if p_self is None:
            self.down_transition = None
        else:
            self.down_transition = transition_loop(2, p_self)
python
def set_transition_down(self, p_self):
        '''Set the downbeat-tracking transition matrix according to
        self-loop probabilities.

        Parameters
        ----------
        p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
            Optional self-loop probability(ies), used for Viterbi decoding
        '''
        if p_self is None:
            self.down_transition = None
        else:
            self.down_transition = transition_loop(2, p_self)
[ "def", "set_transition_down", "(", "self", ",", "p_self", ")", ":", "if", "p_self", "is", "None", ":", "self", ".", "down_transition", "=", "None", "else", ":", "self", ".", "down_transition", "=", "transition_loop", "(", "2", ",", "p_self", ")" ]
Set the downbeat-tracking transition matrix according to
self-loop probabilities.

Parameters
----------
p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
    Optional self-loop probability(ies), used for Viterbi decoding
[ "Set", "the", "downbeat", "-", "tracking", "transition", "matrix", "according", "to", "self", "-", "loop", "probabilities", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/beat.py#L106-L118
bmcfee/pumpp
pumpp/task/beat.py
BeatTransformer.transform_annotation
def transform_annotation(self, ann, duration):
        '''Apply the beat transformer

        Parameters
        ----------
        ann : jams.Annotation
            The input annotation

        duration : number > 0
            The duration of the audio

        Returns
        -------
        data : dict
            data['beat'] : np.ndarray, shape=(n, 1)
                Binary indicator of beat/non-beat

            data['downbeat'] : np.ndarray, shape=(n, 1)
                Binary indicator of downbeat/non-downbeat

            mask_downbeat : bool
                True if downbeat annotations are present
        '''

        mask_downbeat = False

        intervals, values = ann.to_interval_values()
        values = np.asarray(values)

        beat_events = intervals[:, 0]
        beat_labels = np.ones((len(beat_events), 1))

        idx = (values == 1)
        if np.any(idx):
            downbeat_events = beat_events[idx]
            downbeat_labels = np.ones((len(downbeat_events), 1))
            mask_downbeat = True
        else:
            downbeat_events = np.zeros(0)
            downbeat_labels = np.zeros((0, 1))

        target_beat = self.encode_events(duration,
                                         beat_events,
                                         beat_labels)

        target_downbeat = self.encode_events(duration,
                                             downbeat_events,
                                             downbeat_labels)

        return {'beat': target_beat,
                'downbeat': target_downbeat,
                'mask_downbeat': mask_downbeat}
python
def transform_annotation(self, ann, duration):
        '''Apply the beat transformer

        Parameters
        ----------
        ann : jams.Annotation
            The input annotation

        duration : number > 0
            The duration of the audio

        Returns
        -------
        data : dict
            data['beat'] : np.ndarray, shape=(n, 1)
                Binary indicator of beat/non-beat

            data['downbeat'] : np.ndarray, shape=(n, 1)
                Binary indicator of downbeat/non-downbeat

            mask_downbeat : bool
                True if downbeat annotations are present
        '''

        mask_downbeat = False

        intervals, values = ann.to_interval_values()
        values = np.asarray(values)

        beat_events = intervals[:, 0]
        beat_labels = np.ones((len(beat_events), 1))

        idx = (values == 1)
        if np.any(idx):
            downbeat_events = beat_events[idx]
            downbeat_labels = np.ones((len(downbeat_events), 1))
            mask_downbeat = True
        else:
            downbeat_events = np.zeros(0)
            downbeat_labels = np.zeros((0, 1))

        target_beat = self.encode_events(duration,
                                         beat_events,
                                         beat_labels)

        target_downbeat = self.encode_events(duration,
                                             downbeat_events,
                                             downbeat_labels)

        return {'beat': target_beat,
                'downbeat': target_downbeat,
                'mask_downbeat': mask_downbeat}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "mask_downbeat", "=", "False", "intervals", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "values", "=", "np", ".", "asarray", "(", "values", ")", "beat_events", "=", "intervals", "[", ":", ",", "0", "]", "beat_labels", "=", "np", ".", "ones", "(", "(", "len", "(", "beat_events", ")", ",", "1", ")", ")", "idx", "=", "(", "values", "==", "1", ")", "if", "np", ".", "any", "(", "idx", ")", ":", "downbeat_events", "=", "beat_events", "[", "idx", "]", "downbeat_labels", "=", "np", ".", "ones", "(", "(", "len", "(", "downbeat_events", ")", ",", "1", ")", ")", "mask_downbeat", "=", "True", "else", ":", "downbeat_events", "=", "np", ".", "zeros", "(", "0", ")", "downbeat_labels", "=", "np", ".", "zeros", "(", "(", "0", ",", "1", ")", ")", "target_beat", "=", "self", ".", "encode_events", "(", "duration", ",", "beat_events", ",", "beat_labels", ")", "target_downbeat", "=", "self", ".", "encode_events", "(", "duration", ",", "downbeat_events", ",", "downbeat_labels", ")", "return", "{", "'beat'", ":", "target_beat", ",", "'downbeat'", ":", "target_downbeat", ",", "'mask_downbeat'", ":", "mask_downbeat", "}" ]
Apply the beat transformer

Parameters
----------
ann : jams.Annotation
    The input annotation

duration : number > 0
    The duration of the audio

Returns
-------
data : dict
    data['beat'] : np.ndarray, shape=(n, 1)
        Binary indicator of beat/non-beat

    data['downbeat'] : np.ndarray, shape=(n, 1)
        Binary indicator of downbeat/non-downbeat

    mask_downbeat : bool
        True if downbeat annotations are present
[ "Apply", "the", "beat", "transformer" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/beat.py#L120-L171
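encode_events is not among these records; per the docstring its effect is a binary frame-level indicator of event times. A rough stand-in (an illustrative guess, not the library's implementation) using librosa's time-to-frame conversion:

import numpy as np
import librosa

def encode_beat_events(duration, event_times, sr=22050, hop_length=512):
    # Frame count and rounding convention are assumptions
    n = int(np.ceil(duration * sr / hop_length))
    target = np.zeros((n, 1), dtype=bool)
    frames = librosa.time_to_frames(event_times, sr=sr, hop_length=hop_length)
    # Mark only the in-range frames as active
    target[frames[(frames >= 0) & (frames < n)]] = True
    return target

beats = encode_beat_events(10.0, [0.5, 1.0, 1.5, 2.0])  # toy beat times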
bmcfee/pumpp
pumpp/task/beat.py
BeatTransformer.inverse
def inverse(self, encoded, downbeat=None, duration=None):
        '''Inverse transformation for beats and optional downbeats'''

        ann = jams.Annotation(namespace=self.namespace, duration=duration)

        beat_times = np.asarray([t for t, _ in self.decode_events(encoded,
                                                                  transition=self.beat_transition,
                                                                  p_init=self.beat_p_init,
                                                                  p_state=self.beat_p_state) if _])
        beat_frames = time_to_frames(beat_times,
                                     sr=self.sr,
                                     hop_length=self.hop_length)

        if downbeat is not None:
            downbeat_times = set([t for t, _ in
                                  self.decode_events(downbeat,
                                                     transition=self.down_transition,
                                                     p_init=self.down_p_init,
                                                     p_state=self.down_p_state)
                                  if _])
            pickup_beats = len([t for t in beat_times
                                if t < min(downbeat_times)])
        else:
            downbeat_times = set()
            pickup_beats = 0

        value = - pickup_beats - 1
        for beat_t, beat_f in zip(beat_times, beat_frames):
            if beat_t in downbeat_times:
                value = 1
            else:
                value += 1

            confidence = encoded[beat_f]
            ann.append(time=beat_t,
                       duration=0,
                       value=value,
                       confidence=confidence)

        return ann
python
def inverse(self, encoded, downbeat=None, duration=None):
        '''Inverse transformation for beats and optional downbeats'''

        ann = jams.Annotation(namespace=self.namespace, duration=duration)

        beat_times = np.asarray([t for t, _ in self.decode_events(encoded,
                                                                  transition=self.beat_transition,
                                                                  p_init=self.beat_p_init,
                                                                  p_state=self.beat_p_state) if _])
        beat_frames = time_to_frames(beat_times,
                                     sr=self.sr,
                                     hop_length=self.hop_length)

        if downbeat is not None:
            downbeat_times = set([t for t, _ in
                                  self.decode_events(downbeat,
                                                     transition=self.down_transition,
                                                     p_init=self.down_p_init,
                                                     p_state=self.down_p_state)
                                  if _])
            pickup_beats = len([t for t in beat_times
                                if t < min(downbeat_times)])
        else:
            downbeat_times = set()
            pickup_beats = 0

        value = - pickup_beats - 1
        for beat_t, beat_f in zip(beat_times, beat_frames):
            if beat_t in downbeat_times:
                value = 1
            else:
                value += 1

            confidence = encoded[beat_f]
            ann.append(time=beat_t,
                       duration=0,
                       value=value,
                       confidence=confidence)

        return ann
[ "def", "inverse", "(", "self", ",", "encoded", ",", "downbeat", "=", "None", ",", "duration", "=", "None", ")", ":", "ann", "=", "jams", ".", "Annotation", "(", "namespace", "=", "self", ".", "namespace", ",", "duration", "=", "duration", ")", "beat_times", "=", "np", ".", "asarray", "(", "[", "t", "for", "t", ",", "_", "in", "self", ".", "decode_events", "(", "encoded", ",", "transition", "=", "self", ".", "beat_transition", ",", "p_init", "=", "self", ".", "beat_p_init", ",", "p_state", "=", "self", ".", "beat_p_state", ")", "if", "_", "]", ")", "beat_frames", "=", "time_to_frames", "(", "beat_times", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "if", "downbeat", "is", "not", "None", ":", "downbeat_times", "=", "set", "(", "[", "t", "for", "t", ",", "_", "in", "self", ".", "decode_events", "(", "downbeat", ",", "transition", "=", "self", ".", "down_transition", ",", "p_init", "=", "self", ".", "down_p_init", ",", "p_state", "=", "self", ".", "down_p_state", ")", "if", "_", "]", ")", "pickup_beats", "=", "len", "(", "[", "t", "for", "t", "in", "beat_times", "if", "t", "<", "min", "(", "downbeat_times", ")", "]", ")", "else", ":", "downbeat_times", "=", "set", "(", ")", "pickup_beats", "=", "0", "value", "=", "-", "pickup_beats", "-", "1", "for", "beat_t", ",", "beat_f", "in", "zip", "(", "beat_times", ",", "beat_frames", ")", ":", "if", "beat_t", "in", "downbeat_times", ":", "value", "=", "1", "else", ":", "value", "+=", "1", "confidence", "=", "encoded", "[", "beat_f", "]", "ann", ".", "append", "(", "time", "=", "beat_t", ",", "duration", "=", "0", ",", "value", "=", "value", ",", "confidence", "=", "confidence", ")", "return", "ann" ]
Inverse transformation for beats and optional downbeats
[ "Inverse", "transformation", "for", "beats", "and", "optional", "downbeats" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/beat.py#L173-L209
bmcfee/pumpp
pumpp/task/beat.py
BeatPositionTransformer.transform_annotation
def transform_annotation(self, ann, duration):
        '''Transform an annotation to the beat-position encoding

        Parameters
        ----------
        ann : jams.Annotation
            The annotation to convert

        duration : number > 0
            The duration of the track

        Returns
        -------
        data : dict
            data['position'] : np.ndarray, shape=(n, n_labels) or (n, 1)
                A time-varying label encoding of beat position
        '''

        # 1. get all the events
        # 2. find all the downbeats
        # 3. map each downbeat to a subdivision counter
        #    number of beats until the next downbeat
        # 4. pad out events to intervals
        # 5. encode each beat interval to its position

        boundaries, values = ann.to_interval_values()

        # Convert to intervals and span the duration
        # padding at the end of track does not propagate the right label
        # this is an artifact of inferring end-of-track from boundaries though
        boundaries = list(boundaries[:, 0])
        if boundaries and boundaries[-1] < duration:
            boundaries.append(duration)

        intervals = boundaries_to_intervals(boundaries)

        intervals, values = adjust_intervals(intervals, values,
                                             t_min=0, t_max=duration,
                                             start_label=0, end_label=0)

        values = np.asarray(values, dtype=int)

        downbeats = np.flatnonzero(values == 1)

        position = []
        for i, v in enumerate(values):
            # If the value is a 0, mark it as X and move on
            if v == 0:
                position.extend(self.encoder.transform(['X']))
                continue

            # Otherwise, let's try to find the surrounding downbeats
            prev_idx = np.searchsorted(downbeats, i, side='right') - 1
            next_idx = 1 + prev_idx

            if prev_idx >= 0 and next_idx < len(downbeats):
                # In this case, the subdivision is well-defined
                subdivision = downbeats[next_idx] - downbeats[prev_idx]
            elif prev_idx < 0 and next_idx < len(downbeats):
                subdivision = np.max(values[:downbeats[0]+1])
            elif next_idx >= len(downbeats):
                subdivision = len(values) - downbeats[prev_idx]

            if subdivision > self.max_divisions or subdivision < 1:
                position.extend(self.encoder.transform(['X']))
            else:
                position.extend(self.encoder.transform(['{:02d}/{:02d}'.format(subdivision, v)]))

        dtype = self.fields[self.scope('position')].dtype

        position = np.asarray(position)

        if self.sparse:
            position = position[:, np.newaxis]

        target = self.encode_intervals(duration, intervals, position,
                                       multi=False, dtype=dtype)

        return {'position': target}
python
def transform_annotation(self, ann, duration):
        '''Transform an annotation to the beat-position encoding

        Parameters
        ----------
        ann : jams.Annotation
            The annotation to convert

        duration : number > 0
            The duration of the track

        Returns
        -------
        data : dict
            data['position'] : np.ndarray, shape=(n, n_labels) or (n, 1)
                A time-varying label encoding of beat position
        '''

        # 1. get all the events
        # 2. find all the downbeats
        # 3. map each downbeat to a subdivision counter
        #    number of beats until the next downbeat
        # 4. pad out events to intervals
        # 5. encode each beat interval to its position

        boundaries, values = ann.to_interval_values()

        # Convert to intervals and span the duration
        # padding at the end of track does not propagate the right label
        # this is an artifact of inferring end-of-track from boundaries though
        boundaries = list(boundaries[:, 0])
        if boundaries and boundaries[-1] < duration:
            boundaries.append(duration)

        intervals = boundaries_to_intervals(boundaries)

        intervals, values = adjust_intervals(intervals, values,
                                             t_min=0, t_max=duration,
                                             start_label=0, end_label=0)

        values = np.asarray(values, dtype=int)

        downbeats = np.flatnonzero(values == 1)

        position = []
        for i, v in enumerate(values):
            # If the value is a 0, mark it as X and move on
            if v == 0:
                position.extend(self.encoder.transform(['X']))
                continue

            # Otherwise, let's try to find the surrounding downbeats
            prev_idx = np.searchsorted(downbeats, i, side='right') - 1
            next_idx = 1 + prev_idx

            if prev_idx >= 0 and next_idx < len(downbeats):
                # In this case, the subdivision is well-defined
                subdivision = downbeats[next_idx] - downbeats[prev_idx]
            elif prev_idx < 0 and next_idx < len(downbeats):
                subdivision = np.max(values[:downbeats[0]+1])
            elif next_idx >= len(downbeats):
                subdivision = len(values) - downbeats[prev_idx]

            if subdivision > self.max_divisions or subdivision < 1:
                position.extend(self.encoder.transform(['X']))
            else:
                position.extend(self.encoder.transform(['{:02d}/{:02d}'.format(subdivision, v)]))

        dtype = self.fields[self.scope('position')].dtype

        position = np.asarray(position)

        if self.sparse:
            position = position[:, np.newaxis]

        target = self.encode_intervals(duration, intervals, position,
                                       multi=False, dtype=dtype)

        return {'position': target}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "# 1. get all the events", "# 2. find all the downbeats", "# 3. map each downbeat to a subdivision counter", "# number of beats until the next downbeat", "# 4. pad out events to intervals", "# 5. encode each beat interval to its position", "boundaries", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "# Convert to intervals and span the duration", "# padding at the end of track does not propagate the right label", "# this is an artifact of inferring end-of-track from boundaries though", "boundaries", "=", "list", "(", "boundaries", "[", ":", ",", "0", "]", ")", "if", "boundaries", "and", "boundaries", "[", "-", "1", "]", "<", "duration", ":", "boundaries", ".", "append", "(", "duration", ")", "intervals", "=", "boundaries_to_intervals", "(", "boundaries", ")", "intervals", ",", "values", "=", "adjust_intervals", "(", "intervals", ",", "values", ",", "t_min", "=", "0", ",", "t_max", "=", "duration", ",", "start_label", "=", "0", ",", "end_label", "=", "0", ")", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", "int", ")", "downbeats", "=", "np", ".", "flatnonzero", "(", "values", "==", "1", ")", "position", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "values", ")", ":", "# If the value is a 0, mark it as X and move on", "if", "v", "==", "0", ":", "position", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "'X'", "]", ")", ")", "continue", "# Otherwise, let's try to find the surrounding downbeats", "prev_idx", "=", "np", ".", "searchsorted", "(", "downbeats", ",", "i", ",", "side", "=", "'right'", ")", "-", "1", "next_idx", "=", "1", "+", "prev_idx", "if", "prev_idx", ">=", "0", "and", "next_idx", "<", "len", "(", "downbeats", ")", ":", "# In this case, the subdivision is well-defined", "subdivision", "=", "downbeats", "[", "next_idx", "]", "-", "downbeats", "[", "prev_idx", "]", "elif", "prev_idx", "<", "0", "and", "next_idx", "<", "len", "(", "downbeats", ")", ":", "subdivision", "=", "np", ".", "max", "(", "values", "[", ":", "downbeats", "[", "0", "]", "+", "1", "]", ")", "elif", "next_idx", ">=", "len", "(", "downbeats", ")", ":", "subdivision", "=", "len", "(", "values", ")", "-", "downbeats", "[", "prev_idx", "]", "if", "subdivision", ">", "self", ".", "max_divisions", "or", "subdivision", "<", "1", ":", "position", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "'X'", "]", ")", ")", "else", ":", "position", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "'{:02d}/{:02d}'", ".", "format", "(", "subdivision", ",", "v", ")", "]", ")", ")", "dtype", "=", "self", ".", "fields", "[", "self", ".", "scope", "(", "'position'", ")", "]", ".", "dtype", "position", "=", "np", ".", "asarray", "(", "position", ")", "if", "self", ".", "sparse", ":", "position", "=", "position", "[", ":", ",", "np", ".", "newaxis", "]", "target", "=", "self", ".", "encode_intervals", "(", "duration", ",", "intervals", ",", "position", ",", "multi", "=", "False", ",", "dtype", "=", "dtype", ")", "return", "{", "'position'", ":", "target", "}" ]
Transform an annotation to the beat-position encoding

Parameters
----------
ann : jams.Annotation
    The annotation to convert

duration : number > 0
    The duration of the track

Returns
-------
data : dict
    data['position'] : np.ndarray, shape=(n, n_labels) or (n, 1)
        A time-varying label encoding of beat position
[ "Transform", "an", "annotation", "to", "the", "beat", "-", "position", "encoding" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/beat.py#L265-L339
bmcfee/pumpp
pumpp/feature/rhythm.py
Tempogram.transform_audio
def transform_audio(self, y):
        '''Compute the tempogram

        Parameters
        ----------
        y : np.ndarray
            Audio buffer

        Returns
        -------
        data : dict
            data['tempogram'] : np.ndarray, shape=(n_frames, win_length)
                The tempogram
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        tgram = tempogram(y=y, sr=self.sr,
                          hop_length=self.hop_length,
                          win_length=self.win_length).astype(np.float32)

        tgram = fix_length(tgram, n_frames)

        return {'tempogram': tgram.T[self.idx]}
python
def transform_audio(self, y):
        '''Compute the tempogram

        Parameters
        ----------
        y : np.ndarray
            Audio buffer

        Returns
        -------
        data : dict
            data['tempogram'] : np.ndarray, shape=(n_frames, win_length)
                The tempogram
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        tgram = tempogram(y=y, sr=self.sr,
                          hop_length=self.hop_length,
                          win_length=self.win_length).astype(np.float32)

        tgram = fix_length(tgram, n_frames)

        return {'tempogram': tgram.T[self.idx]}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "n_frames", "=", "self", ".", "n_frames", "(", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", ")", "tgram", "=", "tempogram", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ",", "win_length", "=", "self", ".", "win_length", ")", ".", "astype", "(", "np", ".", "float32", ")", "tgram", "=", "fix_length", "(", "tgram", ",", "n_frames", ")", "return", "{", "'tempogram'", ":", "tgram", ".", "T", "[", "self", ".", "idx", "]", "}" ]
Compute the tempogram Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['tempogram'] : np.ndarray, shape=(n_frames, win_length) The tempogram
[ "Compute", "the", "tempogram" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/rhythm.py#L39-L60
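The same tempogram can be computed standalone with librosa; the example-audio call assumes librosa >= 0.8, and the window parameters are illustrative only.

import numpy as np
import librosa

y, sr = librosa.load(librosa.example('trumpet'))  # assumes librosa >= 0.8
tgram = librosa.feature.tempogram(y=y, sr=sr, hop_length=512,
                                  win_length=384).astype(np.float32)
print(tgram.shape)  # (win_length, n_frames); the record above transposes
                    # to a frames-first layout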
bmcfee/pumpp
pumpp/feature/rhythm.py
TempoScale.transform_audio
def transform_audio(self, y):
        '''Apply the scale transform to the tempogram

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt)
                The scale transform magnitude coefficients
        '''
        data = super(TempoScale, self).transform_audio(y)
        data['temposcale'] = np.abs(fmt(data.pop('tempogram'),
                                        axis=1,
                                        n_fmt=self.n_fmt)).astype(np.float32)[self.idx]

        return data
python
def transform_audio(self, y):
        '''Apply the scale transform to the tempogram

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt)
                The scale transform magnitude coefficients
        '''
        data = super(TempoScale, self).transform_audio(y)
        data['temposcale'] = np.abs(fmt(data.pop('tempogram'),
                                        axis=1,
                                        n_fmt=self.n_fmt)).astype(np.float32)[self.idx]

        return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "TempoScale", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", "[", "'temposcale'", "]", "=", "np", ".", "abs", "(", "fmt", "(", "data", ".", "pop", "(", "'tempogram'", ")", ",", "axis", "=", "1", ",", "n_fmt", "=", "self", ".", "n_fmt", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "[", "self", ".", "idx", "]", "return", "data" ]
Apply the scale transform to the tempogram Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt) The scale transform magnitude coefficients
[ "Apply", "the", "scale", "transform", "to", "the", "tempogram" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/rhythm.py#L93-L111
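fmt here appears to be the fast Mellin (scale) transform shipped in older librosa releases; that identification, and its continued availability, are assumptions.

import numpy as np
from librosa import fmt  # present in older librosa versions (assumption)

tgram = np.random.rand(384, 100)  # toy (win_length, n_frames) tempogram
# Magnitude of the scale transform taken along the lag axis
scale = np.abs(fmt(tgram, axis=0, n_fmt=128)).astype(np.float32)
print(scale.shape)  # (128, 100)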
bmcfee/pumpp
pumpp/task/structure.py
StructureTransformer.transform_annotation
def transform_annotation(self, ann, duration):
        '''Apply the structure agreement transformation.

        Parameters
        ----------
        ann : jams.Annotation
            The segment annotation

        duration : number > 0
            The target duration

        Returns
        -------
        data : dict
            data['agree'] : np.ndarray, shape=(n, n), dtype=bool
        '''

        intervals, values = ann.to_interval_values()

        intervals, values = adjust_intervals(intervals, values,
                                             t_min=0, t_max=duration)
        # Re-index the labels
        ids, _ = index_labels(values)

        rate = float(self.hop_length) / self.sr
        # Sample segment labels on our frame grid
        _, labels = intervals_to_samples(intervals, ids, sample_size=rate)

        # Make the agreement matrix
        return {'agree': np.equal.outer(labels, labels)}
python
def transform_annotation(self, ann, duration):
        '''Apply the structure agreement transformation.

        Parameters
        ----------
        ann : jams.Annotation
            The segment annotation

        duration : number > 0
            The target duration

        Returns
        -------
        data : dict
            data['agree'] : np.ndarray, shape=(n, n), dtype=bool
        '''

        intervals, values = ann.to_interval_values()

        intervals, values = adjust_intervals(intervals, values,
                                             t_min=0, t_max=duration)
        # Re-index the labels
        ids, _ = index_labels(values)

        rate = float(self.hop_length) / self.sr
        # Sample segment labels on our frame grid
        _, labels = intervals_to_samples(intervals, ids, sample_size=rate)

        # Make the agreement matrix
        return {'agree': np.equal.outer(labels, labels)}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "intervals", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "intervals", ",", "values", "=", "adjust_intervals", "(", "intervals", ",", "values", ",", "t_min", "=", "0", ",", "t_max", "=", "duration", ")", "# Re-index the labels", "ids", ",", "_", "=", "index_labels", "(", "values", ")", "rate", "=", "float", "(", "self", ".", "hop_length", ")", "/", "self", ".", "sr", "# Sample segment labels on our frame grid", "_", ",", "labels", "=", "intervals_to_samples", "(", "intervals", ",", "ids", ",", "sample_size", "=", "rate", ")", "# Make the agreement matrix", "return", "{", "'agree'", ":", "np", ".", "equal", ".", "outer", "(", "labels", ",", "labels", ")", "}" ]
Apply the structure agreement transformation.

Parameters
----------
ann : jams.Annotation
    The segment annotation

duration : number > 0
    The target duration

Returns
-------
data : dict
    data['agree'] : np.ndarray, shape=(n, n), dtype=bool
[ "Apply", "the", "structure", "agreement", "transformation", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/structure.py#L47-L76
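A toy check of the agreement construction above: two frames agree exactly when they carry the same re-indexed segment label.

import numpy as np

labels = np.array([0, 0, 1, 1, 0])
agree = np.equal.outer(labels, labels)
print(agree.astype(int))
# [[1 1 0 0 1]
#  [1 1 0 0 1]
#  [0 0 1 1 0]
#  [0 0 1 1 0]
#  [1 1 0 0 1]]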
bmcfee/pumpp
pumpp/feature/fft.py
STFT.transform_audio
def transform_audio(self, y):
        '''Compute the STFT magnitude and phase.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude

            data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        D = stft(y, hop_length=self.hop_length,
                 n_fft=self.n_fft)

        D = fix_length(D, n_frames)

        mag, phase = magphase(D)
        if self.log:
            mag = amplitude_to_db(mag, ref=np.max)

        return {'mag': mag.T[self.idx].astype(np.float32),
                'phase': np.angle(phase.T)[self.idx].astype(np.float32)}
python
def transform_audio(self, y):
        '''Compute the STFT magnitude and phase.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude

            data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        D = stft(y, hop_length=self.hop_length,
                 n_fft=self.n_fft)

        D = fix_length(D, n_frames)

        mag, phase = magphase(D)
        if self.log:
            mag = amplitude_to_db(mag, ref=np.max)

        return {'mag': mag.T[self.idx].astype(np.float32),
                'phase': np.angle(phase.T)[self.idx].astype(np.float32)}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "n_frames", "=", "self", ".", "n_frames", "(", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", ")", "D", "=", "stft", "(", "y", ",", "hop_length", "=", "self", ".", "hop_length", ",", "n_fft", "=", "self", ".", "n_fft", ")", "D", "=", "fix_length", "(", "D", ",", "n_frames", ")", "mag", ",", "phase", "=", "magphase", "(", "D", ")", "if", "self", ".", "log", ":", "mag", "=", "amplitude_to_db", "(", "mag", ",", "ref", "=", "np", ".", "max", ")", "return", "{", "'mag'", ":", "mag", ".", "T", "[", "self", ".", "idx", "]", ".", "astype", "(", "np", ".", "float32", ")", ",", "'phase'", ":", "np", ".", "angle", "(", "phase", ".", "T", ")", "[", "self", ".", "idx", "]", ".", "astype", "(", "np", ".", "float32", ")", "}" ]
Compute the STFT magnitude and phase.

Parameters
----------
y : np.ndarray
    The audio buffer

Returns
-------
data : dict
    data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
        STFT magnitude

    data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
        STFT phase
[ "Compute", "the", "STFT", "magnitude", "and", "phase", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/fft.py#L51-L80
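An equivalent standalone computation with librosa; the parameter values and example file are illustrative, not taken from the record.

import numpy as np
import librosa

y, sr = librosa.load(librosa.example('trumpet'))  # assumes librosa >= 0.8
D = librosa.stft(y, hop_length=512, n_fft=1024)
mag, phase = librosa.magphase(D)
logmag = librosa.amplitude_to_db(mag, ref=np.max)  # the log=True branch above
print(logmag.T.shape, np.angle(phase.T).shape)     # frames-first, as in the record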
bmcfee/pumpp
pumpp/feature/fft.py
STFTPhaseDiff.transform_audio
def transform_audio(self, y):
        '''Compute the STFT with phase differentials.

        Parameters
        ----------
        y : np.ndarray
            the audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The STFT magnitude

            data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The unwrapped phase differential
        '''
        data = super(STFTPhaseDiff, self).transform_audio(y)
        data['dphase'] = self.phase_diff(data.pop('phase'))
        return data
python
def transform_audio(self, y):
        '''Compute the STFT with phase differentials.

        Parameters
        ----------
        y : np.ndarray
            the audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The STFT magnitude

            data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The unwrapped phase differential
        '''
        data = super(STFTPhaseDiff, self).transform_audio(y)
        data['dphase'] = self.phase_diff(data.pop('phase'))
        return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "STFTPhaseDiff", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", "[", "'dphase'", "]", "=", "self", ".", "phase_diff", "(", "data", ".", "pop", "(", "'phase'", ")", ")", "return", "data" ]
Compute the STFT with phase differentials.

Parameters
----------
y : np.ndarray
    the audio buffer

Returns
-------
data : dict
    data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
        The STFT magnitude

    data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
        The unwrapped phase differential
[ "Compute", "the", "STFT", "with", "phase", "differentials", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/fft.py#L95-L114
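phase_diff is not among these records. A common construction is an unwrapped first-order difference along time, padded to preserve the frame count; this is offered only as an illustrative guess, not the library's implementation.

import numpy as np

def phase_diff(phase):
    # phase: (n_frames, n_bins) array of STFT phase angles
    dphase = np.empty_like(phase)
    dphase[0] = phase[0]  # keep the first frame as-is (assumption)
    dphase[1:] = np.diff(np.unwrap(phase, axis=0), axis=0)
    return dphase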
bmcfee/pumpp
pumpp/feature/fft.py
STFTMag.transform_audio
def transform_audio(self, y):
        '''Compute the STFT

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The STFT magnitude
        '''
        data = super(STFTMag, self).transform_audio(y)
        data.pop('phase')

        return data
python
def transform_audio(self, y):
        '''Compute the STFT

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The STFT magnitude
        '''
        data = super(STFTMag, self).transform_audio(y)
        data.pop('phase')

        return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "STFTMag", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", ".", "pop", "(", "'phase'", ")", "return", "data" ]
Compute the STFT

Parameters
----------
y : np.ndarray
    The audio buffer

Returns
-------
data : dict
    data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
        The STFT magnitude
[ "Compute", "the", "STFT" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/fft.py#L128-L145
bmcfee/pumpp
pumpp/task/chord.py
_pad_nochord
def _pad_nochord(target, axis=-1):
    '''Pad a chord annotation with no-chord flags.

    Parameters
    ----------
    target : np.ndarray
        the input data

    axis : int
        the axis along which to pad

    Returns
    -------
    target_pad
        `target` expanded by 1 along the specified `axis`.

        The expanded dimension will be 0 when `target` is non-zero
        before padding, and 1 otherwise.
    '''
    ncmask = ~np.max(target, axis=axis, keepdims=True)

    return np.concatenate([target, ncmask], axis=axis)
python
def _pad_nochord(target, axis=-1):
    '''Pad a chord annotation with no-chord flags.

    Parameters
    ----------
    target : np.ndarray
        the input data

    axis : int
        the axis along which to pad

    Returns
    -------
    target_pad
        `target` expanded by 1 along the specified `axis`.

        The expanded dimension will be 0 when `target` is non-zero
        before padding, and 1 otherwise.
    '''
    ncmask = ~np.max(target, axis=axis, keepdims=True)

    return np.concatenate([target, ncmask], axis=axis)
[ "def", "_pad_nochord", "(", "target", ",", "axis", "=", "-", "1", ")", ":", "ncmask", "=", "~", "np", ".", "max", "(", "target", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "return", "np", ".", "concatenate", "(", "[", "target", ",", "ncmask", "]", ",", "axis", "=", "axis", ")" ]
Pad a chord annotation with no-chord flags.

Parameters
----------
target : np.ndarray
    the input data

axis : int
    the axis along which to pad

Returns
-------
target_pad
    `target` expanded by 1 along the specified `axis`.

    The expanded dimension will be 0 when `target` is non-zero
    before padding, and 1 otherwise.
[ "Pad", "a", "chord", "annotation", "with", "no", "-", "chord", "flags", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L24-L44
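A toy check of _pad_nochord on a boolean activation matrix: the appended column is 1 exactly where no class is active.

import numpy as np

target = np.array([[True, False],
                   [False, False]])
ncmask = ~np.max(target, axis=-1, keepdims=True)
print(np.concatenate([target, ncmask], axis=-1).astype(int))
# [[1 0 0]
#  [0 0 1]]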
bmcfee/pumpp
pumpp/task/chord.py
ChordTransformer.empty
def empty(self, duration):
        '''Empty chord annotations

        Parameters
        ----------
        duration : number
            The length (in seconds) of the empty annotation

        Returns
        -------
        ann : jams.Annotation
            A chord annotation consisting of a single `no-chord` observation.
        '''
        ann = super(ChordTransformer, self).empty(duration)

        ann.append(time=0,
                   duration=duration,
                   value='N', confidence=0)

        return ann
python
def empty(self, duration):
        '''Empty chord annotations

        Parameters
        ----------
        duration : number
            The length (in seconds) of the empty annotation

        Returns
        -------
        ann : jams.Annotation
            A chord annotation consisting of a single `no-chord` observation.
        '''
        ann = super(ChordTransformer, self).empty(duration)

        ann.append(time=0,
                   duration=duration,
                   value='N', confidence=0)

        return ann
[ "def", "empty", "(", "self", ",", "duration", ")", ":", "ann", "=", "super", "(", "ChordTransformer", ",", "self", ")", ".", "empty", "(", "duration", ")", "ann", ".", "append", "(", "time", "=", "0", ",", "duration", "=", "duration", ",", "value", "=", "'N'", ",", "confidence", "=", "0", ")", "return", "ann" ]
Empty chord annotations

Parameters
----------
duration : number
    The length (in seconds) of the empty annotation

Returns
-------
ann : jams.Annotation
    A chord annotation consisting of a single `no-chord` observation.
[ "Empty", "chord", "annotations" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L92-L111
bmcfee/pumpp
pumpp/task/chord.py
ChordTransformer.transform_annotation
def transform_annotation(self, ann, duration):
        '''Apply the chord transformation.

        Parameters
        ----------
        ann : jams.Annotation
            The chord annotation

        duration : number > 0
            The target duration

        Returns
        -------
        data : dict
            data['pitch'] : np.ndarray, shape=(n, 12)
            data['root'] : np.ndarray, shape=(n, 13) or (n, 1)
            data['bass'] : np.ndarray, shape=(n, 13) or (n, 1)

            `pitch` is a binary matrix indicating pitch class
            activation at each frame.

            `root` is a one-hot matrix indicating the chord
            root's pitch class at each frame.

            `bass` is a one-hot matrix indicating the chord
            bass (lowest note) pitch class at each frame.

            If sparsely encoded, `root` and `bass` are integers
            in the range [0, 12] where 12 indicates no chord.

            If densely encoded, `root` and `bass` have an extra
            final dimension which is active when there is no chord
            sounding.
        '''
        # Construct a blank annotation with mask = 0
        intervals, chords = ann.to_interval_values()

        # Get the dtype for root/bass
        if self.sparse:
            dtype = np.int
        else:
            dtype = np.bool

        # If we don't have any labeled intervals, fill in a no-chord
        if not chords:
            intervals = np.asarray([[0, duration]])
            chords = ['N']

        # Suppress all intervals not in the encoder
        pitches = []
        roots = []
        basses = []

        # default value when data is missing
        if self.sparse:
            fill = 12
        else:
            fill = False

        for chord in chords:
            # Encode the pitches
            root, semi, bass = mir_eval.chord.encode(chord)
            pitches.append(np.roll(semi, root))

            if self.sparse:
                if root in self._classes:
                    roots.append([root])
                    basses.append([(root + bass) % 12])
                else:
                    roots.append([fill])
                    basses.append([fill])
            else:
                if root in self._classes:
                    roots.extend(self.encoder.transform([[root]]))
                    basses.extend(self.encoder.transform([[(root + bass) % 12]]))
                else:
                    roots.extend(self.encoder.transform([[]]))
                    basses.extend(self.encoder.transform([[]]))

        pitches = np.asarray(pitches, dtype=np.bool)
        roots = np.asarray(roots, dtype=dtype)
        basses = np.asarray(basses, dtype=dtype)

        target_pitch = self.encode_intervals(duration, intervals, pitches)

        target_root = self.encode_intervals(duration, intervals, roots,
                                            multi=False,
                                            dtype=dtype,
                                            fill=fill)

        target_bass = self.encode_intervals(duration, intervals, basses,
                                            multi=False,
                                            dtype=dtype,
                                            fill=fill)

        if not self.sparse:
            target_root = _pad_nochord(target_root)
            target_bass = _pad_nochord(target_bass)

        return {'pitch': target_pitch,
                'root': target_root,
                'bass': target_bass}
python
def transform_annotation(self, ann, duration):
        '''Apply the chord transformation.

        Parameters
        ----------
        ann : jams.Annotation
            The chord annotation

        duration : number > 0
            The target duration

        Returns
        -------
        data : dict
            data['pitch'] : np.ndarray, shape=(n, 12)
            data['root'] : np.ndarray, shape=(n, 13) or (n, 1)
            data['bass'] : np.ndarray, shape=(n, 13) or (n, 1)

            `pitch` is a binary matrix indicating pitch class
            activation at each frame.

            `root` is a one-hot matrix indicating the chord
            root's pitch class at each frame.

            `bass` is a one-hot matrix indicating the chord
            bass (lowest note) pitch class at each frame.

            If sparsely encoded, `root` and `bass` are integers
            in the range [0, 12] where 12 indicates no chord.

            If densely encoded, `root` and `bass` have an extra
            final dimension which is active when there is no chord
            sounding.
        '''
        # Construct a blank annotation with mask = 0
        intervals, chords = ann.to_interval_values()

        # Get the dtype for root/bass
        if self.sparse:
            dtype = np.int
        else:
            dtype = np.bool

        # If we don't have any labeled intervals, fill in a no-chord
        if not chords:
            intervals = np.asarray([[0, duration]])
            chords = ['N']

        # Suppress all intervals not in the encoder
        pitches = []
        roots = []
        basses = []

        # default value when data is missing
        if self.sparse:
            fill = 12
        else:
            fill = False

        for chord in chords:
            # Encode the pitches
            root, semi, bass = mir_eval.chord.encode(chord)
            pitches.append(np.roll(semi, root))

            if self.sparse:
                if root in self._classes:
                    roots.append([root])
                    basses.append([(root + bass) % 12])
                else:
                    roots.append([fill])
                    basses.append([fill])
            else:
                if root in self._classes:
                    roots.extend(self.encoder.transform([[root]]))
                    basses.extend(self.encoder.transform([[(root + bass) % 12]]))
                else:
                    roots.extend(self.encoder.transform([[]]))
                    basses.extend(self.encoder.transform([[]]))

        pitches = np.asarray(pitches, dtype=np.bool)
        roots = np.asarray(roots, dtype=dtype)
        basses = np.asarray(basses, dtype=dtype)

        target_pitch = self.encode_intervals(duration, intervals, pitches)

        target_root = self.encode_intervals(duration, intervals, roots,
                                            multi=False,
                                            dtype=dtype,
                                            fill=fill)

        target_bass = self.encode_intervals(duration, intervals, basses,
                                            multi=False,
                                            dtype=dtype,
                                            fill=fill)

        if not self.sparse:
            target_root = _pad_nochord(target_root)
            target_bass = _pad_nochord(target_bass)

        return {'pitch': target_pitch,
                'root': target_root,
                'bass': target_bass}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "# Construct a blank annotation with mask = 0", "intervals", ",", "chords", "=", "ann", ".", "to_interval_values", "(", ")", "# Get the dtype for root/bass", "if", "self", ".", "sparse", ":", "dtype", "=", "np", ".", "int", "else", ":", "dtype", "=", "np", ".", "bool", "# If we don't have any labeled intervals, fill in a no-chord", "if", "not", "chords", ":", "intervals", "=", "np", ".", "asarray", "(", "[", "[", "0", ",", "duration", "]", "]", ")", "chords", "=", "[", "'N'", "]", "# Suppress all intervals not in the encoder", "pitches", "=", "[", "]", "roots", "=", "[", "]", "basses", "=", "[", "]", "# default value when data is missing", "if", "self", ".", "sparse", ":", "fill", "=", "12", "else", ":", "fill", "=", "False", "for", "chord", "in", "chords", ":", "# Encode the pitches", "root", ",", "semi", ",", "bass", "=", "mir_eval", ".", "chord", ".", "encode", "(", "chord", ")", "pitches", ".", "append", "(", "np", ".", "roll", "(", "semi", ",", "root", ")", ")", "if", "self", ".", "sparse", ":", "if", "root", "in", "self", ".", "_classes", ":", "roots", ".", "append", "(", "[", "root", "]", ")", "basses", ".", "append", "(", "[", "(", "root", "+", "bass", ")", "%", "12", "]", ")", "else", ":", "roots", ".", "append", "(", "[", "fill", "]", ")", "basses", ".", "append", "(", "[", "fill", "]", ")", "else", ":", "if", "root", "in", "self", ".", "_classes", ":", "roots", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "[", "root", "]", "]", ")", ")", "basses", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "[", "(", "root", "+", "bass", ")", "%", "12", "]", "]", ")", ")", "else", ":", "roots", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "[", "]", "]", ")", ")", "basses", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "[", "]", "]", ")", ")", "pitches", "=", "np", ".", "asarray", "(", "pitches", ",", "dtype", "=", "np", ".", "bool", ")", "roots", "=", "np", ".", "asarray", "(", "roots", ",", "dtype", "=", "dtype", ")", "basses", "=", "np", ".", "asarray", "(", "basses", ",", "dtype", "=", "dtype", ")", "target_pitch", "=", "self", ".", "encode_intervals", "(", "duration", ",", "intervals", ",", "pitches", ")", "target_root", "=", "self", ".", "encode_intervals", "(", "duration", ",", "intervals", ",", "roots", ",", "multi", "=", "False", ",", "dtype", "=", "dtype", ",", "fill", "=", "fill", ")", "target_bass", "=", "self", ".", "encode_intervals", "(", "duration", ",", "intervals", ",", "basses", ",", "multi", "=", "False", ",", "dtype", "=", "dtype", ",", "fill", "=", "fill", ")", "if", "not", "self", ".", "sparse", ":", "target_root", "=", "_pad_nochord", "(", "target_root", ")", "target_bass", "=", "_pad_nochord", "(", "target_bass", ")", "return", "{", "'pitch'", ":", "target_pitch", ",", "'root'", ":", "target_root", ",", "'bass'", ":", "target_bass", "}" ]
Apply the chord transformation. Parameters ---------- ann : jams.Annotation The chord annotation duration : number > 0 The target duration Returns ------- data : dict data['pitch'] : np.ndarray, shape=(n, 12) data['root'] : np.ndarray, shape=(n, 13) or (n, 1) data['bass'] : np.ndarray, shape=(n, 13) or (n, 1) `pitch` is a binary matrix indicating pitch class activation at each frame. `root` is a one-hot matrix indicating the chord root's pitch class at each frame. `bass` is a one-hot matrix indicating the chord bass (lowest note) pitch class at each frame. If sparsely encoded, `root` and `bass` are integers in the range [0, 12] where 12 indicates no chord. If densely encoded, `root` and `bass` have an extra final dimension which is active when there is no chord sounding.
[ "Apply", "the", "chord", "transformation", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L113-L213
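The encoding step leans on mir_eval; a quick look at what mir_eval.chord.encode returns, and at the rolled bitmap and bass arithmetic used above:

import numpy as np
import mir_eval

root, semi, bass = mir_eval.chord.encode('C:maj')
print(root, bass)           # 0 0
print(np.roll(semi, root))  # pitch-class bitmap with C, E, G active

root, semi, bass = mir_eval.chord.encode('G:min/b3')
print(root, (root + bass) % 12)  # 7 10 -- bass pitch class of the inversion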
bmcfee/pumpp
pumpp/task/chord.py
SimpleChordTransformer.transform_annotation
def transform_annotation(self, ann, duration):
        '''Apply the chord transformation.

        Parameters
        ----------
        ann : jams.Annotation
            The chord annotation

        duration : number > 0
            The target duration

        Returns
        -------
        data : dict
            data['pitch'] : np.ndarray, shape=(n, 12)

            `pitch` is a binary matrix indicating pitch class
            activation at each frame.
        '''
        data = super(SimpleChordTransformer,
                     self).transform_annotation(ann, duration)

        data.pop('root', None)
        data.pop('bass', None)
        return data
python
def transform_annotation(self, ann, duration):
        '''Apply the chord transformation.

        Parameters
        ----------
        ann : jams.Annotation
            The chord annotation

        duration : number > 0
            The target duration

        Returns
        -------
        data : dict
            data['pitch'] : np.ndarray, shape=(n, 12)

            `pitch` is a binary matrix indicating pitch class
            activation at each frame.
        '''
        data = super(SimpleChordTransformer,
                     self).transform_annotation(ann, duration)

        data.pop('root', None)
        data.pop('bass', None)
        return data
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "data", "=", "super", "(", "SimpleChordTransformer", ",", "self", ")", ".", "transform_annotation", "(", "ann", ",", "duration", ")", "data", ".", "pop", "(", "'root'", ",", "None", ")", "data", ".", "pop", "(", "'bass'", ",", "None", ")", "return", "data" ]
Apply the chord transformation. Parameters ---------- ann : jams.Annotation The chord annotation duration : number > 0 The target duration Returns ------- data : dict data['pitch'] : np.ndarray, shape=(n, 12) `pitch` is a binary matrix indicating pitch class activation at each frame.
[ "Apply", "the", "chord", "transformation", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L246-L270
bmcfee/pumpp
pumpp/task/chord.py
ChordTagTransformer.set_transition
def set_transition(self, p_self): '''Set the transition matrix according to self-loop probabilities. Parameters ---------- p_self : None, float in (0, 1), or np.ndarray [shape=(n_labels,)] Optional self-loop probability(ies), used for Viterbi decoding ''' if p_self is None: self.transition = None else: self.transition = transition_loop(len(self._classes), p_self)
python
def set_transition(self, p_self): '''Set the transition matrix according to self-loop probabilities. Parameters ---------- p_self : None, float in (0, 1), or np.ndarray [shape=(n_labels,)] Optional self-loop probability(ies), used for Viterbi decoding ''' if p_self is None: self.transition = None else: self.transition = transition_loop(len(self._classes), p_self)
[ "def", "set_transition", "(", "self", ",", "p_self", ")", ":", "if", "p_self", "is", "None", ":", "self", ".", "transition", "=", "None", "else", ":", "self", ".", "transition", "=", "transition_loop", "(", "len", "(", "self", ".", "_classes", ")", ",", "p_self", ")" ]
Set the transition matrix according to self-loop probabilities. Parameters ---------- p_self : None, float in (0, 1), or np.ndarray [shape=(n_labels,)] Optional self-loop probability(ies), used for Viterbi decoding
[ "Set", "the", "transition", "matrix", "according", "to", "self", "-", "loop", "probabilities", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L416-L427
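For context, a sketch of the self-loop transition matrix that `set_transition` builds; `transition_loop` here is assumed to be `librosa.sequence.transition_loop`, which is what pumpp appears to use:

```python
from librosa.sequence import transition_loop

# 3 states with self-loop probability 0.9: each row keeps 0.9 on the
# diagonal and spreads the remaining mass uniformly over the other states.
T = transition_loop(3, 0.9)
print(T)
# approximately:
# [[0.9  0.05 0.05]
#  [0.05 0.9  0.05]
#  [0.05 0.05 0.9 ]]
```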
bmcfee/pumpp
pumpp/task/chord.py
ChordTagTransformer.simplify
def simplify(self, chord): '''Simplify a chord string down to the vocabulary space''' # Drop inversions chord = re.sub(r'/.*$', r'', chord) # Drop any additional or suppressed tones chord = re.sub(r'\(.*?\)', r'', chord) # Drop dangling : indicators chord = re.sub(r':$', r'', chord) # Encode the chord root, pitches, _ = mir_eval.chord.encode(chord) # Build the query # To map the binary vector pitches down to bit masked integer, # we just dot against powers of 2 P = 2**np.arange(12, dtype=int) query = self.mask_ & pitches[::-1].dot(P) if root < 0 and chord[0].upper() == 'N': return 'N' if query not in QUALITIES: return 'X' return '{}:{}'.format(PITCHES[root], QUALITIES[query])
python
def simplify(self, chord): '''Simplify a chord string down to the vocabulary space''' # Drop inversions chord = re.sub(r'/.*$', r'', chord) # Drop any additional or suppressed tones chord = re.sub(r'\(.*?\)', r'', chord) # Drop dangling : indicators chord = re.sub(r':$', r'', chord) # Encode the chord root, pitches, _ = mir_eval.chord.encode(chord) # Build the query # To map the binary vector pitches down to bit masked integer, # we just dot against powers of 2 P = 2**np.arange(12, dtype=int) query = self.mask_ & pitches[::-1].dot(P) if root < 0 and chord[0].upper() == 'N': return 'N' if query not in QUALITIES: return 'X' return '{}:{}'.format(PITCHES[root], QUALITIES[query])
[ "def", "simplify", "(", "self", ",", "chord", ")", ":", "# Drop inversions", "chord", "=", "re", ".", "sub", "(", "r'/.*$'", ",", "r''", ",", "chord", ")", "# Drop any additional or suppressed tones", "chord", "=", "re", ".", "sub", "(", "r'\\(.*?\\)'", ",", "r''", ",", "chord", ")", "# Drop dangling : indicators", "chord", "=", "re", ".", "sub", "(", "r':$'", ",", "r''", ",", "chord", ")", "# Encode the chord", "root", ",", "pitches", ",", "_", "=", "mir_eval", ".", "chord", ".", "encode", "(", "chord", ")", "# Build the query", "# To map the binary vector pitches down to bit masked integer,", "# we just dot against powers of 2", "P", "=", "2", "**", "np", ".", "arange", "(", "12", ",", "dtype", "=", "int", ")", "query", "=", "self", ".", "mask_", "&", "pitches", "[", ":", ":", "-", "1", "]", ".", "dot", "(", "P", ")", "if", "root", "<", "0", "and", "chord", "[", "0", "]", ".", "upper", "(", ")", "==", "'N'", ":", "return", "'N'", "if", "query", "not", "in", "QUALITIES", ":", "return", "'X'", "return", "'{}:{}'", ".", "format", "(", "PITCHES", "[", "root", "]", ",", "QUALITIES", "[", "query", "]", ")" ]
Simplify a chord string down to the vocabulary space
[ "Simplify", "a", "chord", "string", "down", "to", "the", "vocabulary", "space" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L475-L498
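The `simplify` record relies on packing a 12-d binary pitch vector into a single integer so qualities can be looked up by key. A standalone sketch of that bitmask trick (the `QUALITIES` table itself lives in pumpp and is not reproduced here):

```python
import numpy as np

pitches = np.zeros(12, dtype=int)
pitches[[0, 4, 7]] = 1           # a major triad, relative to the root

# Dot the reversed vector against powers of two: each distinct pitch
# set maps to a distinct integer key.
P = 2 ** np.arange(12, dtype=int)
query = pitches[::-1].dot(P)
print(query)  # 2192 for this triad
```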
bmcfee/pumpp
pumpp/task/chord.py
ChordTagTransformer.transform_annotation
def transform_annotation(self, ann, duration): '''Transform an annotation to chord-tag encoding Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['chord'] : np.ndarray, shape=(n, n_labels) A time-varying binary encoding of the chords ''' intervals, values = ann.to_interval_values() chords = [] for v in values: chords.extend(self.encoder.transform([self.simplify(v)])) dtype = self.fields[self.scope('chord')].dtype chords = np.asarray(chords) if self.sparse: chords = chords[:, np.newaxis] target = self.encode_intervals(duration, intervals, chords, multi=False, dtype=dtype) return {'chord': target}
python
def transform_annotation(self, ann, duration): '''Transform an annotation to chord-tag encoding Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['chord'] : np.ndarray, shape=(n, n_labels) A time-varying binary encoding of the chords ''' intervals, values = ann.to_interval_values() chords = [] for v in values: chords.extend(self.encoder.transform([self.simplify(v)])) dtype = self.fields[self.scope('chord')].dtype chords = np.asarray(chords) if self.sparse: chords = chords[:, np.newaxis] target = self.encode_intervals(duration, intervals, chords, multi=False, dtype=dtype) return {'chord': target}
[ "def", "transform_annotation", "(", "self", ",", "ann", ",", "duration", ")", ":", "intervals", ",", "values", "=", "ann", ".", "to_interval_values", "(", ")", "chords", "=", "[", "]", "for", "v", "in", "values", ":", "chords", ".", "extend", "(", "self", ".", "encoder", ".", "transform", "(", "[", "self", ".", "simplify", "(", "v", ")", "]", ")", ")", "dtype", "=", "self", ".", "fields", "[", "self", ".", "scope", "(", "'chord'", ")", "]", ".", "dtype", "chords", "=", "np", ".", "asarray", "(", "chords", ")", "if", "self", ".", "sparse", ":", "chords", "=", "chords", "[", ":", ",", "np", ".", "newaxis", "]", "target", "=", "self", ".", "encode_intervals", "(", "duration", ",", "intervals", ",", "chords", ",", "multi", "=", "False", ",", "dtype", "=", "dtype", ")", "return", "{", "'chord'", ":", "target", "}" ]
Transform an annotation to chord-tag encoding Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['chord'] : np.ndarray, shape=(n, n_labels) A time-varying binary encoding of the chords
[ "Transform", "an", "annotation", "to", "chord", "-", "tag", "encoding" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/chord.py#L500-L534
bmcfee/pumpp
pumpp/feature/cqt.py
CQT.transform_audio
def transform_audio(self, y): '''Compute the CQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase ''' n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) C = cqt(y=y, sr=self.sr, hop_length=self.hop_length, fmin=self.fmin, n_bins=(self.n_octaves * self.over_sample * 12), bins_per_octave=(self.over_sample * 12)) C = fix_length(C, n_frames) cqtm, phase = magphase(C) if self.log: cqtm = amplitude_to_db(cqtm, ref=np.max) return {'mag': cqtm.T.astype(np.float32)[self.idx], 'phase': np.angle(phase).T.astype(np.float32)[self.idx]}
python
def transform_audio(self, y): '''Compute the CQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase ''' n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) C = cqt(y=y, sr=self.sr, hop_length=self.hop_length, fmin=self.fmin, n_bins=(self.n_octaves * self.over_sample * 12), bins_per_octave=(self.over_sample * 12)) C = fix_length(C, n_frames) cqtm, phase = magphase(C) if self.log: cqtm = amplitude_to_db(cqtm, ref=np.max) return {'mag': cqtm.T.astype(np.float32)[self.idx], 'phase': np.angle(phase).T.astype(np.float32)[self.idx]}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "n_frames", "=", "self", ".", "n_frames", "(", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", ")", "C", "=", "cqt", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ",", "fmin", "=", "self", ".", "fmin", ",", "n_bins", "=", "(", "self", ".", "n_octaves", "*", "self", ".", "over_sample", "*", "12", ")", ",", "bins_per_octave", "=", "(", "self", ".", "over_sample", "*", "12", ")", ")", "C", "=", "fix_length", "(", "C", ",", "n_frames", ")", "cqtm", ",", "phase", "=", "magphase", "(", "C", ")", "if", "self", ".", "log", ":", "cqtm", "=", "amplitude_to_db", "(", "cqtm", ",", "ref", "=", "np", ".", "max", ")", "return", "{", "'mag'", ":", "cqtm", ".", "T", ".", "astype", "(", "np", ".", "float32", ")", "[", "self", ".", "idx", "]", ",", "'phase'", ":", "np", ".", "angle", "(", "phase", ")", ".", "T", ".", "astype", "(", "np", ".", "float32", ")", "[", "self", ".", "idx", "]", "}" ]
Compute the CQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase
[ "Compute", "the", "CQT" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L61-L92
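A rough standalone equivalent of the CQT feature computation in this record, using librosa directly; the audio file and parameter values are illustrative assumptions:

```python
import numpy as np
import librosa

y, sr = librosa.load(librosa.ex('trumpet'), sr=22050)
hop_length, over_sample, n_octaves = 512, 3, 6

C = librosa.cqt(y=y, sr=sr, hop_length=hop_length,
                fmin=librosa.note_to_hz('C1'),
                n_bins=n_octaves * over_sample * 12,
                bins_per_octave=over_sample * 12)

mag, phase = librosa.magphase(C)
mag_db = librosa.amplitude_to_db(mag, ref=np.max)  # the `log=True` branch

features = {'mag': mag_db.T.astype(np.float32),
            'phase': np.angle(phase).T.astype(np.float32)}
```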
bmcfee/pumpp
pumpp/feature/cqt.py
CQTMag.transform_audio
def transform_audio(self, y): '''Compute CQT magnitude. Parameters ---------- y : np.ndarray the audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) The CQT magnitude ''' data = super(CQTMag, self).transform_audio(y) data.pop('phase') return data
python
def transform_audio(self, y): '''Compute CQT magnitude. Parameters ---------- y : np.ndarray the audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) The CQT magnitude ''' data = super(CQTMag, self).transform_audio(y) data.pop('phase') return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "CQTMag", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", ".", "pop", "(", "'phase'", ")", "return", "data" ]
Compute CQT magnitude. Parameters ---------- y : np.ndarray the audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) The CQT magnitude
[ "Compute", "CQT", "magnitude", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L107-L123
bmcfee/pumpp
pumpp/feature/cqt.py
CQTPhaseDiff.transform_audio
def transform_audio(self, y): '''Compute the CQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential ''' data = super(CQTPhaseDiff, self).transform_audio(y) data['dphase'] = self.phase_diff(data.pop('phase')) return data
python
def transform_audio(self, y): '''Compute the CQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential ''' data = super(CQTPhaseDiff, self).transform_audio(y) data['dphase'] = self.phase_diff(data.pop('phase')) return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "CQTPhaseDiff", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", "[", "'dphase'", "]", "=", "self", ".", "phase_diff", "(", "data", ".", "pop", "(", "'phase'", ")", ")", "return", "data" ]
Compute the CQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential
[ "Compute", "the", "CQT", "with", "unwrapped", "phase" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L141-L160
bmcfee/pumpp
pumpp/feature/cqt.py
HCQT.transform_audio
def transform_audio(self, y): '''Compute the HCQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins, n_harmonics) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase ''' cqtm, phase = [], [] n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) for h in self.harmonics: C = cqt(y=y, sr=self.sr, hop_length=self.hop_length, fmin=self.fmin * h, n_bins=(self.n_octaves * self.over_sample * 12), bins_per_octave=(self.over_sample * 12)) C = fix_length(C, n_frames) C, P = magphase(C) if self.log: C = amplitude_to_db(C, ref=np.max) cqtm.append(C) phase.append(P) cqtm = np.asarray(cqtm).astype(np.float32) phase = np.angle(np.asarray(phase)).astype(np.float32) return {'mag': self._index(cqtm), 'phase': self._index(phase)}
python
def transform_audio(self, y): '''Compute the HCQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins, n_harmonics) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase ''' cqtm, phase = [], [] n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) for h in self.harmonics: C = cqt(y=y, sr=self.sr, hop_length=self.hop_length, fmin=self.fmin * h, n_bins=(self.n_octaves * self.over_sample * 12), bins_per_octave=(self.over_sample * 12)) C = fix_length(C, n_frames) C, P = magphase(C) if self.log: C = amplitude_to_db(C, ref=np.max) cqtm.append(C) phase.append(P) cqtm = np.asarray(cqtm).astype(np.float32) phase = np.angle(np.asarray(phase)).astype(np.float32) return {'mag': self._index(cqtm), 'phase': self._index(phase)}
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "cqtm", ",", "phase", "=", "[", "]", ",", "[", "]", "n_frames", "=", "self", ".", "n_frames", "(", "get_duration", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ")", ")", "for", "h", "in", "self", ".", "harmonics", ":", "C", "=", "cqt", "(", "y", "=", "y", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ",", "fmin", "=", "self", ".", "fmin", "*", "h", ",", "n_bins", "=", "(", "self", ".", "n_octaves", "*", "self", ".", "over_sample", "*", "12", ")", ",", "bins_per_octave", "=", "(", "self", ".", "over_sample", "*", "12", ")", ")", "C", "=", "fix_length", "(", "C", ",", "n_frames", ")", "C", ",", "P", "=", "magphase", "(", "C", ")", "if", "self", ".", "log", ":", "C", "=", "amplitude_to_db", "(", "C", ",", "ref", "=", "np", ".", "max", ")", "cqtm", ".", "append", "(", "C", ")", "phase", ".", "append", "(", "P", ")", "cqtm", "=", "np", ".", "asarray", "(", "cqtm", ")", ".", "astype", "(", "np", ".", "float32", ")", "phase", "=", "np", ".", "angle", "(", "np", ".", "asarray", "(", "phase", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "return", "{", "'mag'", ":", "self", ".", "_index", "(", "cqtm", ")", ",", "'phase'", ":", "self", ".", "_index", "(", "phase", ")", "}" ]
Compute the HCQT Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape = (n_frames, n_bins, n_harmonics) The CQT magnitude data['phase']: np.ndarray, shape = mag.shape The CQT phase
[ "Compute", "the", "HCQT" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L231-L270
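The distinctive step in the HCQT record is stacking one CQT per harmonic of the base `fmin` along a channel axis. A sketch under assumed parameters:

```python
import numpy as np
import librosa

y, sr = librosa.load(librosa.ex('trumpet'), sr=22050)
fmin = librosa.note_to_hz('C1')
harmonics = [1, 2, 3]            # illustrative choice

stack = []
for h in harmonics:
    # One CQT per harmonic, anchored at h * fmin
    C = np.abs(librosa.cqt(y=y, sr=sr, fmin=fmin * h,
                           n_bins=72, bins_per_octave=12))
    stack.append(C)

hcqt = np.asarray(stack, dtype=np.float32)  # (harmonics, bins, time)
```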
bmcfee/pumpp
pumpp/feature/cqt.py
HCQT._index
def _index(self, value): '''Rearrange a tensor according to the convolution mode Input is assumed to be in (channels, bins, time) format. ''' if self.conv in ('channels_last', 'tf'): return np.transpose(value, (2, 1, 0)) else: # self.conv in ('channels_first', 'th') return np.transpose(value, (0, 2, 1))
python
def _index(self, value): '''Rearrange a tensor according to the convolution mode Input is assumed to be in (channels, bins, time) format. ''' if self.conv in ('channels_last', 'tf'): return np.transpose(value, (2, 1, 0)) else: # self.conv in ('channels_first', 'th') return np.transpose(value, (0, 2, 1))
[ "def", "_index", "(", "self", ",", "value", ")", ":", "if", "self", ".", "conv", "in", "(", "'channels_last'", ",", "'tf'", ")", ":", "return", "np", ".", "transpose", "(", "value", ",", "(", "2", ",", "1", ",", "0", ")", ")", "else", ":", "# self.conv in ('channels_first', 'th')", "return", "np", ".", "transpose", "(", "value", ",", "(", "0", ",", "2", ",", "1", ")", ")" ]
Rearrange a tensor according to the convolution mode Input is assumed to be in (channels, bins, time) format.
[ "Rearrange", "a", "tensor", "according", "to", "the", "convolution", "mode" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L272-L282
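A tiny sketch of the axis convention `_index` handles: the stacked tensor starts as (channels, bins, time) and is transposed to match the requested convolution layout:

```python
import numpy as np

value = np.zeros((3, 72, 100))                   # (channels, bins, time)
channels_last = np.transpose(value, (2, 1, 0))   # -> (time, bins, channels)
channels_first = np.transpose(value, (0, 2, 1))  # -> (channels, time, bins)
print(channels_last.shape, channels_first.shape)
```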
bmcfee/pumpp
pumpp/feature/cqt.py
HCQTMag.transform_audio
def transform_audio(self, y): '''Compute HCQT magnitude. Parameters ---------- y : np.ndarray the audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) The CQT magnitude ''' data = super(HCQTMag, self).transform_audio(y) data.pop('phase') return data
python
def transform_audio(self, y): '''Compute HCQT magnitude. Parameters ---------- y : np.ndarray the audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) The CQT magnitude ''' data = super(HCQTMag, self).transform_audio(y) data.pop('phase') return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "HCQTMag", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", ".", "pop", "(", "'phase'", ")", "return", "data" ]
Compute HCQT magnitude. Parameters ---------- y : np.ndarray the audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) The CQT magnitude
[ "Compute", "HCQT", "magnitude", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L297-L313
bmcfee/pumpp
pumpp/feature/cqt.py
HCQTPhaseDiff.transform_audio
def transform_audio(self, y): '''Compute the HCQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential ''' data = super(HCQTPhaseDiff, self).transform_audio(y) data['dphase'] = self.phase_diff(data.pop('phase')) return data
python
def transform_audio(self, y): '''Compute the HCQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential ''' data = super(HCQTPhaseDiff, self).transform_audio(y) data['dphase'] = self.phase_diff(data.pop('phase')) return data
[ "def", "transform_audio", "(", "self", ",", "y", ")", ":", "data", "=", "super", "(", "HCQTPhaseDiff", ",", "self", ")", ".", "transform_audio", "(", "y", ")", "data", "[", "'dphase'", "]", "=", "self", ".", "phase_diff", "(", "data", ".", "pop", "(", "'phase'", ")", ")", "return", "data" ]
Compute the HCQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential
[ "Compute", "the", "HCQT", "with", "unwrapped", "phase" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/cqt.py#L332-L351
bmcfee/pumpp
pumpp/task/base.py
fill_value
def fill_value(dtype): '''Get a fill-value for a given dtype Parameters ---------- dtype : type Returns ------- `np.nan` if `dtype` is real or complex 0 otherwise ''' if np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.complexfloating): return dtype(np.nan) return dtype(0)
python
def fill_value(dtype): '''Get a fill-value for a given dtype Parameters ---------- dtype : type Returns ------- `np.nan` if `dtype` is real or complex 0 otherwise ''' if np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.complexfloating): return dtype(np.nan) return dtype(0)
[ "def", "fill_value", "(", "dtype", ")", ":", "if", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "floating", ")", "or", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "complexfloating", ")", ":", "return", "dtype", "(", "np", ".", "nan", ")", "return", "dtype", "(", "0", ")" ]
Get a fill-value for a given dtype Parameters ---------- dtype : type Returns ------- `np.nan` if `dtype` is real or complex 0 otherwise
[ "Get", "a", "fill", "-", "value", "for", "a", "given", "dtype" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L15-L31
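Restating the `fill_value` helper standalone, with a few example dtypes:

```python
import numpy as np

def fill_value(dtype):
    # NaN marks missing data for float/complex targets; zero otherwise.
    if np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.complexfloating):
        return dtype(np.nan)
    return dtype(0)

print(fill_value(np.float32))  # nan
print(fill_value(np.int64))    # 0
print(fill_value(np.bool_))    # False
```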
bmcfee/pumpp
pumpp/task/base.py
BaseTaskTransformer.empty
def empty(self, duration): '''Create an empty jams.Annotation for this task. This method should be overridden by derived classes. Parameters ---------- duration : int >= 0 Duration of the annotation ''' return jams.Annotation(namespace=self.namespace, time=0, duration=0)
python
def empty(self, duration): '''Create an empty jams.Annotation for this task. This method should be overridden by derived classes. Parameters ---------- duration : int >= 0 Duration of the annotation ''' return jams.Annotation(namespace=self.namespace, time=0, duration=0)
[ "def", "empty", "(", "self", ",", "duration", ")", ":", "return", "jams", ".", "Annotation", "(", "namespace", "=", "self", ".", "namespace", ",", "time", "=", "0", ",", "duration", "=", "0", ")" ]
Create an empty jams.Annotation for this task. This method should be overridden by derived classes. Parameters ---------- duration : int >= 0 Duration of the annotation
[ "Create", "an", "empty", "jams", ".", "Annotation", "for", "this", "task", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L62-L72
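A sketch of the blank annotation this record produces when a track has no usable labels; the `'chord'` namespace is just one plausible example:

```python
import jams

ann = jams.Annotation(namespace='chord', time=0, duration=0)
print(len(ann.data))  # 0 observations; task transformers fill in defaults
```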
bmcfee/pumpp
pumpp/task/base.py
BaseTaskTransformer.transform
def transform(self, jam, query=None): '''Transform jam object to make data for this task Parameters ---------- jam : jams.JAMS The jams container object query : string, dict, or callable [optional] An optional query to narrow the elements of `jam.annotations` to be considered. If not provided, all annotations are considered. Returns ------- data : dict A dictionary of transformed annotations. All annotations which can be converted to the target namespace will be converted. ''' anns = [] if query: results = jam.search(**query) else: results = jam.annotations # Find annotations that can be coerced to our target namespace for ann in results: try: anns.append(jams.nsconvert.convert(ann, self.namespace)) except jams.NamespaceError: pass duration = jam.file_metadata.duration # If none, make a fake one if not anns: anns = [self.empty(duration)] # Apply transformations results = [] for ann in anns: results.append(self.transform_annotation(ann, duration)) # If the annotation range is None, it spans the entire track if ann.time is None or ann.duration is None: valid = [0, duration] else: valid = [ann.time, ann.time + ann.duration] results[-1]['_valid'] = time_to_frames(valid, sr=self.sr, hop_length=self.hop_length) # Prefix and collect return self.merge(results)
python
def transform(self, jam, query=None): '''Transform jam object to make data for this task Parameters ---------- jam : jams.JAMS The jams container object query : string, dict, or callable [optional] An optional query to narrow the elements of `jam.annotations` to be considered. If not provided, all annotations are considered. Returns ------- data : dict A dictionary of transformed annotations. All annotations which can be converted to the target namespace will be converted. ''' anns = [] if query: results = jam.search(**query) else: results = jam.annotations # Find annotations that can be coerced to our target namespace for ann in results: try: anns.append(jams.nsconvert.convert(ann, self.namespace)) except jams.NamespaceError: pass duration = jam.file_metadata.duration # If none, make a fake one if not anns: anns = [self.empty(duration)] # Apply transformations results = [] for ann in anns: results.append(self.transform_annotation(ann, duration)) # If the annotation range is None, it spans the entire track if ann.time is None or ann.duration is None: valid = [0, duration] else: valid = [ann.time, ann.time + ann.duration] results[-1]['_valid'] = time_to_frames(valid, sr=self.sr, hop_length=self.hop_length) # Prefix and collect return self.merge(results)
[ "def", "transform", "(", "self", ",", "jam", ",", "query", "=", "None", ")", ":", "anns", "=", "[", "]", "if", "query", ":", "results", "=", "jam", ".", "search", "(", "*", "*", "query", ")", "else", ":", "results", "=", "jam", ".", "annotations", "# Find annotations that can be coerced to our target namespace", "for", "ann", "in", "results", ":", "try", ":", "anns", ".", "append", "(", "jams", ".", "nsconvert", ".", "convert", "(", "ann", ",", "self", ".", "namespace", ")", ")", "except", "jams", ".", "NamespaceError", ":", "pass", "duration", "=", "jam", ".", "file_metadata", ".", "duration", "# If none, make a fake one", "if", "not", "anns", ":", "anns", "=", "[", "self", ".", "empty", "(", "duration", ")", "]", "# Apply transformations", "results", "=", "[", "]", "for", "ann", "in", "anns", ":", "results", ".", "append", "(", "self", ".", "transform_annotation", "(", "ann", ",", "duration", ")", ")", "# If the annotation range is None, it spans the entire track", "if", "ann", ".", "time", "is", "None", "or", "ann", ".", "duration", "is", "None", ":", "valid", "=", "[", "0", ",", "duration", "]", "else", ":", "valid", "=", "[", "ann", ".", "time", ",", "ann", ".", "time", "+", "ann", ".", "duration", "]", "results", "[", "-", "1", "]", "[", "'_valid'", "]", "=", "time_to_frames", "(", "valid", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# Prefix and collect", "return", "self", ".", "merge", "(", "results", ")" ]
Transform jam object to make data for this task Parameters ---------- jam : jams.JAMS The jams container object query : string, dict, or callable [optional] An optional query to narrow the elements of `jam.annotations` to be considered. If not provided, all annotations are considered. Returns ------- data : dict A dictionary of transformed annotations. All annotations which can be converted to the target namespace will be converted.
[ "Transform", "jam", "object", "to", "make", "data", "for", "this", "task" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L74-L129
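A sketch of the namespace-coercion loop at the heart of `transform`: annotations convertible to the task's namespace are kept, everything else is skipped. The specific namespaces used here are illustrative assumptions:

```python
import jams

jam = jams.JAMS()
jam.file_metadata.duration = 10.0
jam.annotations.append(jams.Annotation(namespace='chord_harte'))

usable = []
for ann in jam.annotations:
    try:
        # Keep anything that converts cleanly to the target namespace
        usable.append(jams.nsconvert.convert(ann, 'chord'))
    except jams.NamespaceError:
        pass
print(len(usable))
```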
bmcfee/pumpp
pumpp/task/base.py
BaseTaskTransformer.encode_events
def encode_events(self, duration, events, values, dtype=np.bool): '''Encode labeled events as a time-series matrix. Parameters ---------- duration : number The duration of the track events : ndarray, shape=(n,) Time index of the events values : ndarray, shape=(n, m) Values array. Must have the same first index as `events`. dtype : numpy data type Returns ------- target : ndarray, shape=(n_frames, n_values) ''' frames = time_to_frames(events, sr=self.sr, hop_length=self.hop_length) n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length)) n_alloc = n_total if np.any(frames): n_alloc = max(n_total, 1 + int(frames.max())) target = np.empty((n_alloc, values.shape[1]), dtype=dtype) target.fill(fill_value(dtype)) values = values.astype(dtype) for column, event in zip(values, frames): target[event] += column return target[:n_total]
python
def encode_events(self, duration, events, values, dtype=np.bool): '''Encode labeled events as a time-series matrix. Parameters ---------- duration : number The duration of the track events : ndarray, shape=(n,) Time index of the events values : ndarray, shape=(n, m) Values array. Must have the same first index as `events`. dtype : numpy data type Returns ------- target : ndarray, shape=(n_frames, n_values) ''' frames = time_to_frames(events, sr=self.sr, hop_length=self.hop_length) n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length)) n_alloc = n_total if np.any(frames): n_alloc = max(n_total, 1 + int(frames.max())) target = np.empty((n_alloc, values.shape[1]), dtype=dtype) target.fill(fill_value(dtype)) values = values.astype(dtype) for column, event in zip(values, frames): target[event] += column return target[:n_total]
[ "def", "encode_events", "(", "self", ",", "duration", ",", "events", ",", "values", ",", "dtype", "=", "np", ".", "bool", ")", ":", "frames", "=", "time_to_frames", "(", "events", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "n_total", "=", "int", "(", "time_to_frames", "(", "duration", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", ")", "n_alloc", "=", "n_total", "if", "np", ".", "any", "(", "frames", ")", ":", "n_alloc", "=", "max", "(", "n_total", ",", "1", "+", "int", "(", "frames", ".", "max", "(", ")", ")", ")", "target", "=", "np", ".", "empty", "(", "(", "n_alloc", ",", "values", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "dtype", ")", "target", ".", "fill", "(", "fill_value", "(", "dtype", ")", ")", "values", "=", "values", ".", "astype", "(", "dtype", ")", "for", "column", ",", "event", "in", "zip", "(", "values", ",", "frames", ")", ":", "target", "[", "event", "]", "+=", "column", "return", "target", "[", ":", "n_total", "]" ]
Encode labeled events as a time-series matrix. Parameters ---------- duration : number The duration of the track events : ndarray, shape=(n,) Time index of the events values : ndarray, shape=(n, m) Values array. Must have the same first index as `events`. dtype : numpy data type Returns ------- target : ndarray, shape=(n_frames, n_values)
[ "Encode", "labeled", "events", "as", "a", "time", "-", "series", "matrix", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L131-L170
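A standalone sketch of the event encoding: event times become frame indices, and each event's value vector is accumulated into its frame's row. Sampling parameters and events are assumptions:

```python
import numpy as np
import librosa

sr, hop = 22050, 512
events = np.array([0.5, 1.0, 1.5])    # event times in seconds
values = np.eye(3, dtype=bool)        # one label per event

frames = librosa.time_to_frames(events, sr=sr, hop_length=hop)
n_total = int(librosa.time_to_frames(3.0, sr=sr, hop_length=hop))

target = np.zeros((max(n_total, int(frames.max()) + 1), 3), dtype=bool)
for column, f in zip(values, frames):
    target[f] += column               # boolean accumulate (logical OR)
target = target[:n_total]
```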
bmcfee/pumpp
pumpp/task/base.py
BaseTaskTransformer.encode_intervals
def encode_intervals(self, duration, intervals, values, dtype=np.bool, multi=True, fill=None): '''Encode labeled intervals as a time-series matrix. Parameters ---------- duration : number The duration (in seconds) of the track intervals : np.ndarray, shape=(n, 2) The list of intervals values : np.ndarray, shape=(n, m) The (encoded) values corresponding to each interval dtype : np.dtype The desired output type multi : bool If `True`, allow multiple labels per interval. fill : dtype (optional) Optional default fill value for missing data. If not provided, the default is inferred from `dtype`. Returns ------- target : np.ndarray, shape=(duration * sr / hop_length, m) The labeled interval encoding, sampled at the desired frame rate ''' if fill is None: fill = fill_value(dtype) frames = time_to_frames(intervals, sr=self.sr, hop_length=self.hop_length) n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length)) values = values.astype(dtype) n_alloc = n_total if np.any(frames): n_alloc = max(n_total, 1 + int(frames.max())) target = np.empty((n_alloc, values.shape[1]), dtype=dtype) target.fill(fill) for column, interval in zip(values, frames): if multi: target[interval[0]:interval[1]] += column else: target[interval[0]:interval[1]] = column return target[:n_total]
python
def encode_intervals(self, duration, intervals, values, dtype=np.bool, multi=True, fill=None): '''Encode labeled intervals as a time-series matrix. Parameters ---------- duration : number The duration (in seconds) of the track intervals : np.ndarray, shape=(n, 2) The list of intervals values : np.ndarray, shape=(n, m) The (encoded) values corresponding to each interval dtype : np.dtype The desired output type multi : bool If `True`, allow multiple labels per interval. fill : dtype (optional) Optional default fill value for missing data. If not provided, the default is inferred from `dtype`. Returns ------- target : np.ndarray, shape=(duration * sr / hop_length, m) The labeled interval encoding, sampled at the desired frame rate ''' if fill is None: fill = fill_value(dtype) frames = time_to_frames(intervals, sr=self.sr, hop_length=self.hop_length) n_total = int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length)) values = values.astype(dtype) n_alloc = n_total if np.any(frames): n_alloc = max(n_total, 1 + int(frames.max())) target = np.empty((n_alloc, values.shape[1]), dtype=dtype) target.fill(fill) for column, interval in zip(values, frames): if multi: target[interval[0]:interval[1]] += column else: target[interval[0]:interval[1]] = column return target[:n_total]
[ "def", "encode_intervals", "(", "self", ",", "duration", ",", "intervals", ",", "values", ",", "dtype", "=", "np", ".", "bool", ",", "multi", "=", "True", ",", "fill", "=", "None", ")", ":", "if", "fill", "is", "None", ":", "fill", "=", "fill_value", "(", "dtype", ")", "frames", "=", "time_to_frames", "(", "intervals", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "n_total", "=", "int", "(", "time_to_frames", "(", "duration", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", ")", "values", "=", "values", ".", "astype", "(", "dtype", ")", "n_alloc", "=", "n_total", "if", "np", ".", "any", "(", "frames", ")", ":", "n_alloc", "=", "max", "(", "n_total", ",", "1", "+", "int", "(", "frames", ".", "max", "(", ")", ")", ")", "target", "=", "np", ".", "empty", "(", "(", "n_alloc", ",", "values", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "dtype", ")", "target", ".", "fill", "(", "fill", ")", "for", "column", ",", "interval", "in", "zip", "(", "values", ",", "frames", ")", ":", "if", "multi", ":", "target", "[", "interval", "[", "0", "]", ":", "interval", "[", "1", "]", "]", "+=", "column", "else", ":", "target", "[", "interval", "[", "0", "]", ":", "interval", "[", "1", "]", "]", "=", "column", "return", "target", "[", ":", "n_total", "]" ]
Encode labeled intervals as a time-series matrix. Parameters ---------- duration : number The duration (in seconds) of the track intervals : np.ndarray, shape=(n, 2) The list of intervals values : np.ndarray, shape=(n, m) The (encoded) values corresponding to each interval dtype : np.dtype The desired output type multi : bool If `True`, allow multiple labels per interval. fill : dtype (optional) Optional default fill value for missing data. If not provided, the default is inferred from `dtype`. Returns ------- target : np.ndarray, shape=(duration * sr / hop_length, m) The labeled interval encoding, sampled at the desired frame rate
[ "Encode", "labeled", "intervals", "as", "a", "time", "-", "series", "matrix", "." ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L172-L230
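The interval encoder paints each labeled interval's value over its frame range; `+=` accumulates overlaps when `multi=True`, plain assignment overwrites otherwise. A sketch with made-up intervals:

```python
import numpy as np
import librosa

sr, hop = 22050, 512
intervals = np.array([[0.0, 1.0], [1.0, 2.5]])   # seconds
values = np.array([[1, 0], [0, 1]], dtype=bool)

frames = librosa.time_to_frames(intervals, sr=sr, hop_length=hop)
n_total = int(librosa.time_to_frames(2.5, sr=sr, hop_length=hop))

target = np.zeros((max(n_total, int(frames.max()) + 1), 2), dtype=bool)
for column, (start, end) in zip(values, frames):
    target[start:end] = column        # multi=False behaviour
target = target[:n_total]
```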
bmcfee/pumpp
pumpp/task/base.py
BaseTaskTransformer.decode_events
def decode_events(self, encoded, transition=None, p_state=None, p_init=None): '''Decode labeled events into (time, value) pairs Real-valued inputs are thresholded at 0.5. Optionally, viterbi decoding can be applied to each event class. Parameters ---------- encoded : np.ndarray, shape=(n_frames, m) Frame-level annotation encodings as produced by ``encode_events``. transition : None or np.ndarray [shape=(2, 2) or (m, 2, 2)] Optional transition matrix for each event, used for Viterbi p_state : None or np.ndarray [shape=(m,)] Optional marginal probability for each event p_init : None or np.ndarray [shape=(m,)] Optional marginal probability for each event Returns ------- [(time, value)] : iterable of tuples where `time` is the event time and `value` is an np.ndarray, shape=(m,) of the encoded value at that time See Also -------- librosa.sequence.viterbi_binary ''' if np.isrealobj(encoded): if transition is None: encoded = (encoded >= 0.5) else: encoded = viterbi_binary(encoded.T, transition, p_state=p_state, p_init=p_init).T times = times_like(encoded, sr=self.sr, hop_length=self.hop_length, axis=0) return zip(times, encoded)
python
def decode_events(self, encoded, transition=None, p_state=None, p_init=None): '''Decode labeled events into (time, value) pairs Real-valued inputs are thresholded at 0.5. Optionally, viterbi decoding can be applied to each event class. Parameters ---------- encoded : np.ndarray, shape=(n_frames, m) Frame-level annotation encodings as produced by ``encode_events``. transition : None or np.ndarray [shape=(2, 2) or (m, 2, 2)] Optional transition matrix for each event, used for Viterbi p_state : None or np.ndarray [shape=(m,)] Optional marginal probability for each event p_init : None or np.ndarray [shape=(m,)] Optional marginal probability for each event Returns ------- [(time, value)] : iterable of tuples where `time` is the event time and `value` is an np.ndarray, shape=(m,) of the encoded value at that time See Also -------- librosa.sequence.viterbi_binary ''' if np.isrealobj(encoded): if transition is None: encoded = (encoded >= 0.5) else: encoded = viterbi_binary(encoded.T, transition, p_state=p_state, p_init=p_init).T times = times_like(encoded, sr=self.sr, hop_length=self.hop_length, axis=0) return zip(times, encoded)
[ "def", "decode_events", "(", "self", ",", "encoded", ",", "transition", "=", "None", ",", "p_state", "=", "None", ",", "p_init", "=", "None", ")", ":", "if", "np", ".", "isrealobj", "(", "encoded", ")", ":", "if", "transition", "is", "None", ":", "encoded", "=", "(", "encoded", ">=", "0.5", ")", "else", ":", "encoded", "=", "viterbi_binary", "(", "encoded", ".", "T", ",", "transition", ",", "p_state", "=", "p_state", ",", "p_init", "=", "p_init", ")", ".", "T", "times", "=", "times_like", "(", "encoded", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ",", "axis", "=", "0", ")", "return", "zip", "(", "times", ",", "encoded", ")" ]
Decode labeled events into (time, value) pairs Real-valued inputs are thresholded at 0.5. Optionally, viterbi decoding can be applied to each event class. Parameters ---------- encoded : np.ndarray, shape=(n_frames, m) Frame-level annotation encodings as produced by ``encode_events``. transition : None or np.ndarray [shape=(2, 2) or (m, 2, 2)] Optional transition matrix for each event, used for Viterbi p_state : None or np.ndarray [shape=(m,)] Optional marginal probability for each event p_init : None or np.ndarray [shape=(m,)] Optional marginal probability for each event Returns ------- [(time, value)] : iterable of tuples where `time` is the event time and `value` is an np.ndarray, shape=(m,) of the encoded value at that time See Also -------- librosa.sequence.viterbi_binary
[ "Decode", "labeled", "events", "into", "(", "time", "value", ")", "pairs" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L232-L276
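Decoding sketch: real-valued activations are either thresholded at 0.5 or smoothed per class with binary Viterbi. Array shapes and parameters are illustrative:

```python
import numpy as np
import librosa
from librosa.sequence import transition_loop, viterbi_binary

sr, hop = 22050, 512
probs = np.random.rand(100, 4)     # (frames, event classes)

hard = probs >= 0.5                # no transition model
smooth = viterbi_binary(probs.T, transition_loop(2, 0.9)).T

times = librosa.times_like(probs.shape[0], sr=sr, hop_length=hop)
decoded = list(zip(times, smooth))
```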
bmcfee/pumpp
pumpp/task/base.py
BaseTaskTransformer.decode_intervals
def decode_intervals(self, encoded, duration=None, multi=True, sparse=False, transition=None, p_state=None, p_init=None): '''Decode labeled intervals into (start, end, value) triples Parameters ---------- encoded : np.ndarray, shape=(n_frames, m) Frame-level annotation encodings as produced by ``encode_intervals`` duration : None or float > 0 The max duration of the annotation (in seconds) Must be greater than the length of encoded array. multi : bool If true, allow multiple labels per input frame. If false, take the most likely label per input frame. sparse : bool If true, values are returned as indices, not one-hot. If false, values are returned as one-hot encodings. Only applies when `multi=False`. transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)] Optional transition matrix for each interval, used for Viterbi decoding. If `multi=True`, then transition should be `(2, 2)` or `(m, 2, 2)`-shaped. If `multi=False`, then transition should be `(m, m)`-shaped. p_state : None or np.ndarray [shape=(m,)] Optional marginal probability for each label. p_init : None or np.ndarray [shape=(m,)] Optional marginal probability for each label. Returns ------- [(start, end, value)] : iterable of tuples where `start` and `end` are the interval boundaries (in seconds) and `value` is an np.ndarray, shape=(m,) of the encoded value for this interval. ''' if np.isrealobj(encoded): if multi: if transition is None: encoded = encoded >= 0.5 else: encoded = viterbi_binary(encoded.T, transition, p_init=p_init, p_state=p_state).T elif sparse and encoded.shape[1] > 1: # map to argmax if it's densely encoded (logits) if transition is None: encoded = np.argmax(encoded, axis=1)[:, np.newaxis] else: encoded = viterbi_discriminative(encoded.T, transition, p_init=p_init, p_state=p_state)[:, np.newaxis] elif not sparse: # if dense and multi, map to one-hot encoding if transition is None: encoded = (encoded == np.max(encoded, axis=1, keepdims=True)) else: encoded_ = viterbi_discriminative(encoded.T, transition, p_init=p_init, p_state=p_state) # Map to one-hot encoding encoded = np.zeros(encoded.shape, dtype=bool) encoded[np.arange(len(encoded_)), encoded_] = True if duration is None: # 1+ is fair here, because encode_intervals already pads duration = 1 + encoded.shape[0] else: duration = 1 + time_to_frames(duration, sr=self.sr, hop_length=self.hop_length) # [0, duration] inclusive times = times_like(duration + 1, sr=self.sr, hop_length=self.hop_length) # Find the change-points of the rows if sparse: idx = np.where(encoded[1:] != encoded[:-1])[0] else: idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0] idx = np.unique(np.append(idx, encoded.shape[0])) delta = np.diff(np.append(-1, idx)) # Starting positions can be integrated from changes position = np.cumsum(np.append(0, delta)) return [(times[p], times[p + d], encoded[p]) for (p, d) in zip(position, delta)]
python
def decode_intervals(self, encoded, duration=None, multi=True, sparse=False, transition=None, p_state=None, p_init=None): '''Decode labeled intervals into (start, end, value) triples Parameters ---------- encoded : np.ndarray, shape=(n_frames, m) Frame-level annotation encodings as produced by ``encode_intervals`` duration : None or float > 0 The max duration of the annotation (in seconds) Must be greater than the length of encoded array. multi : bool If true, allow multiple labels per input frame. If false, take the most likely label per input frame. sparse : bool If true, values are returned as indices, not one-hot. If false, values are returned as one-hot encodings. Only applies when `multi=False`. transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)] Optional transition matrix for each interval, used for Viterbi decoding. If `multi=True`, then transition should be `(2, 2)` or `(m, 2, 2)`-shaped. If `multi=False`, then transition should be `(m, m)`-shaped. p_state : None or np.ndarray [shape=(m,)] Optional marginal probability for each label. p_init : None or np.ndarray [shape=(m,)] Optional marginal probability for each label. Returns ------- [(start, end, value)] : iterable of tuples where `start` and `end` are the interval boundaries (in seconds) and `value` is an np.ndarray, shape=(m,) of the encoded value for this interval. ''' if np.isrealobj(encoded): if multi: if transition is None: encoded = encoded >= 0.5 else: encoded = viterbi_binary(encoded.T, transition, p_init=p_init, p_state=p_state).T elif sparse and encoded.shape[1] > 1: # map to argmax if it's densely encoded (logits) if transition is None: encoded = np.argmax(encoded, axis=1)[:, np.newaxis] else: encoded = viterbi_discriminative(encoded.T, transition, p_init=p_init, p_state=p_state)[:, np.newaxis] elif not sparse: # if dense and multi, map to one-hot encoding if transition is None: encoded = (encoded == np.max(encoded, axis=1, keepdims=True)) else: encoded_ = viterbi_discriminative(encoded.T, transition, p_init=p_init, p_state=p_state) # Map to one-hot encoding encoded = np.zeros(encoded.shape, dtype=bool) encoded[np.arange(len(encoded_)), encoded_] = True if duration is None: # 1+ is fair here, because encode_intervals already pads duration = 1 + encoded.shape[0] else: duration = 1 + time_to_frames(duration, sr=self.sr, hop_length=self.hop_length) # [0, duration] inclusive times = times_like(duration + 1, sr=self.sr, hop_length=self.hop_length) # Find the change-points of the rows if sparse: idx = np.where(encoded[1:] != encoded[:-1])[0] else: idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0] idx = np.unique(np.append(idx, encoded.shape[0])) delta = np.diff(np.append(-1, idx)) # Starting positions can be integrated from changes position = np.cumsum(np.append(0, delta)) return [(times[p], times[p + d], encoded[p]) for (p, d) in zip(position, delta)]
[ "def", "decode_intervals", "(", "self", ",", "encoded", ",", "duration", "=", "None", ",", "multi", "=", "True", ",", "sparse", "=", "False", ",", "transition", "=", "None", ",", "p_state", "=", "None", ",", "p_init", "=", "None", ")", ":", "if", "np", ".", "isrealobj", "(", "encoded", ")", ":", "if", "multi", ":", "if", "transition", "is", "None", ":", "encoded", "=", "encoded", ">=", "0.5", "else", ":", "encoded", "=", "viterbi_binary", "(", "encoded", ".", "T", ",", "transition", ",", "p_init", "=", "p_init", ",", "p_state", "=", "p_state", ")", ".", "T", "elif", "sparse", "and", "encoded", ".", "shape", "[", "1", "]", ">", "1", ":", "# map to argmax if it's densely encoded (logits)", "if", "transition", "is", "None", ":", "encoded", "=", "np", ".", "argmax", "(", "encoded", ",", "axis", "=", "1", ")", "[", ":", ",", "np", ".", "newaxis", "]", "else", ":", "encoded", "=", "viterbi_discriminative", "(", "encoded", ".", "T", ",", "transition", ",", "p_init", "=", "p_init", ",", "p_state", "=", "p_state", ")", "[", ":", ",", "np", ".", "newaxis", "]", "elif", "not", "sparse", ":", "# if dense and multi, map to one-hot encoding", "if", "transition", "is", "None", ":", "encoded", "=", "(", "encoded", "==", "np", ".", "max", "(", "encoded", ",", "axis", "=", "1", ",", "keepdims", "=", "True", ")", ")", "else", ":", "encoded_", "=", "viterbi_discriminative", "(", "encoded", ".", "T", ",", "transition", ",", "p_init", "=", "p_init", ",", "p_state", "=", "p_state", ")", "# Map to one-hot encoding", "encoded", "=", "np", ".", "zeros", "(", "encoded", ".", "shape", ",", "dtype", "=", "bool", ")", "encoded", "[", "np", ".", "arange", "(", "len", "(", "encoded_", ")", ")", ",", "encoded_", "]", "=", "True", "if", "duration", "is", "None", ":", "# 1+ is fair here, because encode_intervals already pads", "duration", "=", "1", "+", "encoded", ".", "shape", "[", "0", "]", "else", ":", "duration", "=", "1", "+", "time_to_frames", "(", "duration", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# [0, duration] inclusive", "times", "=", "times_like", "(", "duration", "+", "1", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# Find the change-points of the rows", "if", "sparse", ":", "idx", "=", "np", ".", "where", "(", "encoded", "[", "1", ":", "]", "!=", "encoded", "[", ":", "-", "1", "]", ")", "[", "0", "]", "else", ":", "idx", "=", "np", ".", "where", "(", "np", ".", "max", "(", "encoded", "[", "1", ":", "]", "!=", "encoded", "[", ":", "-", "1", "]", ",", "axis", "=", "-", "1", ")", ")", "[", "0", "]", "idx", "=", "np", ".", "unique", "(", "np", ".", "append", "(", "idx", ",", "encoded", ".", "shape", "[", "0", "]", ")", ")", "delta", "=", "np", ".", "diff", "(", "np", ".", "append", "(", "-", "1", ",", "idx", ")", ")", "# Starting positions can be integrated from changes", "position", "=", "np", ".", "cumsum", "(", "np", ".", "append", "(", "0", ",", "delta", ")", ")", "return", "[", "(", "times", "[", "p", "]", ",", "times", "[", "p", "+", "d", "]", ",", "encoded", "[", "p", "]", ")", "for", "(", "p", ",", "d", ")", "in", "zip", "(", "position", ",", "delta", ")", "]" ]
Decode labeled intervals into (start, end, value) triples Parameters ---------- encoded : np.ndarray, shape=(n_frames, m) Frame-level annotation encodings as produced by ``encode_intervals`` duration : None or float > 0 The max duration of the annotation (in seconds) Must be greater than the length of encoded array. multi : bool If true, allow multiple labels per input frame. If false, take the most likely label per input frame. sparse : bool If true, values are returned as indices, not one-hot. If false, values are returned as one-hot encodings. Only applies when `multi=False`. transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)] Optional transition matrix for each interval, used for Viterbi decoding. If `multi=True`, then transition should be `(2, 2)` or `(m, 2, 2)`-shaped. If `multi=False`, then transition should be `(m, m)`-shaped. p_state : None or np.ndarray [shape=(m,)] Optional marginal probability for each label. p_init : None or np.ndarray [shape=(m,)] Optional marginal probability for each label. Returns ------- [(start, end, value)] : iterable of tuples where `start` and `end` are the interval boundaries (in seconds) and `value` is an np.ndarray, shape=(m,) of the encoded value for this interval.
[ "Decode", "labeled", "intervals", "into", "(", "start", "end", "value", ")", "triples" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/task/base.py#L278-L373
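The tail of `decode_intervals` extracts change points and turns run lengths into spans. A sketch over a toy encoding, in frame indices (the real code maps these through a padded times array):

```python
import numpy as np

encoded = np.array([[1, 0]] * 5 + [[0, 1]] * 3, dtype=bool)

# Rows where any column changes, plus a sentinel at the end
idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0]
idx = np.unique(np.append(idx, encoded.shape[0]))

delta = np.diff(np.append(-1, idx))        # run lengths
position = np.cumsum(np.append(0, delta))  # run start frames

segments = [(p, p + d, encoded[p]) for p, d in zip(position, delta)]
print(segments)  # spans (0, 5) and (5, 9), each with its row value
```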
bmcfee/pumpp
pumpp/feature/base.py
FeatureExtractor.transform
def transform(self, y, sr): '''Transform an audio signal Parameters ---------- y : np.ndarray The audio signal sr : number > 0 The native sampling rate of y Returns ------- dict Data dictionary containing features extracted from y See Also -------- transform_audio ''' if sr != self.sr: y = resample(y, sr, self.sr) return self.merge([self.transform_audio(y)])
python
def transform(self, y, sr): '''Transform an audio signal Parameters ---------- y : np.ndarray The audio signal sr : number > 0 The native sampling rate of y Returns ------- dict Data dictionary containing features extracted from y See Also -------- transform_audio ''' if sr != self.sr: y = resample(y, sr, self.sr) return self.merge([self.transform_audio(y)])
[ "def", "transform", "(", "self", ",", "y", ",", "sr", ")", ":", "if", "sr", "!=", "self", ".", "sr", ":", "y", "=", "resample", "(", "y", ",", "sr", ",", "self", ".", "sr", ")", "return", "self", ".", "merge", "(", "[", "self", ".", "transform_audio", "(", "y", ")", "]", ")" ]
Transform an audio signal Parameters ---------- y : np.ndarray The audio signal sr : number > 0 The native sampling rate of y Returns ------- dict Data dictionary containing features extracted from y See Also -------- transform_audio
[ "Transform", "an", "audio", "signal" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/base.py#L71-L94
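A sketch of the resampling guard in `FeatureExtractor.transform`: audio at a foreign rate is resampled to the extractor's rate before features are computed. The file and rates are assumptions:

```python
import librosa

y, sr = librosa.load(librosa.ex('trumpet'), sr=None)  # keep native rate
target_sr = 22050
if sr != target_sr:
    y = librosa.resample(y, orig_sr=sr, target_sr=target_sr)
```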
bmcfee/pumpp
pumpp/feature/base.py
FeatureExtractor.phase_diff
def phase_diff(self, phase): '''Compute the phase differential along a given axis Parameters ---------- phase : np.ndarray Input phase (in radians) Returns ------- dphase : np.ndarray like `phase` The phase differential. ''' if self.conv is None: axis = 0 elif self.conv in ('channels_last', 'tf'): axis = 0 elif self.conv in ('channels_first', 'th'): axis = 1 # Compute the phase differential dphase = np.empty(phase.shape, dtype=phase.dtype) zero_idx = [slice(None)] * phase.ndim zero_idx[axis] = slice(1) else_idx = [slice(None)] * phase.ndim else_idx[axis] = slice(1, None) zero_idx = tuple(zero_idx) else_idx = tuple(else_idx) dphase[zero_idx] = phase[zero_idx] dphase[else_idx] = np.diff(np.unwrap(phase, axis=axis), axis=axis) return dphase
python
def phase_diff(self, phase): '''Compute the phase differential along a given axis Parameters ---------- phase : np.ndarray Input phase (in radians) Returns ------- dphase : np.ndarray like `phase` The phase differential. ''' if self.conv is None: axis = 0 elif self.conv in ('channels_last', 'tf'): axis = 0 elif self.conv in ('channels_first', 'th'): axis = 1 # Compute the phase differential dphase = np.empty(phase.shape, dtype=phase.dtype) zero_idx = [slice(None)] * phase.ndim zero_idx[axis] = slice(1) else_idx = [slice(None)] * phase.ndim else_idx[axis] = slice(1, None) zero_idx = tuple(zero_idx) else_idx = tuple(else_idx) dphase[zero_idx] = phase[zero_idx] dphase[else_idx] = np.diff(np.unwrap(phase, axis=axis), axis=axis) return dphase
[ "def", "phase_diff", "(", "self", ",", "phase", ")", ":", "if", "self", ".", "conv", "is", "None", ":", "axis", "=", "0", "elif", "self", ".", "conv", "in", "(", "'channels_last'", ",", "'tf'", ")", ":", "axis", "=", "0", "elif", "self", ".", "conv", "in", "(", "'channels_first'", ",", "'th'", ")", ":", "axis", "=", "1", "# Compute the phase differential", "dphase", "=", "np", ".", "empty", "(", "phase", ".", "shape", ",", "dtype", "=", "phase", ".", "dtype", ")", "zero_idx", "=", "[", "slice", "(", "None", ")", "]", "*", "phase", ".", "ndim", "zero_idx", "[", "axis", "]", "=", "slice", "(", "1", ")", "else_idx", "=", "[", "slice", "(", "None", ")", "]", "*", "phase", ".", "ndim", "else_idx", "[", "axis", "]", "=", "slice", "(", "1", ",", "None", ")", "zero_idx", "=", "tuple", "(", "zero_idx", ")", "else_idx", "=", "tuple", "(", "else_idx", ")", "dphase", "[", "zero_idx", "]", "=", "phase", "[", "zero_idx", "]", "dphase", "[", "else_idx", "]", "=", "np", ".", "diff", "(", "np", ".", "unwrap", "(", "phase", ",", "axis", "=", "axis", ")", ",", "axis", "=", "axis", ")", "return", "dphase" ]
Compute the phase differential along a given axis Parameters ---------- phase : np.ndarray Input phase (in radians) Returns ------- dphase : np.ndarray like `phase` The phase differential.
[ "Compute", "the", "phase", "differential", "along", "a", "given", "axis" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/base.py#L99-L130
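The phase differential standalone: unwrap along the time axis, take first differences, and keep the initial phase in frame 0 (the `channels_last` case, where time is axis 0):

```python
import numpy as np

phase = np.random.uniform(-np.pi, np.pi, size=(100, 36)).astype(np.float32)

dphase = np.empty_like(phase)
dphase[0] = phase[0]
dphase[1:] = np.diff(np.unwrap(phase, axis=0), axis=0)
```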
bmcfee/pumpp
pumpp/feature/base.py
FeatureExtractor.layers
def layers(self): '''Construct Keras input layers for the given transformer Returns ------- layers : {field: keras.layers.Input} A dictionary of keras input layers, keyed by the corresponding field keys. ''' from keras.layers import Input L = dict() for key in self.fields: L[key] = Input(name=key, shape=self.fields[key].shape, dtype=self.fields[key].dtype) return L
python
def layers(self): '''Construct Keras input layers for the given transformer Returns ------- layers : {field: keras.layers.Input} A dictionary of keras input layers, keyed by the corresponding field keys. ''' from keras.layers import Input L = dict() for key in self.fields: L[key] = Input(name=key, shape=self.fields[key].shape, dtype=self.fields[key].dtype) return L
[ "def", "layers", "(", "self", ")", ":", "from", "keras", ".", "layers", "import", "Input", "L", "=", "dict", "(", ")", "for", "key", "in", "self", ".", "fields", ":", "L", "[", "key", "]", "=", "Input", "(", "name", "=", "key", ",", "shape", "=", "self", ".", "fields", "[", "key", "]", ".", "shape", ",", "dtype", "=", "self", ".", "fields", "[", "key", "]", ".", "dtype", ")", "return", "L" ]
Construct Keras input layers for the given transformer Returns ------- layers : {field: keras.layers.Input} A dictionary of keras input layers, keyed by the corresponding field keys.
[ "Construct", "Keras", "input", "layers", "for", "the", "given", "transformer" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/base.py#L132-L149
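A sketch of the Keras input construction, with a hypothetical field spec standing in for the transformer's `fields` attribute:

```python
from keras.layers import Input

# Hypothetical spec: pumpp fields carry a shape and dtype per key
fields = {'cqt_mag': {'shape': (None, 216), 'dtype': 'float32'}}

layers = {key: Input(name=key, shape=spec['shape'], dtype=spec['dtype'])
          for key, spec in fields.items()}
```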
bmcfee/pumpp
pumpp/feature/base.py
FeatureExtractor.n_frames
def n_frames(self, duration): '''Get the number of frames for a given duration Parameters ---------- duration : number >= 0 The duration, in seconds Returns ------- n_frames : int >= 0 The number of frames at this extractor's sampling rate and hop length ''' return int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length))
python
def n_frames(self, duration): '''Get the number of frames for a given duration Parameters ---------- duration : number >= 0 The duration, in seconds Returns ------- n_frames : int >= 0 The number of frames at this extractor's sampling rate and hop length ''' return int(time_to_frames(duration, sr=self.sr, hop_length=self.hop_length))
[ "def", "n_frames", "(", "self", ",", "duration", ")", ":", "return", "int", "(", "time_to_frames", "(", "duration", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", ")" ]
Get the number of frames for a given duration Parameters ---------- duration : number >= 0 The duration, in seconds Returns ------- n_frames : int >= 0 The number of frames at this extractor's sampling rate and hop length
[ "Get", "the", "number", "of", "frames", "for", "a", "given", "duration" ]
train
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/feature/base.py#L151-L167
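For reference, librosa's time_to_frames here is essentially integer division after converting seconds to samples. The sketch below assumes the common sr=22050 and hop_length=512 defaults and ignores librosa's exact rounding of fractional samples:

# Pure-python approximation of the conversion used by n_frames().
def n_frames_sketch(duration, sr=22050, hop_length=512):
    return int(duration * sr) // hop_length

print(n_frames_sketch(5.0))                  # 215 frames for 5 s of audio
print(n_frames_sketch(5.0, hop_length=256))  # 430 frames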
novopl/peltak
src/peltak/extra/gitflow/logic/release.py
start
def start(component, exact): # type: (str, str) -> None """ Create a new release branch. Args: component (str): Version component to bump when creating the release. Can be *major*, *minor* or *patch*. exact (str): The exact version to set for the release. Overrides the component argument. This allows to re-release a version if something went wrong with the release upload. """ version_file = conf.get_path('version_file', 'VERSION') develop = conf.get('git.devel_branch', 'develop') common.assert_on_branch(develop) with conf.within_proj_dir(): out = shell.run('git status --porcelain', capture=True).stdout lines = out.split(os.linesep) has_changes = any( not l.startswith('??') for l in lines if l.strip() ) if has_changes: log.info("Cannot release: there are uncommitted changes") exit(1) old_ver, new_ver = versioning.bump(component, exact) log.info("Bumping package version") log.info(" old version: <35>{}".format(old_ver)) log.info(" new version: <35>{}".format(new_ver)) with conf.within_proj_dir(): branch = 'release/' + new_ver common.git_checkout(branch, create=True) log.info("Creating commit for the release") shell.run('git add {ver_file} && git commit -m "{msg}"'.format( ver_file=version_file, msg="Releasing v{}".format(new_ver) ))
python
def start(component, exact): # type: (str, str) -> None """ Create a new release branch. Args: component (str): Version component to bump when creating the release. Can be *major*, *minor* or *patch*. exact (str): The exact version to set for the release. Overrides the component argument. This allows to re-release a version if something went wrong with the release upload. """ version_file = conf.get_path('version_file', 'VERSION') develop = conf.get('git.devel_branch', 'develop') common.assert_on_branch(develop) with conf.within_proj_dir(): out = shell.run('git status --porcelain', capture=True).stdout lines = out.split(os.linesep) has_changes = any( not l.startswith('??') for l in lines if l.strip() ) if has_changes: log.info("Cannot release: there are uncommitted changes") exit(1) old_ver, new_ver = versioning.bump(component, exact) log.info("Bumping package version") log.info(" old version: <35>{}".format(old_ver)) log.info(" new version: <35>{}".format(new_ver)) with conf.within_proj_dir(): branch = 'release/' + new_ver common.git_checkout(branch, create=True) log.info("Creating commit for the release") shell.run('git add {ver_file} && git commit -m "{msg}"'.format( ver_file=version_file, msg="Releasing v{}".format(new_ver) ))
[ "def", "start", "(", "component", ",", "exact", ")", ":", "# type: (str, str) -> None", "version_file", "=", "conf", ".", "get_path", "(", "'version_file'", ",", "'VERSION'", ")", "develop", "=", "conf", ".", "get", "(", "'git.devel_branch'", ",", "'develop'", ")", "common", ".", "assert_on_branch", "(", "develop", ")", "with", "conf", ".", "within_proj_dir", "(", ")", ":", "out", "=", "shell", ".", "run", "(", "'git status --porcelain'", ",", "capture", "=", "True", ")", ".", "stdout", "lines", "=", "out", ".", "split", "(", "os", ".", "linesep", ")", "has_changes", "=", "any", "(", "not", "l", ".", "startswith", "(", "'??'", ")", "for", "l", "in", "lines", "if", "l", ".", "strip", "(", ")", ")", "if", "has_changes", ":", "log", ".", "info", "(", "\"Cannot release: there are uncommitted changes\"", ")", "exit", "(", "1", ")", "old_ver", ",", "new_ver", "=", "versioning", ".", "bump", "(", "component", ",", "exact", ")", "log", ".", "info", "(", "\"Bumping package version\"", ")", "log", ".", "info", "(", "\" old version: <35>{}\"", ".", "format", "(", "old_ver", ")", ")", "log", ".", "info", "(", "\" new version: <35>{}\"", ".", "format", "(", "new_ver", ")", ")", "with", "conf", ".", "within_proj_dir", "(", ")", ":", "branch", "=", "'release/'", "+", "new_ver", "common", ".", "git_checkout", "(", "branch", ",", "create", "=", "True", ")", "log", ".", "info", "(", "\"Creating commit for the release\"", ")", "shell", ".", "run", "(", "'git add {ver_file} && git commit -m \"{msg}\"'", ".", "format", "(", "ver_file", "=", "version_file", ",", "msg", "=", "\"Releasing v{}\"", ".", "format", "(", "new_ver", ")", ")", ")" ]
Create a new release branch. Args: component (str): Version component to bump when creating the release. Can be *major*, *minor* or *patch*. exact (str): The exact version to set for the release. Overrides the component argument. This allows to re-release a version if something went wrong with the release upload.
[ "Create", "a", "new", "release", "branch", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/release.py#L34-L78
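The uncommitted-changes guard in start() hinges on the output of git status --porcelain: untracked files ('??' lines) are tolerated, anything else aborts the release. A standalone sketch of just that check, with peltak's shell wrapper swapped for subprocess (an assumption for illustration, not the library's API):

import os
import subprocess

def has_uncommitted_changes():
    # Untracked files ('??' lines) are ignored; any staged or modified
    # entry counts as an uncommitted change, mirroring the record above.
    out = subprocess.check_output(['git', 'status', '--porcelain']).decode()
    lines = out.split(os.linesep)
    return any(not l.startswith('??') for l in lines if l.strip())

# if has_uncommitted_changes():
#     raise SystemExit('Cannot release: there are uncommitted changes')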
novopl/peltak
src/peltak/extra/gitflow/logic/release.py
tag
def tag(message): # type: (str) -> None """ Tag the current commit with the current version. """ release_ver = versioning.current() message = message or 'v{} release'.format(release_ver) with conf.within_proj_dir(): log.info("Creating release tag") git.tag( author=git.latest_commit().author, name='v{}'.format(release_ver), message=message, )
python
def tag(message): # type: (str) -> None """ Tag the current commit with the current version. """ release_ver = versioning.current() message = message or 'v{} release'.format(release_ver) with conf.within_proj_dir(): log.info("Creating release tag") git.tag( author=git.latest_commit().author, name='v{}'.format(release_ver), message=message, )
[ "def", "tag", "(", "message", ")", ":", "# type: () -> None", "release_ver", "=", "versioning", ".", "current", "(", ")", "message", "=", "message", "or", "'v{} release'", ".", "format", "(", "release_ver", ")", "with", "conf", ".", "within_proj_dir", "(", ")", ":", "log", ".", "info", "(", "\"Creating release tag\"", ")", "git", ".", "tag", "(", "author", "=", "git", ".", "latest_commit", "(", ")", ".", "author", ",", "name", "=", "'v{}'", ".", "format", "(", "release_ver", ")", ",", "message", "=", "message", ",", ")" ]
Tag the current commit with the current version.
[ "Tag", "the", "current", "commit", "with", "the", "current", "version", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/release.py#L144-L156
novopl/peltak
src/peltak/logic/lint.py
lint
def lint(exclude, skip_untracked, commit_only): # type: (List[str], bool, bool) -> None """ Lint python files. Args: exclude (list[str]): A list of glob string patterns to test against. If the file/path matches any of those patterns, it will be filtered out. skip_untracked (bool): If set to **True** it will skip all files not tracked by git. commit_only (bool): Only lint files that are staged for commit. """ exclude = list(exclude) + conf.get('lint.exclude', []) runner = LintRunner(exclude, skip_untracked, commit_only) if not runner.run(): exit(1)
python
def lint(exclude, skip_untracked, commit_only): # type: (List[str], bool, bool) -> None """ Lint python files. Args: exclude (list[str]): A list of glob string patterns to test against. If the file/path matches any of those patterns, it will be filtered out. skip_untracked (bool): If set to **True** it will skip all files not tracked by git. commit_only (bool): Only lint files that are staged for commit. """ exclude = list(exclude) + conf.get('lint.exclude', []) runner = LintRunner(exclude, skip_untracked, commit_only) if not runner.run(): exit(1)
[ "def", "lint", "(", "exclude", ",", "skip_untracked", ",", "commit_only", ")", ":", "# type: (List[str], bool, bool) -> None", "exclude", "=", "list", "(", "exclude", ")", "+", "conf", ".", "get", "(", "'lint.exclude'", ",", "[", "]", ")", "runner", "=", "LintRunner", "(", "exclude", ",", "skip_untracked", ",", "commit_only", ")", "if", "not", "runner", ".", "run", "(", ")", ":", "exit", "(", "1", ")" ]
Lint python files. Args: exclude (list[str]): A list of glob string patterns to test against. If the file/path matches any of those patterns, it will be filtered out. skip_untracked (bool): If set to **True** it will skip all files not tracked by git. commit_only (bool): Only lint files that are staged for commit.
[ "Lint", "python", "files", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/lint.py#L41-L58
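A small sketch of how the CLI excludes combine with configuration: the command-line patterns are simply concatenated with lint.exclude from pelconf.yaml before the runner is built. The plain dict below stands in for the parsed config; peltak's own conf.get('lint.exclude', []) resolves the dotted path for you:

def merged_excludes(cli_excludes, config):
    # Falls back to an empty list when the key is missing, matching
    # conf.get('lint.exclude', []) in the record above.
    return list(cli_excludes) + config.get('lint', {}).get('exclude', [])

print(merged_excludes(('build/*',), {'lint': {'exclude': ['.tox/*']}}))
# ['build/*', '.tox/*']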
novopl/peltak
src/peltak/logic/lint.py
tool
def tool(name): # type: (str) -> FunctionType """ Decorator for defining lint tools. Args: name (str): The name of the tool. This name will be used to identify the tool in `pelconf.yaml`. """ global g_tools def decorator(fn): # pylint: disable=missing-docstring # type: (FunctionType) -> FunctionType g_tools[name] = fn return fn return decorator
python
def tool(name): # type: (str) -> FunctionType """ Decorator for defining lint tools. Args: name (str): The name of the tool. This name will be used to identify the tool in `pelconf.yaml`. """ global g_tools def decorator(fn): # pylint: disable=missing-docstring # type: (FunctionType) -> FunctionType g_tools[name] = fn return fn return decorator
[ "def", "tool", "(", "name", ")", ":", "# type: (str) -> FunctionType", "global", "g_tools", "def", "decorator", "(", "fn", ")", ":", "# pylint: disable=missing-docstring", "# type: (FunctionType) -> FunctionType", "g_tools", "[", "name", "]", "=", "fn", "return", "fn", "return", "decorator" ]
Decorator for defining lint tools. Args: name (str): The name of the tool. This name will be used to identify the tool in `pelconf.yaml`.
[ "Decorator", "for", "defining", "lint", "tools", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/lint.py#L61-L77
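The tool() decorator implements a simple registry pattern: each decorated function is stored in the module-level g_tools dict under the name used in pelconf.yaml. A self-contained sketch of the same idea:

g_tools = {}

def tool(name):
    # Register `fn` under `name` so it can later be looked up by the
    # string configured in pelconf.yaml.
    def decorator(fn):
        g_tools[name] = fn
        return fn
    return decorator

@tool('pep8')
def pep8_check_stub(files):
    return 0

print(g_tools['pep8'] is pep8_check_stub)  # True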
novopl/peltak
src/peltak/logic/lint.py
pep8_check
def pep8_check(files): # type: (List[str]) -> int """ Run code checks using pep8. Args: files (list[str]): A list of files to check Returns: int: The pep8 return code; **0** means all files passed the checks. pep8 tool is **very** fast. Especially compared to pylint and the bigger the code base the bigger the difference. If you want to reduce check times you might disable all pep8 checks in pylint and use pep8 for that. This way you use pylint only for the more advanced checks (the number of checks enabled in pylint will make a visible difference in its run times). """ files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pep8_cfg', 'ops/tools/pep8.ini') pep8_cmd = 'pep8 --config {} {}'.format(cfg_path, files) return shell.run(pep8_cmd, exit_on_error=False).return_code
python
def pep8_check(files): # type: (List[str]) -> int """ Run code checks using pep8. Args: files (list[str]): A list of files to check Returns: int: The pep8 return code; **0** means all files passed the checks. pep8 tool is **very** fast. Especially compared to pylint and the bigger the code base the bigger the difference. If you want to reduce check times you might disable all pep8 checks in pylint and use pep8 for that. This way you use pylint only for the more advanced checks (the number of checks enabled in pylint will make a visible difference in its run times). """ files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pep8_cfg', 'ops/tools/pep8.ini') pep8_cmd = 'pep8 --config {} {}'.format(cfg_path, files) return shell.run(pep8_cmd, exit_on_error=False).return_code
[ "def", "pep8_check", "(", "files", ")", ":", "# type: (List[str]) -> int", "files", "=", "fs", ".", "wrap_paths", "(", "files", ")", "cfg_path", "=", "conf", ".", "get_path", "(", "'lint.pep8_cfg'", ",", "'ops/tools/pep8.ini'", ")", "pep8_cmd", "=", "'pep8 --config {} {}'", ".", "format", "(", "cfg_path", ",", "files", ")", "return", "shell", ".", "run", "(", "pep8_cmd", ",", "exit_on_error", "=", "False", ")", ".", "return_code" ]
Run code checks using pep8. Args: files (list[str]): A list of files to check Returns: int: The pep8 return code; **0** means all files passed the checks. pep8 tool is **very** fast. Especially compared to pylint and the bigger the code base the bigger the difference. If you want to reduce check times you might disable all pep8 checks in pylint and use pep8 for that. This way you use pylint only for the more advanced checks (the number of checks enabled in pylint will make a visible difference in its run times).
[ "Run", "code", "checks", "using", "pep8", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/lint.py#L208-L229
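The command construction above reduces to string formatting: the configured ini file plus the already wrapped file list. A hedged sketch in which fs.wrap_paths is replaced by a rough quoting stand-in, for illustration only:

def build_pep8_cmd(files, cfg_path='ops/tools/pep8.ini'):
    # Rough stand-in for fs.wrap_paths: quote each path and join.
    file_list = ' '.join('"{}"'.format(f) for f in files)
    return 'pep8 --config {} {}'.format(cfg_path, file_list)

print(build_pep8_cmd(['src/app.py', 'src/util.py']))
# pep8 --config ops/tools/pep8.ini "src/app.py" "src/util.py"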
novopl/peltak
src/peltak/logic/lint.py
pylint_check
def pylint_check(files): # type: (List[str]) -> int """ Run code checks using pylint. Args: files (list[str]): A list of files to check Returns: int: The pylint return code; **0** means all files passed the checks. """ files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini') pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files) return shell.run(pylint_cmd, exit_on_error=False).return_code
python
def pylint_check(files): # type: (List[str]) -> int """ Run code checks using pylint. Args: files (list[str]): A list of files to check Returns: int: The pylint return code; **0** means all files passed the checks. """ files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini') pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files) return shell.run(pylint_cmd, exit_on_error=False).return_code
[ "def", "pylint_check", "(", "files", ")", ":", "# type: (List[str]) -> int", "files", "=", "fs", ".", "wrap_paths", "(", "files", ")", "cfg_path", "=", "conf", ".", "get_path", "(", "'lint.pylint_cfg'", ",", "'ops/tools/pylint.ini'", ")", "pylint_cmd", "=", "'pylint --rcfile {} {}'", ".", "format", "(", "cfg_path", ",", "files", ")", "return", "shell", ".", "run", "(", "pylint_cmd", ",", "exit_on_error", "=", "False", ")", ".", "return_code" ]
Run code checks using pylint. Args: files (list[str]): A list of files to check Returns: int: The pylint return code; **0** means all files passed the checks.
[ "Run", "code", "checks", "using", "pylint", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/lint.py#L233-L248
novopl/peltak
src/peltak/logic/lint.py
LintRunner.run
def run(self): # type: () -> bool """ Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise. """ with util.timed_block() as t: files = self._collect_files() log.info("Collected <33>{} <32>files in <33>{}s".format( len(files), t.elapsed_s )) if self.verbose: for p in files: log.info(" <0>{}", p) # No files to lint - return success if empty runs are allowed. if not files: return self.allow_empty with util.timed_block() as t: results = self._run_checks(files) log.info("Code checked in <33>{}s", t.elapsed_s) success = True for name, retcodes in results.items(): if any(x != 0 for x in retcodes): success = False log.err("<35>{} <31>failed with: <33>{}".format( name, retcodes )) return success
python
def run(self): # type: () -> bool """ Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise. """ with util.timed_block() as t: files = self._collect_files() log.info("Collected <33>{} <32>files in <33>{}s".format( len(files), t.elapsed_s )) if self.verbose: for p in files: log.info(" <0>{}", p) # No files to lint - return success if empty runs are allowed. if not files: return self.allow_empty with util.timed_block() as t: results = self._run_checks(files) log.info("Code checked in <33>{}s", t.elapsed_s) success = True for name, retcodes in results.items(): if any(x != 0 for x in retcodes): success = False log.err("<35>{} <31>failed with: <33>{}".format( name, retcodes )) return success
[ "def", "run", "(", "self", ")", ":", "# type: () -> bool", "with", "util", ".", "timed_block", "(", ")", "as", "t", ":", "files", "=", "self", ".", "_collect_files", "(", ")", "log", ".", "info", "(", "\"Collected <33>{} <32>files in <33>{}s\"", ".", "format", "(", "len", "(", "files", ")", ",", "t", ".", "elapsed_s", ")", ")", "if", "self", ".", "verbose", ":", "for", "p", "in", "files", ":", "log", ".", "info", "(", "\" <0>{}\"", ",", "p", ")", "# No files to lint - return success if empty runs are allowed.", "if", "not", "files", ":", "return", "self", ".", "allow_empty", "with", "util", ".", "timed_block", "(", ")", "as", "t", ":", "results", "=", "self", ".", "_run_checks", "(", "files", ")", "log", ".", "info", "(", "\"Code checked in <33>{}s\"", ",", "t", ".", "elapsed_s", ")", "success", "=", "True", "for", "name", ",", "retcodes", "in", "results", ".", "items", "(", ")", ":", "if", "any", "(", "x", "!=", "0", "for", "x", "in", "retcodes", ")", ":", "success", "=", "False", "log", ".", "err", "(", "\"<35>{} <31>failed with: <33>{}\"", ".", "format", "(", "name", ",", "retcodes", ")", ")", "return", "success" ]
Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise.
[ "Run", "all", "linters", "and", "report", "results", "." ]
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/lint.py#L118-L152
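The result aggregation at the end of run() treats a tool as failed if any of its return codes is non-zero, and overall success requires every tool to pass. The same logic in isolation:

def aggregate(results):
    # `results` maps tool name -> list of return codes (one per batch).
    success = True
    for name, retcodes in results.items():
        if any(code != 0 for code in retcodes):
            success = False
            print('{} failed with: {}'.format(name, retcodes))
    return success

print(aggregate({'pep8': [0, 0], 'pylint': [0, 4]}))  # pylint failed -> False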
dacker-team/pyzure
pyzure/send/send.py
send_to_azure
def send_to_azure(instance, data, replace=True, types=None, primary_key=(), sub_commit=True): """ data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] } """ # Time initialization start = datetime.datetime.now() # Extract info rows = data["rows"] if not rows: return 0 table_name = data["table_name"] columns_name = data["columns_name"] total_len_data = len(rows) # Create table if needed if not existing_test(instance, table_name) or (types is not None) or (primary_key != ()): create.create_table(instance, data, primary_key, types) # Clean table if needed if replace: cleaning_function(instance, table_name) connection_kwargs = credential(instance) # Create an SSH tunnel ssh_host = os.environ.get("SSH_%s_HOST" % instance) ssh_user = os.environ.get("SSH_%s_USER" % instance) ssh_path_private_key = os.environ.get("SSH_%s_PATH_PRIVATE_KEY" % instance) if ssh_host: tunnel = SSHTunnelForwarder( (ssh_host, 22), ssh_username=ssh_user, ssh_private_key=ssh_path_private_key, remote_bind_address=( os.environ.get("AZURE_%s_HOST" % instance), int(os.environ.get("AZURE_%s_PORT" % instance))), local_bind_address=('localhost', 1433), # could be any available port ) # Start the tunnel try: tunnel.start() print("Tunnel opened!") except sshtunnel.HandlerSSHTunnelForwarderError: pass connection_kwargs["host"] = "localhost,1433" connection_kwargs["port"] = 1433 cnxn = pyodbc.connect(**connection_kwargs) cursor = cnxn.cursor() small_batch_size = int(2099 / len(columns_name)) print("Initiate send_to_azure...") # Initialize counters boolean = True question_mark_pattern = "(%s)" % ",".join(["?" for i in range(len(rows[0]))]) counter = 0 while boolean: temp_row = [] question_mark_list = [] for i in range(small_batch_size): if rows: temp_row.append(rows.pop()) question_mark_list.append(question_mark_pattern) else: boolean = False continue counter = counter + len(temp_row) # percent = round(float(counter * 100) / total_len_data) if sub_commit: suffix = "%% rows sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows sent" % str(percent)) else: suffix = "% rows prepared to be sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows prepared to be sent" % str(percent)) data_values_str = ','.join(question_mark_list) columns_name_str = ", ".join(columns_name) inserting_request = '''INSERT INTO %s (%s) VALUES %s ;''' % (table_name, columns_name_str, data_values_str) final_data = [y for x in temp_row for y in x] if final_data: cursor.execute(inserting_request, final_data) if sub_commit: commit_function(cnxn) if not sub_commit: commit_function(cnxn) cursor.close() cnxn.close() if ssh_host: tunnel.close() print("Tunnel closed!") print("data sent to azure") print("Total rows: %s" % str(total_len_data)) print(C.BOLD + "Total time in seconds : %s" % str((datetime.datetime.now() - start).seconds) + C.ENDC) return 0
python
def send_to_azure(instance, data, replace=True, types=None, primary_key=(), sub_commit=True): """ data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] } """ # Time initialization start = datetime.datetime.now() # Extract info rows = data["rows"] if not rows: return 0 table_name = data["table_name"] columns_name = data["columns_name"] total_len_data = len(rows) # Create table if needed if not existing_test(instance, table_name) or (types is not None) or (primary_key != ()): create.create_table(instance, data, primary_key, types) # Clean table if needed if replace: cleaning_function(instance, table_name) connection_kwargs = credential(instance) # Create an SSH tunnel ssh_host = os.environ.get("SSH_%s_HOST" % instance) ssh_user = os.environ.get("SSH_%s_USER" % instance) ssh_path_private_key = os.environ.get("SSH_%s_PATH_PRIVATE_KEY" % instance) if ssh_host: tunnel = SSHTunnelForwarder( (ssh_host, 22), ssh_username=ssh_user, ssh_private_key=ssh_path_private_key, remote_bind_address=( os.environ.get("AZURE_%s_HOST" % instance), int(os.environ.get("AZURE_%s_PORT" % instance))), local_bind_address=('localhost', 1433), # could be any available port ) # Start the tunnel try: tunnel.start() print("Tunnel opened!") except sshtunnel.HandlerSSHTunnelForwarderError: pass connection_kwargs["host"] = "localhost,1433" connection_kwargs["port"] = 1433 cnxn = pyodbc.connect(**connection_kwargs) cursor = cnxn.cursor() small_batch_size = int(2099 / len(columns_name)) print("Initiate send_to_azure...") # Initialize counters boolean = True question_mark_pattern = "(%s)" % ",".join(["?" for i in range(len(rows[0]))]) counter = 0 while boolean: temp_row = [] question_mark_list = [] for i in range(small_batch_size): if rows: temp_row.append(rows.pop()) question_mark_list.append(question_mark_pattern) else: boolean = False continue counter = counter + len(temp_row) # percent = round(float(counter * 100) / total_len_data) if sub_commit: suffix = "%% rows sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows sent" % str(percent)) else: suffix = "% rows prepared to be sent" print_progress_bar(counter, total_len_data, suffix=suffix) # print("%s %% rows prepared to be sent" % str(percent)) data_values_str = ','.join(question_mark_list) columns_name_str = ", ".join(columns_name) inserting_request = '''INSERT INTO %s (%s) VALUES %s ;''' % (table_name, columns_name_str, data_values_str) final_data = [y for x in temp_row for y in x] if final_data: cursor.execute(inserting_request, final_data) if sub_commit: commit_function(cnxn) if not sub_commit: commit_function(cnxn) cursor.close() cnxn.close() if ssh_host: tunnel.close() print("Tunnel closed!") print("data sent to azure") print("Total rows: %s" % str(total_len_data)) print(C.BOLD + "Total time in seconds : %s" % str((datetime.datetime.now() - start).seconds) + C.ENDC) return 0
[ "def", "send_to_azure", "(", "instance", ",", "data", ",", "replace", "=", "True", ",", "types", "=", "None", ",", "primary_key", "=", "(", ")", ",", "sub_commit", "=", "True", ")", ":", "# Time initialization", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Extract info", "rows", "=", "data", "[", "\"rows\"", "]", "if", "not", "rows", ":", "return", "0", "table_name", "=", "data", "[", "\"table_name\"", "]", "columns_name", "=", "data", "[", "\"columns_name\"", "]", "total_len_data", "=", "len", "(", "rows", ")", "# Create table if needed", "if", "not", "existing_test", "(", "instance", ",", "table_name", ")", "or", "(", "types", "is", "not", "None", ")", "or", "(", "primary_key", "!=", "(", ")", ")", ":", "create", ".", "create_table", "(", "instance", ",", "data", ",", "primary_key", ",", "types", ")", "# Clean table if needed", "if", "replace", ":", "cleaning_function", "(", "instance", ",", "table_name", ")", "connection_kwargs", "=", "credential", "(", "instance", ")", "# Create an SSH tunnel", "ssh_host", "=", "os", ".", "environ", ".", "get", "(", "\"SSH_%s_HOST\"", "%", "instance", ")", "ssh_user", "=", "os", ".", "environ", ".", "get", "(", "\"SSH_%s_USER\"", "%", "instance", ")", "ssh_path_private_key", "=", "os", ".", "environ", ".", "get", "(", "\"SSH_%s_PATH_PRIVATE_KEY\"", "%", "instance", ")", "if", "ssh_host", ":", "tunnel", "=", "SSHTunnelForwarder", "(", "(", "ssh_host", ",", "22", ")", ",", "ssh_username", "=", "ssh_user", ",", "ssh_private_key", "=", "ssh_path_private_key", ",", "remote_bind_address", "=", "(", "os", ".", "environ", ".", "get", "(", "\"AZURE_%s_HOST\"", "%", "instance", ")", ",", "int", "(", "os", ".", "environ", ".", "get", "(", "\"AZURE_%s_PORT\"", "%", "instance", ")", ")", ")", ",", "local_bind_address", "=", "(", "'localhost'", ",", "1433", ")", ",", "# could be any available port", ")", "# Start the tunnel", "try", ":", "tunnel", ".", "start", "(", ")", "print", "(", "\"Tunnel opened!\"", ")", "except", "sshtunnel", ".", "HandlerSSHTunnelForwarderError", ":", "pass", "connection_kwargs", "[", "\"host\"", "]", "=", "\"localhost,1433\"", "connection_kwargs", "[", "\"port\"", "]", "=", "1433", "cnxn", "=", "pyodbc", ".", "connect", "(", "*", "*", "connection_kwargs", ")", "cursor", "=", "cnxn", ".", "cursor", "(", ")", "small_batch_size", "=", "int", "(", "2099", "/", "len", "(", "columns_name", ")", ")", "print", "(", "\"Initiate send_to_azure...\"", ")", "# Initialize counters", "boolean", "=", "True", "question_mark_pattern", "=", "\"(%s)\"", "%", "\",\"", ".", "join", "(", "[", "\"?\"", "for", "i", "in", "range", "(", "len", "(", "rows", "[", "0", "]", ")", ")", "]", ")", "counter", "=", "0", "while", "boolean", ":", "temp_row", "=", "[", "]", "question_mark_list", "=", "[", "]", "for", "i", "in", "range", "(", "small_batch_size", ")", ":", "if", "rows", ":", "temp_row", ".", "append", "(", "rows", ".", "pop", "(", ")", ")", "question_mark_list", ".", "append", "(", "question_mark_pattern", ")", "else", ":", "boolean", "=", "False", "continue", "counter", "=", "counter", "+", "len", "(", "temp_row", ")", "# percent = round(float(counter * 100) / total_len_data)", "if", "sub_commit", ":", "suffix", "=", "\"%% rows sent\"", "print_progress_bar", "(", "counter", ",", "total_len_data", ",", "suffix", "=", "suffix", ")", "# print(\"%s %% rows sent\" % str(percent))", "else", ":", "suffix", "=", "\"% rows prepared to be sent\"", "print_progress_bar", "(", "counter", ",", "total_len_data", ",", "suffix", "=", "suffix", ")", "# 
print(\"%s %% rows prepared to be sent\" % str(percent))", "data_values_str", "=", "','", ".", "join", "(", "question_mark_list", ")", "columns_name_str", "=", "\", \"", ".", "join", "(", "columns_name", ")", "inserting_request", "=", "'''INSERT INTO %s (%s) VALUES %s ;'''", "%", "(", "table_name", ",", "columns_name_str", ",", "data_values_str", ")", "final_data", "=", "[", "y", "for", "x", "in", "temp_row", "for", "y", "in", "x", "]", "if", "final_data", ":", "cursor", ".", "execute", "(", "inserting_request", ",", "final_data", ")", "if", "sub_commit", ":", "commit_function", "(", "cnxn", ")", "if", "not", "sub_commit", ":", "commit_function", "(", "cnxn", ")", "cursor", ".", "close", "(", ")", "cnxn", ".", "close", "(", ")", "if", "ssh_host", ":", "tunnel", ".", "close", "(", ")", "print", "(", "\"Tunnel closed!\"", ")", "print", "(", "\"data sent to azure\"", ")", "print", "(", "\"Total rows: %s\"", "%", "str", "(", "total_len_data", ")", ")", "print", "(", "C", ".", "BOLD", "+", "\"Total time in seconds : %s\"", "%", "str", "(", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "start", ")", ".", "seconds", ")", "+", "C", ".", "ENDC", ")", "return", "0" ]
data = { "table_name" : 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table' #Must already exist, "columns_name" : [first_column_name,second_column_name,...,last_column_name], "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...] }
[ "data", "=", "{", "table_name", ":", "name_of_the_azure_schema", "+", ".", "+", "name_of_the_azure_table", "#Must", "already", "exist", "columns_name", ":", "[", "first_column_name", "second_column_name", "...", "last_column_name", "]", "rows", ":", "[[", "first_raw_value", "second_raw_value", "...", "last_raw_value", "]", "...", "]", "}" ]
train
https://github.com/dacker-team/pyzure/blob/1e6d202f91ca0f080635adc470d9d18585056d53/pyzure/send/send.py#L15-L121
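The small_batch_size = int(2099 / len(columns_name)) line above appears to work around SQL Server's cap of 2100 parameters per request: each row contributes one '?' placeholder per column. A minimal sketch of how one multi-row INSERT is assembled (the table and column names are illustrative):

def build_insert(table_name, columns_name, rows):
    # One "(?,?,...)" group per row, then flatten the row values so they
    # line up with the placeholders for cursor.execute().
    group = '(%s)' % ','.join('?' * len(columns_name))
    placeholders = ','.join([group] * len(rows))
    sql = 'INSERT INTO %s (%s) VALUES %s ;' % (
        table_name, ', '.join(columns_name), placeholders)
    params = [value for row in rows for value in row]
    return sql, params

sql, params = build_insert('dbo.demo', ['id', 'name'], [[1, 'a'], [2, 'b']])
print(sql)     # INSERT INTO dbo.demo (id, name) VALUES (?,?),(?,?) ;
print(params)  # [1, 'a', 2, 'b']

batch_size = 2099 // 2  # 1049 rows per batch for a 2-column table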
cons3rt/pycons3rt
pycons3rt/dyndict.py
getdict
def getdict(source): """Returns a standard python Dict with computed values from the DynDict :param source: (DynDict) input :return: (dict) Containing computed values """ std_dict = {} for var in source: std_dict[var] = source[var] return std_dict
python
def getdict(source): """Returns a standard python Dict with computed values from the DynDict :param source: (DynDict) input :return: (dict) Containing computed values """ std_dict = {} for var in source: std_dict[var] = source[var] return std_dict
[ "def", "getdict", "(", "source", ")", ":", "std_dict", "=", "{", "}", "for", "var", ",", "val", "in", "source", ".", "iteritems", "(", ")", ":", "std_dict", "[", "var", "]", "=", "source", "[", "var", "]", "return", "std_dict" ]
Returns a standard python Dict with computed values from the DynDict :param source: (DynDict) input :return: (dict) Containing computed values
[ "Returns", "a", "standard", "python", "Dict", "with", "computed", "values", "from", "the", "DynDict", ":", "param", "source", ":", "(", "DynDict", ")", "input", ":", "return", ":", "(", "dict", ")", "Containing", "computed", "values" ]
train
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/dyndict.py#L20-L29
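getdict relies on DynDict computing values lazily on item access, so indexing source[var] (rather than reusing the value produced by iteration) is what forces evaluation. A minimal stand-in illustrating the idea; DynDictSketch is not the real pycons3rt class:

class DynDictSketch(dict):
    # Stand-in: callable values get evaluated on item access, mimicking
    # the lazy behaviour that getdict depends on.
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        return value() if callable(value) else value

def getdict(source):
    return {var: source[var] for var in source}

d = DynDictSketch(a=1, b=lambda: 2 + 3)
print(getdict(d))  # {'a': 1, 'b': 5}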
Varkal/chuda
chuda/plugins.py
Plugin.enrich_app
def enrich_app(self, name, value): ''' Add a new property to the app (with setattr) Args: name (str): the name of the new property value (any): the value of the new property ''' #Method shouldn't be added: https://stackoverflow.com/a/28060251/3042398 if type(value) == type(self.enrich_app): raise ValueError("enrich_app can't add method") setattr(self.app, name, value)
python
def enrich_app(self, name, value): ''' Add a new property to the app (with setattr) Args: name (str): the name of the new property value (any): the value of the new property ''' #Method shouldn't be added: https://stackoverflow.com/a/28060251/3042398 if type(value) == type(self.enrich_app): raise ValueError("enrich_app can't add method") setattr(self.app, name, value)
[ "def", "enrich_app", "(", "self", ",", "name", ",", "value", ")", ":", "#Method shouldn't be added: https://stackoverflow.com/a/28060251/3042398", "if", "type", "(", "value", ")", "==", "type", "(", "self", ".", "enrich_app", ")", ":", "raise", "ValueError", "(", "\"enrich_app can't add method\"", ")", "setattr", "(", "self", ".", "app", ",", "name", ",", "value", ")" ]
Add a new property to the app (with setattr) Args: name (str): the name of the new property value (any): the value of the new property
[ "Add", "a", "new", "property", "to", "the", "app", "(", "with", "setattr", ")" ]
train
https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/plugins.py#L21-L33
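A usage sketch for enrich_app: plugins attach plain values to the app object, while anything typed like a bound method is rejected. FakeApp and FakePlugin are illustrative stand-ins, not chuda classes:

class FakeApp(object):
    pass

class FakePlugin(object):
    def __init__(self, app):
        self.app = app

    def enrich_app(self, name, value):
        # Mirrors the record above: bound methods are refused because
        # setattr on an instance would not bind them correctly.
        if type(value) == type(self.enrich_app):
            raise ValueError("enrich_app can't add method")
        setattr(self.app, name, value)

app = FakeApp()
FakePlugin(app).enrich_app('config_dir', '/etc/myapp')
print(app.config_dir)  # /etc/myapp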
Vital-Fernandez/dazer
bin/lib/Math_Libraries/fitting_methods.py
linfit
def linfit(x_true, y, sigmay=None, relsigma=True, cov=False, chisq=False, residuals=False): """ Least squares linear fit. Fit a straight line `f(x_true) = a + bx` to points `(x_true, y)`. Returns coefficients `a` and `b` that minimize the squared error. Parameters ---------- x_true : array_like one dimensional array of `x_true` data with `n`>2 data points. y : array_like one dimensional array of `y` data with `n`>2 data points. sigmay : NoneType or float or array_like, optional one dimensional array of uncertainties (errors) in `y` data or a single positive number if all uncertainties are the same. `sigmay` determines the weighting in the least squares minimization. Leaving `sigmay=None` uses no weighting and is equivalent to `sigmay=1`. relsigma : bool, optional If `relsigma` is True, the residuals are used to scale the covariance matrix. Use this option if you do not know the absolute uncertainties (`sigmay`) in the data but still want a covariance matrix whose entries give meaningful estimates of the uncertainties in the fitting parameters `a` and `b` (from `f = a + bx`). If `relsigma` is False, the covariance matrix is calculated (provided `cov` = True) using sigmay assuming sigmay represents absolute undertainties. cov : bool, optional If True, calculate and return the 2x2 covarience matrix of the fitting parameters. chisq : bool, optional If True, calculate and return redchisq. residuals : bool, optional If True, calculate and return residuals. Returns ------- fit : array([a,b]) ndarray of floats The best fit model parameters `a` (the slope) and `b` (the `y`-intercept) for the input data arrays `x_true` and `y` cvm : array, shape (2,2) : returned only if cov=True Covarience matrix of the fitting parameters. Diagonal elements are estimated variances of the fitting parameters a and b; square roots of the diagonal elements thus provide estimates of the uncertainties in the fitting parameters `a` and `b`. Off diagonal elements (equal to each other) are the covarience between the fitting parameters `a` and `b`. redchisq : float : returned only if chisq=True Reduced chi-squared goodness of fit parameter. residuals : ndarray of floats : returned only if residuals=True Length n array of the differences `y-(ax+b)` between `y`-data and the fitted data `ax + b`. Raises ------ TypeError : if `x_true` and `y` have different lengths TypeError : If `x_true` and `y` have 2 or fewer elements TypeError : If `sigmay` length is not 1 or the same as `y` See Also -------- polyfit : Least squares fit to polynomial. linalg.lstsq : Least-squares solution to a linear matrix equation. Notes ----- By default, ``linfit`` returns optimal fitting parameters `a` and `b` without weighting of the data. In that case, linfit minimizes the squared error .. math :: E = \\sum_{i=0}^n [y_i - (a x_i + b)]^2 If `sigmay` is set equal to the uncertainties in the `y` data points, then linfit minimizes the `chi-squared` sum .. math :: \chi^2 = \\sum_{i=0}^n \\left[ \\frac{y_i-(a x_i + b)}{\\sigma_i} \\right]^2 where :math:`\sigma_i` is given by `sigmay`, the "error" or standard deviation of :math:`y_i`. `sigmay` can be either a single number that gives the uncertainty for all elements of `y`, or it can be an array of the same length as `y` that gives the "error" for each element of `y`. `redchisq` is :math:`\chi^2/(n-2)` where :math:`n` is the number of data points (the length of `x_true` or `y`). 
If `relsigma` is False, then the uncertainties `sigmay` in `y` are assumed to be the absolute one-standard-deviation uncertainties in `y`. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` provides a measure of the goodness of the fit. If it is near 1, then the linear fitting model is considered to be good and the values of the covariance matrix are appropriately scaled. In particular, the square root of the diagonal elements of the covariance matrix give the estimated uncertainty in the fitting parameters `a` and `b`. See Refernece [2] below for more information. If `relsigma` is True, then the uncertainties `sigmay` in `y` are considered to be only relative uncertainties. They are used to weight the data for the fit, but in this case, the covariance matrix is rescaled using the residuals between the fit and the data. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` does not provide a measure of the goodness of the fit. Nevertheless, the diagonal elements of the rescaled covariance matrix (returned by linfit) give the estimated uncertainty in the fitting parameters `a` and `b`. The covariance matrix is a 2x2 symmetric matrix where the diagonal elements are the variance of the fitting parameters. Their square roots provide estimates of the uncertainties in the fitting parameters. The off-diagonal elements are equal and give the cross correlation between the two fitting parameters `a` and `b`. linfit runs faster, by a factor of 2 to 3, if calculation of the residuals is suppressed letting `cov`, `chisq`, and `residuals` remain False (the default setting). Fitting a straight line to a single set of `(x_true, y)` data using ``linfit`` is typically 2 to 10 times faster than using either ``polyfit`` or ``linalg.lstsq``, especially when weighting is used and for very large data sets. References ---------- .. [1] An Introduction to Error Analysis, 2nd Ed. by John R. Taylor (University Science Books, 1997) .. [2] Numerical Recipes, The Art of Scientific Computing, 3rd Edition by W.H. Press, S. A. Teukolsky, W. T. Vetterling, & B. P. Flannery (Cambridge University Press, 2007) Examples -------- Fit a line, `y = ax + b`, through some noisy `(x_true, y)` data-points without any weighting (`sigmay` = None) to obtain fitting parameters `a` and `b`: >>> x_true = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) >>> fit = linfit(x_true, y) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 1.00, b = -0.95 Setting `cov` = True in the input, returns the covariance matrix `cvm`. When uncertainties `sigmay` are left unspecified, meaningful estimates of the uncertainties `da` and `db` in the fitting parameters `a` and `b` are given by the square roots of the diagonals of the covariance matrix `cvm`, provided `relsigma` = True (the default state). >>> fit, cvm = linfit(x_true, y, cov=True) >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.07, db = 0.13 A better practice is to supply estimates of the uncertainties in the input argument `sigmay`. `sigmay` can be a single float, if the uncertainties are the same for all data points, or it can be an array, if the uncertainties for different data points are different. Here we enter sigmay as an array. 
>>> dy = np.array([0.18, 0.13, 0.15, 0.17]) >>> fit, cvm, redchisq, resids = linfit(x_true, y, cov=True, sigmay=dy, relsigma=False, chisq=True, residuals=True) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 0.98, b = -0.91 >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.08, db = 0.14 >>> print("reduced chi-squared = {0:0.2f}".format(redchisq)) reduced chi-squared = 1.21 >>> print(resids) [-0.08856653 0.12781099 -0.1558115 0.06056602] The value of reduced chi-squared `redchisq` is 1.21 indicating that a linear model is valid for these data. The residuals :math:`y_i - (a+bx_i)` are given by the output `resids`. If absolute estimates of the uncertainties are not available, but relative estimates of the uncertainties are known, a fit can be obtained with reasonable estimates of the uncertainties in the fitting parameters by setting `relsigma` = True. >>> dy = np.array([1.0, 0.75, 0.75, 1.25]) >>> fit, cvm, redchisq = linfit(x_true, y, cov=True, sigmay=dy, relsigma=True, chisq=True) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 0.97, b = -0.91 >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.09, db = 0.16 >>> print("reduced chi-squared = {0:0.2f}".format(redchisq)) reduced chi-squared = 0.04 In this case, the value `redchisq` is meaningless, because only the relative, rather than the absolute uncertainties are known. Nevertheless, by setting `relsigma` = True, reasonable estimates for the uncertainties in the fitting parameters are obtained. Illustration: .. image:: example.png :scale: 75 % """ x_true = asarray(x_true) y = asarray(y) if x_true.size != y.size: raise TypeError('Expected x_true and y to have same length') if x_true.size <= 2: raise TypeError('Expected x_true and y length > 2') if sigmay is None: sigmay = 1.0 sigmay = asarray(sigmay) if sigmay.size == 1: sigy = float(sigmay) # convert 0-d array to a float wt = 1./(sigy*sigy) s = wt * y.size sx = wt * x_true.sum() sy = wt * y.sum() t = x_true-sx/s stt = wt * (t*t).sum() slope = wt * (t*y).sum()/stt yint = (sy - sx * slope)/s else: if sigmay.size != y.size: raise TypeError('Expected sigmay size to be 1 or same as y') wt = 1./(sigmay*sigmay) s = wt.sum() sx = (x_true*wt).sum() sy = (y*wt).sum() t = (x_true-sx/s)/sigmay stt = (t*t).sum() slope = (t*y/sigmay).sum()/stt yint = (sy - sx * slope)/s returns = array([slope, yint]) if cov is True: cvm00 = 1./stt cvm01 = -sx/(s*stt) cvm11 = (1.0-sx*cvm01)/s if relsigma is True: redchisq, resids = _resids(x_true, y, sigmay, slope, yint) cvm00 *= redchisq cvm01 *= redchisq cvm11 *= redchisq returns = [returns] + [array([[cvm00, cvm01], [cvm01, cvm11]])] if residuals or chisq is True: if relsigma is False: redchisq, resids = _resids(x_true, y, sigmay, slope, yint) if type(returns) is not list: returns = [returns] if chisq is True: returns += [redchisq] if residuals is True: returns += [resids] return returns
python
def linfit(x_true, y, sigmay=None, relsigma=True, cov=False, chisq=False, residuals=False): """ Least squares linear fit. Fit a straight line `f(x_true) = a + bx` to points `(x_true, y)`. Returns coefficients `a` and `b` that minimize the squared error. Parameters ---------- x_true : array_like one dimensional array of `x_true` data with `n`>2 data points. y : array_like one dimensional array of `y` data with `n`>2 data points. sigmay : NoneType or float or array_like, optional one dimensional array of uncertainties (errors) in `y` data or a single positive number if all uncertainties are the same. `sigmay` determines the weighting in the least squares minimization. Leaving `sigmay=None` uses no weighting and is equivalent to `sigmay=1`. relsigma : bool, optional If `relsigma` is True, the residuals are used to scale the covariance matrix. Use this option if you do not know the absolute uncertainties (`sigmay`) in the data but still want a covariance matrix whose entries give meaningful estimates of the uncertainties in the fitting parameters `a` and `b` (from `f = a + bx`). If `relsigma` is False, the covariance matrix is calculated (provided `cov` = True) using sigmay assuming sigmay represents absolute undertainties. cov : bool, optional If True, calculate and return the 2x2 covarience matrix of the fitting parameters. chisq : bool, optional If True, calculate and return redchisq. residuals : bool, optional If True, calculate and return residuals. Returns ------- fit : array([a,b]) ndarray of floats The best fit model parameters `a` (the slope) and `b` (the `y`-intercept) for the input data arrays `x_true` and `y` cvm : array, shape (2,2) : returned only if cov=True Covarience matrix of the fitting parameters. Diagonal elements are estimated variances of the fitting parameters a and b; square roots of the diagonal elements thus provide estimates of the uncertainties in the fitting parameters `a` and `b`. Off diagonal elements (equal to each other) are the covarience between the fitting parameters `a` and `b`. redchisq : float : returned only if chisq=True Reduced chi-squared goodness of fit parameter. residuals : ndarray of floats : returned only if residuals=True Length n array of the differences `y-(ax+b)` between `y`-data and the fitted data `ax + b`. Raises ------ TypeError : if `x_true` and `y` have different lengths TypeError : If `x_true` and `y` have 2 or fewer elements TypeError : If `sigmay` length is not 1 or the same as `y` See Also -------- polyfit : Least squares fit to polynomial. linalg.lstsq : Least-squares solution to a linear matrix equation. Notes ----- By default, ``linfit`` returns optimal fitting parameters `a` and `b` without weighting of the data. In that case, linfit minimizes the squared error .. math :: E = \\sum_{i=0}^n [y_i - (a x_i + b)]^2 If `sigmay` is set equal to the uncertainties in the `y` data points, then linfit minimizes the `chi-squared` sum .. math :: \chi^2 = \\sum_{i=0}^n \\left[ \\frac{y_i-(a x_i + b)}{\\sigma_i} \\right]^2 where :math:`\sigma_i` is given by `sigmay`, the "error" or standard deviation of :math:`y_i`. `sigmay` can be either a single number that gives the uncertainty for all elements of `y`, or it can be an array of the same length as `y` that gives the "error" for each element of `y`. `redchisq` is :math:`\chi^2/(n-2)` where :math:`n` is the number of data points (the length of `x_true` or `y`). 
If `relsigma` is False, then the uncertainties `sigmay` in `y` are assumed to be the absolute one-standard-deviation uncertainties in `y`. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` provides a measure of the goodness of the fit. If it is near 1, then the linear fitting model is considered to be good and the values of the covariance matrix are appropriately scaled. In particular, the square root of the diagonal elements of the covariance matrix give the estimated uncertainty in the fitting parameters `a` and `b`. See Refernece [2] below for more information. If `relsigma` is True, then the uncertainties `sigmay` in `y` are considered to be only relative uncertainties. They are used to weight the data for the fit, but in this case, the covariance matrix is rescaled using the residuals between the fit and the data. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` does not provide a measure of the goodness of the fit. Nevertheless, the diagonal elements of the rescaled covariance matrix (returned by linfit) give the estimated uncertainty in the fitting parameters `a` and `b`. The covariance matrix is a 2x2 symmetric matrix where the diagonal elements are the variance of the fitting parameters. Their square roots provide estimates of the uncertainties in the fitting parameters. The off-diagonal elements are equal and give the cross correlation between the two fitting parameters `a` and `b`. linfit runs faster, by a factor of 2 to 3, if calculation of the residuals is suppressed letting `cov`, `chisq`, and `residuals` remain False (the default setting). Fitting a straight line to a single set of `(x_true, y)` data using ``linfit`` is typically 2 to 10 times faster than using either ``polyfit`` or ``linalg.lstsq``, especially when weighting is used and for very large data sets. References ---------- .. [1] An Introduction to Error Analysis, 2nd Ed. by John R. Taylor (University Science Books, 1997) .. [2] Numerical Recipes, The Art of Scientific Computing, 3rd Edition by W.H. Press, S. A. Teukolsky, W. T. Vetterling, & B. P. Flannery (Cambridge University Press, 2007) Examples -------- Fit a line, `y = ax + b`, through some noisy `(x_true, y)` data-points without any weighting (`sigmay` = None) to obtain fitting parameters `a` and `b`: >>> x_true = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) >>> fit = linfit(x_true, y) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 1.00, b = -0.95 Setting `cov` = True in the input, returns the covariance matrix `cvm`. When uncertainties `sigmay` are left unspecified, meaningful estimates of the uncertainties `da` and `db` in the fitting parameters `a` and `b` are given by the square roots of the diagonals of the covariance matrix `cvm`, provided `relsigma` = True (the default state). >>> fit, cvm = linfit(x_true, y, cov=True) >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.07, db = 0.13 A better practice is to supply estimates of the uncertainties in the input argument `sigmay`. `sigmay` can be a single float, if the uncertainties are the same for all data points, or it can be an array, if the uncertainties for different data points are different. Here we enter sigmay as an array. 
>>> dy = np.array([0.18, 0.13, 0.15, 0.17]) >>> fit, cvm, redchisq, resids = linfit(x_true, y, cov=True, sigmay=dy, relsigma=False, chisq=True, residuals=True) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 0.98, b = -0.91 >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.08, db = 0.14 >>> print("reduced chi-squared = {0:0.2f}".format(redchisq)) reduced chi-squared = 1.21 >>> print(resids) [-0.08856653 0.12781099 -0.1558115 0.06056602] The value of reduced chi-squared `redchisq` is 1.21 indicating that a linear model is valid for these data. The residuals :math:`y_i - (a+bx_i)` are given by the output `resids`. If absolute estimates of the uncertainties are not available, but relative estimates of the uncertainties are known, a fit can be obtained with reasonable estimates of the uncertainties in the fitting parameters by setting `relsigma` = True. >>> dy = np.array([1.0, 0.75, 0.75, 1.25]) >>> fit, cvm, redchisq = linfit(x_true, y, cov=True, sigmay=dy, relsigma=True, chisq=True) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 0.97, b = -0.91 >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.09, db = 0.16 >>> print("reduced chi-squared = {0:0.2f}".format(redchisq)) reduced chi-squared = 0.04 In this case, the value `redchisq` is meaningless, because only the relative, rather than the absolute uncertainties are known. Nevertheless, by setting `relsigma` = True, reasonable estimates for the uncertainties in the fitting parameters are obtained. Illustration: .. image:: example.png :scale: 75 % """ x_true = asarray(x_true) y = asarray(y) if x_true.size != y.size: raise TypeError('Expected x_true and y to have same length') if x_true.size <= 2: raise TypeError('Expected x_true and y length > 2') if sigmay is None: sigmay = 1.0 sigmay = asarray(sigmay) if sigmay.size == 1: sigy = float(sigmay) # convert 0-d array to a float wt = 1./(sigy*sigy) s = wt * y.size sx = wt * x_true.sum() sy = wt * y.sum() t = x_true-sx/s stt = wt * (t*t).sum() slope = wt * (t*y).sum()/stt yint = (sy - sx * slope)/s else: if sigmay.size != y.size: raise TypeError('Expected sigmay size to be 1 or same as y') wt = 1./(sigmay*sigmay) s = wt.sum() sx = (x_true*wt).sum() sy = (y*wt).sum() t = (x_true-sx/s)/sigmay stt = (t*t).sum() slope = (t*y/sigmay).sum()/stt yint = (sy - sx * slope)/s returns = array([slope, yint]) if cov is True: cvm00 = 1./stt cvm01 = -sx/(s*stt) cvm11 = (1.0-sx*cvm01)/s if relsigma is True: redchisq, resids = _resids(x_true, y, sigmay, slope, yint) cvm00 *= redchisq cvm01 *= redchisq cvm11 *= redchisq returns = [returns] + [array([[cvm00, cvm01], [cvm01, cvm11]])] if residuals or chisq is True: if relsigma is False: redchisq, resids = _resids(x_true, y, sigmay, slope, yint) if type(returns) is not list: returns = [returns] if chisq is True: returns += [redchisq] if residuals is True: returns += [resids] return returns
[ "def", "linfit", "(", "x_true", ",", "y", ",", "sigmay", "=", "None", ",", "relsigma", "=", "True", ",", "cov", "=", "False", ",", "chisq", "=", "False", ",", "residuals", "=", "False", ")", ":", "x_true", "=", "asarray", "(", "x_true", ")", "y", "=", "asarray", "(", "y", ")", "if", "x_true", ".", "size", "!=", "y", ".", "size", ":", "raise", "TypeError", "(", "'Expected x_true and y to have same length'", ")", "if", "x_true", ".", "size", "<=", "2", ":", "raise", "TypeError", "(", "'Expected x_true and y length > 2'", ")", "if", "sigmay", "is", "None", ":", "sigmay", "=", "1.0", "sigmay", "=", "asarray", "(", "sigmay", ")", "if", "sigmay", ".", "size", "==", "1", ":", "sigy", "=", "float", "(", "sigmay", ")", "# convert 0-d array to a float", "wt", "=", "1.", "/", "(", "sigy", "*", "sigy", ")", "s", "=", "wt", "*", "y", ".", "size", "sx", "=", "wt", "*", "x_true", ".", "sum", "(", ")", "sy", "=", "wt", "*", "y", ".", "sum", "(", ")", "t", "=", "x_true", "-", "sx", "/", "s", "stt", "=", "wt", "*", "(", "t", "*", "t", ")", ".", "sum", "(", ")", "slope", "=", "wt", "*", "(", "t", "*", "y", ")", ".", "sum", "(", ")", "/", "stt", "yint", "=", "(", "sy", "-", "sx", "*", "slope", ")", "/", "s", "else", ":", "if", "sigmay", ".", "size", "!=", "y", ".", "size", ":", "raise", "TypeError", "(", "'Expected sigmay size to be 1 or same as y'", ")", "wt", "=", "1.", "/", "(", "sigmay", "*", "sigmay", ")", "s", "=", "wt", ".", "sum", "(", ")", "sx", "=", "(", "x_true", "*", "wt", ")", ".", "sum", "(", ")", "sy", "=", "(", "y", "*", "wt", ")", ".", "sum", "(", ")", "t", "=", "(", "x_true", "-", "sx", "/", "s", ")", "/", "sigmay", "stt", "=", "(", "t", "*", "t", ")", ".", "sum", "(", ")", "slope", "=", "(", "t", "*", "y", "/", "sigmay", ")", ".", "sum", "(", ")", "/", "stt", "yint", "=", "(", "sy", "-", "sx", "*", "slope", ")", "/", "s", "returns", "=", "array", "(", "[", "slope", ",", "yint", "]", ")", "if", "cov", "is", "True", ":", "cvm00", "=", "1.", "/", "stt", "cvm01", "=", "-", "sx", "/", "(", "s", "*", "stt", ")", "cvm11", "=", "(", "1.0", "-", "sx", "*", "cvm01", ")", "/", "s", "if", "relsigma", "is", "True", ":", "redchisq", ",", "resids", "=", "_resids", "(", "x_true", ",", "y", ",", "sigmay", ",", "slope", ",", "yint", ")", "cvm00", "*=", "redchisq", "cvm01", "*=", "redchisq", "cvm11", "*=", "redchisq", "returns", "=", "[", "returns", "]", "+", "[", "array", "(", "[", "[", "cvm00", ",", "cvm01", "]", ",", "[", "cvm01", ",", "cvm11", "]", "]", ")", "]", "if", "residuals", "or", "chisq", "is", "True", ":", "if", "relsigma", "is", "False", ":", "redchisq", ",", "resids", "=", "_resids", "(", "x_true", ",", "y", ",", "sigmay", ",", "slope", ",", "yint", ")", "if", "type", "(", "returns", ")", "is", "not", "list", ":", "returns", "=", "[", "returns", "]", "if", "chisq", "is", "True", ":", "returns", "+=", "[", "redchisq", "]", "if", "residuals", "is", "True", ":", "returns", "+=", "[", "resids", "]", "return", "returns" ]
Least squares linear fit. Fit a straight line `f(x_true) = ax + b` to points `(x_true, y)`. Returns coefficients `a` and `b` that minimize the squared error.

Parameters
----------
x_true : array_like
    one dimensional array of `x_true` data with `n`>2 data points.
y : array_like
    one dimensional array of `y` data with `n`>2 data points.
sigmay : NoneType or float or array_like, optional
    one dimensional array of uncertainties (errors) in `y` data or a single positive number if all uncertainties are the same. `sigmay` determines the weighting in the least squares minimization. Leaving `sigmay=None` uses no weighting and is equivalent to `sigmay=1`.
relsigma : bool, optional
    If `relsigma` is True, the residuals are used to scale the covariance matrix. Use this option if you do not know the absolute uncertainties (`sigmay`) in the data but still want a covariance matrix whose entries give meaningful estimates of the uncertainties in the fitting parameters `a` and `b` (from `f = ax + b`). If `relsigma` is False, the covariance matrix is calculated (provided `cov` = True) using sigmay, assuming sigmay represents absolute uncertainties.
cov : bool, optional
    If True, calculate and return the 2x2 covariance matrix of the fitting parameters.
chisq : bool, optional
    If True, calculate and return redchisq.
residuals : bool, optional
    If True, calculate and return residuals.

Returns
-------
fit : array([a,b]) ndarray of floats
    The best fit model parameters `a` (the slope) and `b` (the `y`-intercept) for the input data arrays `x_true` and `y`
cvm : array, shape (2,2) : returned only if cov=True
    Covariance matrix of the fitting parameters. Diagonal elements are estimated variances of the fitting parameters a and b; square roots of the diagonal elements thus provide estimates of the uncertainties in the fitting parameters `a` and `b`. Off diagonal elements (equal to each other) are the covariance between the fitting parameters `a` and `b`.
redchisq : float : returned only if chisq=True
    Reduced chi-squared goodness of fit parameter.
residuals : ndarray of floats : returned only if residuals=True
    Length n array of the differences `y-(ax+b)` between `y`-data and the fitted data `ax + b`.

Raises
------
TypeError : If `x_true` and `y` have different lengths
TypeError : If `x_true` and `y` have 2 or fewer elements
TypeError : If `sigmay` length is not 1 or the same as `y`

See Also
--------
polyfit : Least squares fit to polynomial.
linalg.lstsq : Least-squares solution to a linear matrix equation.

Notes
-----
By default, ``linfit`` returns optimal fitting parameters `a` and `b` without weighting of the data. In that case, linfit minimizes the squared error

.. math :: E = \sum_{i=1}^n [y_i - (a x_i + b)]^2

If `sigmay` is set equal to the uncertainties in the `y` data points, then linfit minimizes the `chi-squared` sum

.. math :: \chi^2 = \sum_{i=1}^n \left[ \frac{y_i-(a x_i + b)}{\sigma_i} \right]^2

where :math:`\sigma_i` is given by `sigmay`, the "error" or standard deviation of :math:`y_i`. `sigmay` can be either a single number that gives the uncertainty for all elements of `y`, or it can be an array of the same length as `y` that gives the "error" for each element of `y`. `redchisq` is :math:`\chi^2/(n-2)` where :math:`n` is the number of data points (the length of `x_true` or `y`).

If `relsigma` is False, then the uncertainties `sigmay` in `y` are assumed to be the absolute one-standard-deviation uncertainties in `y`. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` provides a measure of the goodness of the fit. If it is near 1, then the linear fitting model is considered to be good and the values of the covariance matrix are appropriately scaled. In particular, the square roots of the diagonal elements of the covariance matrix give the estimated uncertainty in the fitting parameters `a` and `b`. See Reference [2] below for more information.

If `relsigma` is True, then the uncertainties `sigmay` in `y` are considered to be only relative uncertainties. They are used to weight the data for the fit, but in this case, the covariance matrix is rescaled using the residuals between the fit and the data. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` does not provide a measure of the goodness of the fit. Nevertheless, the diagonal elements of the rescaled covariance matrix (returned by linfit) give the estimated uncertainty in the fitting parameters `a` and `b`.

The covariance matrix is a 2x2 symmetric matrix where the diagonal elements are the variances of the fitting parameters. Their square roots provide estimates of the uncertainties in the fitting parameters. The off-diagonal elements are equal and give the cross correlation between the two fitting parameters `a` and `b`.

linfit runs faster, by a factor of 2 to 3, if calculation of the residuals is suppressed by letting `cov`, `chisq`, and `residuals` remain False (the default setting).

Fitting a straight line to a single set of `(x_true, y)` data using ``linfit`` is typically 2 to 10 times faster than using either ``polyfit`` or ``linalg.lstsq``, especially when weighting is used and for very large data sets.

References
----------
.. [1] An Introduction to Error Analysis, 2nd Ed. by John R. Taylor (University Science Books, 1997)
.. [2] Numerical Recipes, The Art of Scientific Computing, 3rd Edition by W.H. Press, S. A. Teukolsky, W. T. Vetterling, & B. P. Flannery (Cambridge University Press, 2007)

Examples
--------
Fit a line, `y = ax + b`, through some noisy `(x_true, y)` data-points without any weighting (`sigmay` = None) to obtain fitting parameters `a` and `b`:

>>> x_true = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
>>> fit = linfit(x_true, y)
>>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1]))
a = 1.00, b = -0.95

Setting `cov` = True in the input returns the covariance matrix `cvm`. When uncertainties `sigmay` are left unspecified, meaningful estimates of the uncertainties `da` and `db` in the fitting parameters `a` and `b` are given by the square roots of the diagonals of the covariance matrix `cvm`, provided `relsigma` = True (the default state).

>>> fit, cvm = linfit(x_true, y, cov=True)
>>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)]
>>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.07, db = 0.13

A better practice is to supply estimates of the uncertainties in the input argument `sigmay`. `sigmay` can be a single float, if the uncertainties are the same for all data points, or it can be an array, if the uncertainties for different data points are different. Here we enter sigmay as an array.

>>> dy = np.array([0.18, 0.13, 0.15, 0.17])
>>> fit, cvm, redchisq, resids = linfit(x_true, y, cov=True, sigmay=dy, relsigma=False, chisq=True, residuals=True)
>>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1]))
a = 0.98, b = -0.91
>>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)]
>>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.08, db = 0.14
>>> print("reduced chi-squared = {0:0.2f}".format(redchisq))
reduced chi-squared = 1.21
>>> print(resids)
[-0.08856653 0.12781099 -0.1558115 0.06056602]

The value of reduced chi-squared `redchisq` is 1.21, indicating that a linear model is valid for these data. The residuals :math:`y_i - (a x_i + b)` are given by the output `resids`.

If absolute estimates of the uncertainties are not available, but relative estimates of the uncertainties are known, a fit can be obtained with reasonable estimates of the uncertainties in the fitting parameters by setting `relsigma` = True.

>>> dy = np.array([1.0, 0.75, 0.75, 1.25])
>>> fit, cvm, redchisq = linfit(x_true, y, cov=True, sigmay=dy, relsigma=True, chisq=True)
>>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1]))
a = 0.97, b = -0.91
>>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)]
>>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.09, db = 0.16
>>> print("reduced chi-squared = {0:0.2f}".format(redchisq))
reduced chi-squared = 0.04

In this case, the value `redchisq` is meaningless, because only the relative, rather than the absolute, uncertainties are known. Nevertheless, by setting `relsigma` = True, reasonable estimates for the uncertainties in the fitting parameters are obtained.

Illustration:

.. image:: example.png
    :scale: 75 %
[ "Least", "squares", "linear", "fit", ".", "Fit", "a", "straight", "line", "f", "(", "x_true", ")", "=", "a", "+", "bx", "to", "points", "(", "x_true", "y", ")", ".", "Returns", "coefficients", "a", "and", "b", "that", "minimize", "the", "squared", "error", ".", "Parameters", "----------", "x_true", ":", "array_like", "one", "dimensional", "array", "of", "x_true", "data", "with", "n", ">", "2", "data", "points", ".", "y", ":", "array_like", "one", "dimensional", "array", "of", "y", "data", "with", "n", ">", "2", "data", "points", ".", "sigmay", ":", "NoneType", "or", "float", "or", "array_like", "optional", "one", "dimensional", "array", "of", "uncertainties", "(", "errors", ")", "in", "y", "data", "or", "a", "single", "positive", "number", "if", "all", "uncertainties", "are", "the", "same", ".", "sigmay", "determines", "the", "weighting", "in", "the", "least", "squares", "minimization", ".", "Leaving", "sigmay", "=", "None", "uses", "no", "weighting", "and", "is", "equivalent", "to", "sigmay", "=", "1", ".", "relsigma", ":", "bool", "optional", "If", "relsigma", "is", "True", "the", "residuals", "are", "used", "to", "scale", "the", "covariance", "matrix", ".", "Use", "this", "option", "if", "you", "do", "not", "know", "the", "absolute", "uncertainties", "(", "sigmay", ")", "in", "the", "data", "but", "still", "want", "a", "covariance", "matrix", "whose", "entries", "give", "meaningful", "estimates", "of", "the", "uncertainties", "in", "the", "fitting", "parameters", "a", "and", "b", "(", "from", "f", "=", "a", "+", "bx", ")", ".", "If", "relsigma", "is", "False", "the", "covariance", "matrix", "is", "calculated", "(", "provided", "cov", "=", "True", ")", "using", "sigmay", "assuming", "sigmay", "represents", "absolute", "undertainties", ".", "cov", ":", "bool", "optional", "If", "True", "calculate", "and", "return", "the", "2x2", "covarience", "matrix", "of", "the", "fitting", "parameters", ".", "chisq", ":", "bool", "optional", "If", "True", "calculate", "and", "return", "redchisq", ".", "residuals", ":", "bool", "optional", "If", "True", "calculate", "and", "return", "residuals", ".", "Returns", "-------", "fit", ":", "array", "(", "[", "a", "b", "]", ")", "ndarray", "of", "floats", "The", "best", "fit", "model", "parameters", "a", "(", "the", "slope", ")", "and", "b", "(", "the", "y", "-", "intercept", ")", "for", "the", "input", "data", "arrays", "x_true", "and", "y", "cvm", ":", "array", "shape", "(", "2", "2", ")", ":", "returned", "only", "if", "cov", "=", "True", "Covarience", "matrix", "of", "the", "fitting", "parameters", ".", "Diagonal", "elements", "are", "estimated", "variances", "of", "the", "fitting", "parameters", "a", "and", "b", ";", "square", "roots", "of", "the", "diagonal", "elements", "thus", "provide", "estimates", "of", "the", "uncertainties", "in", "the", "fitting", "parameters", "a", "and", "b", ".", "Off", "diagonal", "elements", "(", "equal", "to", "each", "other", ")", "are", "the", "covarience", "between", "the", "fitting", "parameters", "a", "and", "b", ".", "redchisq", ":", "float", ":", "returned", "only", "if", "chisq", "=", "True", "Reduced", "chi", "-", "squared", "goodness", "of", "fit", "parameter", ".", "residuals", ":", "ndarray", "of", "floats", ":", "returned", "only", "if", "residuals", "=", "True", "Length", "n", "array", "of", "the", "differences", "y", "-", "(", "ax", "+", "b", ")", "between", "y", "-", "data", "and", "the", "fitted", "data", "ax", "+", "b", ".", "Raises", "------", "TypeError", ":", "if", "x_true", "and", "y", "have", 
"different", "lengths", "TypeError", ":", "If", "x_true", "and", "y", "have", "2", "or", "fewer", "elements", "TypeError", ":", "If", "sigmay", "length", "is", "not", "1", "or", "the", "same", "as", "y", "See", "Also", "--------", "polyfit", ":", "Least", "squares", "fit", "to", "polynomial", ".", "linalg", ".", "lstsq", ":", "Least", "-", "squares", "solution", "to", "a", "linear", "matrix", "equation", ".", "Notes", "-----", "By", "default", "linfit", "returns", "optimal", "fitting", "parameters", "a", "and", "b", "without", "weighting", "of", "the", "data", ".", "In", "that", "case", "linfit", "minimizes", "the", "squared", "error", "..", "math", "::", "E", "=", "\\\\", "sum_", "{", "i", "=", "0", "}", "^n", "[", "y_i", "-", "(", "a", "x_i", "+", "b", ")", "]", "^2", "If", "sigmay", "is", "set", "equal", "to", "the", "uncertainties", "in", "the", "y", "data", "points", "then", "linfit", "minimizes", "the", "chi", "-", "squared", "sum", "..", "math", "::", "\\", "chi^2", "=", "\\\\", "sum_", "{", "i", "=", "0", "}", "^n", "\\\\", "left", "[", "\\\\", "frac", "{", "y_i", "-", "(", "a", "x_i", "+", "b", ")", "}", "{", "\\\\", "sigma_i", "}", "\\\\", "right", "]", "^2", "where", ":", "math", ":", "\\", "sigma_i", "is", "given", "by", "sigmay", "the", "error", "or", "standard", "deviation", "of", ":", "math", ":", "y_i", ".", "sigmay", "can", "be", "either", "a", "single", "number", "that", "gives", "the", "uncertainty", "for", "all", "elements", "of", "y", "or", "it", "can", "be", "an", "array", "of", "the", "same", "length", "as", "y", "that", "gives", "the", "error", "for", "each", "element", "of", "y", ".", "redchisq", "is", ":", "math", ":", "\\", "chi^2", "/", "(", "n", "-", "2", ")", "where", ":", "math", ":", "n", "is", "the", "number", "of", "data", "points", "(", "the", "length", "of", "x_true", "or", "y", ")", ".", "If", "relsigma", "is", "False", "then", "the", "uncertainties", "sigmay", "in", "y", "are", "assumed", "to", "be", "the", "absolute", "one", "-", "standard", "-", "deviation", "uncertainties", "in", "y", ".", "In", "this", "case", "the", "reduced", "chi", "-", "squared", "value", ":", "math", ":", "\\", "chi^2", "/", "(", "n", "-", "2", ")", "provides", "a", "measure", "of", "the", "goodness", "of", "the", "fit", ".", "If", "it", "is", "near", "1", "then", "the", "linear", "fitting", "model", "is", "considered", "to", "be", "good", "and", "the", "values", "of", "the", "covariance", "matrix", "are", "appropriately", "scaled", ".", "In", "particular", "the", "square", "root", "of", "the", "diagonal", "elements", "of", "the", "covariance", "matrix", "give", "the", "estimated", "uncertainty", "in", "the", "fitting", "parameters", "a", "and", "b", ".", "See", "Refernece", "[", "2", "]", "below", "for", "more", "information", ".", "If", "relsigma", "is", "True", "then", "the", "uncertainties", "sigmay", "in", "y", "are", "considered", "to", "be", "only", "relative", "uncertainties", ".", "They", "are", "used", "to", "weight", "the", "data", "for", "the", "fit", "but", "in", "this", "case", "the", "covariance", "matrix", "is", "rescaled", "using", "the", "residuals", "between", "the", "fit", "and", "the", "data", ".", "In", "this", "case", "the", "reduced", "chi", "-", "squared", "value", ":", "math", ":", "\\", "chi^2", "/", "(", "n", "-", "2", ")", "does", "not", "provide", "a", "measure", "of", "the", "goodness", "of", "the", "fit", ".", "Nevertheless", "the", "diagonal", "elements", "of", "the", "rescaled", "covariance", "matrix", "(", "returned", "by", "linfit", ")", 
"give", "the", "estimated", "uncertainty", "in", "the", "fitting", "parameters", "a", "and", "b", ".", "The", "covariance", "matrix", "is", "a", "2x2", "symmetric", "matrix", "where", "the", "diagonal", "elements", "are", "the", "variance", "of", "the", "fitting", "parameters", ".", "Their", "square", "roots", "provide", "estimates", "of", "the", "uncertainties", "in", "the", "fitting", "parameters", ".", "The", "off", "-", "diagonal", "elements", "are", "equal", "and", "give", "the", "cross", "correlation", "between", "the", "two", "fitting", "parameters", "a", "and", "b", ".", "linfit", "runs", "faster", "by", "a", "factor", "of", "2", "to", "3", "if", "calculation", "of", "the", "residuals", "is", "suppressed", "letting", "cov", "chisq", "and", "residuals", "remain", "False", "(", "the", "default", "setting", ")", ".", "Fitting", "a", "straight", "line", "to", "a", "single", "set", "of", "(", "x_true", "y", ")", "data", "using", "linfit", "is", "typically", "2", "to", "10", "times", "faster", "than", "using", "either", "polyfit", "or", "linalg", ".", "lstsq", "especially", "when", "weighting", "is", "used", "and", "for", "very", "large", "data", "sets", ".", "References", "----------", "..", "[", "1", "]", "An", "Introduction", "to", "Error", "Analysis", "2nd", "Ed", ".", "by", "John", "R", ".", "Taylor", "(", "University", "Science", "Books", "1997", ")", "..", "[", "2", "]", "Numerical", "Recipes", "The", "Art", "of", "Scientific", "Computing", "3rd", "Edition", "by", "W", ".", "H", ".", "Press", "S", ".", "A", ".", "Teukolsky", "W", ".", "T", ".", "Vetterling", "&", "B", ".", "P", ".", "Flannery", "(", "Cambridge", "University", "Press", "2007", ")", "Examples", "--------", "Fit", "a", "line", "y", "=", "ax", "+", "b", "through", "some", "noisy", "(", "x_true", "y", ")", "data", "-", "points", "without", "any", "weighting", "(", "sigmay", "=", "None", ")", "to", "obtain", "fitting", "parameters", "a", "and", "b", ":", ">>>", "x_true", "=", "np", ".", "array", "(", "[", "0", "1", "2", "3", "]", ")", ">>>", "y", "=", "np", ".", "array", "(", "[", "-", "1", "0", ".", "2", "0", ".", "9", "2", ".", "1", "]", ")", ">>>", "fit", "=", "linfit", "(", "x_true", "y", ")", ">>>", "print", "(", "a", "=", "{", "0", ":", "0", ".", "2f", "}", "b", "=", "{", "1", ":", "0", ".", "2f", "}", ".", "format", "(", "fit", "[", "0", "]", "fit", "[", "1", "]", "))", "a", "=", "1", ".", "00", "b", "=", "-", "0", ".", "95", "Setting", "cov", "=", "True", "in", "the", "input", "returns", "the", "covariance", "matrix", "cvm", ".", "When", "uncertainties", "sigmay", "are", "left", "unspecified", "meaningful", "estimates", "of", "the", "uncertainties", "da", "and", "db", "in", "the", "fitting", "parameters", "a", "and", "b", "are", "given", "by", "the", "square", "roots", "of", "the", "diagonals", "of", "the", "covariance", "matrix", "cvm", "provided", "relsigma", "=", "True", "(", "the", "default", "state", ")", ".", ">>>", "fit", "cvm", "=", "linfit", "(", "x_true", "y", "cov", "=", "True", ")", ">>>", "dfit", "=", "[", "np", ".", "sqrt", "(", "cvm", "[", "i", "i", "]", ")", "for", "i", "in", "range", "(", "2", ")", "]", ">>>", "print", "(", "da", "=", "{", "0", ":", "0", ".", "2f", "}", "db", "=", "{", "1", ":", "0", ".", "2f", "}", ".", "format", "(", "dfit", "[", "0", "]", "dfit", "[", "1", "]", "))", "da", "=", "0", ".", "07", "db", "=", "0", ".", "13", "A", "better", "practice", "is", "to", "supply", "estimates", "of", "the", "uncertainties", "in", "the", "input", "argument", "sigmay", ".", "sigmay", "can", 
"be", "a", "single", "float", "if", "the", "uncertainties", "are", "the", "same", "for", "all", "data", "points", "or", "it", "can", "be", "an", "array", "if", "the", "uncertainties", "for", "different", "data", "points", "are", "different", ".", "Here", "we", "enter", "sigmay", "as", "an", "array", ".", ">>>", "dy", "=", "np", ".", "array", "(", "[", "0", ".", "18", "0", ".", "13", "0", ".", "15", "0", ".", "17", "]", ")", ">>>", "fit", "cvm", "redchisq", "resids", "=", "linfit", "(", "x_true", "y", "cov", "=", "True", "sigmay", "=", "dy", "relsigma", "=", "False", "chisq", "=", "True", "residuals", "=", "True", ")", ">>>", "print", "(", "a", "=", "{", "0", ":", "0", ".", "2f", "}", "b", "=", "{", "1", ":", "0", ".", "2f", "}", ".", "format", "(", "fit", "[", "0", "]", "fit", "[", "1", "]", "))", "a", "=", "0", ".", "98", "b", "=", "-", "0", ".", "91", ">>>", "dfit", "=", "[", "np", ".", "sqrt", "(", "cvm", "[", "i", "i", "]", ")", "for", "i", "in", "range", "(", "2", ")", "]", ">>>", "print", "(", "da", "=", "{", "0", ":", "0", ".", "2f", "}", "db", "=", "{", "1", ":", "0", ".", "2f", "}", ".", "format", "(", "dfit", "[", "0", "]", "dfit", "[", "1", "]", "))", "da", "=", "0", ".", "08", "db", "=", "0", ".", "14", ">>>", "print", "(", "reduced", "chi", "-", "squared", "=", "{", "0", ":", "0", ".", "2f", "}", ".", "format", "(", "redchisq", "))", "reduced", "chi", "-", "squared", "=", "1", ".", "21", ">>>", "print", "(", "resids", ")", "[", "-", "0", ".", "08856653", "0", ".", "12781099", "-", "0", ".", "1558115", "0", ".", "06056602", "]", "The", "value", "of", "reduced", "chi", "-", "squared", "redchisq", "is", "1", ".", "21", "indicating", "that", "a", "linear", "model", "is", "valid", "for", "these", "data", ".", "The", "residuals", ":", "math", ":", "y_i", "-", "(", "a", "+", "bx_i", ")", "are", "given", "by", "the", "output", "resids", ".", "If", "absolute", "estimates", "of", "the", "uncertainties", "are", "not", "available", "but", "relative", "estimates", "of", "the", "uncertainties", "are", "known", "a", "fit", "can", "be", "obtained", "with", "reasonable", "estimates", "of", "the", "uncertainties", "in", "the", "fitting", "parameters", "by", "setting", "relsigma", "=", "True", ".", ">>>", "dy", "=", "np", ".", "array", "(", "[", "1", ".", "0", "0", ".", "75", "0", ".", "75", "1", ".", "25", "]", ")", ">>>", "fit", "cvm", "redchisq", "=", "linfit", "(", "x_true", "y", "cov", "=", "True", "sigmay", "=", "dy", "relsigma", "=", "True", "chisq", "=", "True", ")", ">>>", "print", "(", "a", "=", "{", "0", ":", "0", ".", "2f", "}", "b", "=", "{", "1", ":", "0", ".", "2f", "}", ".", "format", "(", "fit", "[", "0", "]", "fit", "[", "1", "]", "))", "a", "=", "0", ".", "97", "b", "=", "-", "0", ".", "91", ">>>", "dfit", "=", "[", "np", ".", "sqrt", "(", "cvm", "[", "i", "i", "]", ")", "for", "i", "in", "range", "(", "2", ")", "]", ">>>", "print", "(", "da", "=", "{", "0", ":", "0", ".", "2f", "}", "db", "=", "{", "1", ":", "0", ".", "2f", "}", ".", "format", "(", "dfit", "[", "0", "]", "dfit", "[", "1", "]", "))", "da", "=", "0", ".", "09", "db", "=", "0", ".", "16", ">>>", "print", "(", "reduced", "chi", "-", "squared", "=", "{", "0", ":", "0", ".", "2f", "}", ".", "format", "(", "redchisq", "))", "reduced", "chi", "-", "squared", "=", "0", ".", "04", "In", "this", "case", "the", "value", "redchisq", "is", "meaningless", "because", "only", "the", "relative", "rather", "than", "the", "absolute", "uncertainties", "are", "known", ".", "Nevertheless", "by", "setting", "relsigma", "=", "True", 
"reasonable", "estimates", "for", "the", "uncertainties", "in", "the", "fitting", "parameters", "are", "obtained", ".", "Illustration", ":", "..", "image", "::", "example", ".", "png", ":", "scale", ":", "75", "%" ]
train
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Math_Libraries/fitting_methods.py#L53-L305