code | code_sememe | token_type | code_dependency
---|---|---|---|
def gelman_rubin(x, return_var=False):
""" Returns estimate of R for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain.
If convergence has been achieved, the between-chain and within-chain
variances should be identical. To be most effective in detecting evidence
for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
x : array-like
An array containing two or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
return_var : bool
Flag for returning the marginal posterior variance instead of R-hat (defaults to False).
Returns
-------
Rhat : float
Return the potential scale reduction factor, :math:`\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \sqrt{\frac{\hat{V}}{W}}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is
the posterior variance estimate for the pooled traces. This is the
potential scale reduction factor, which converges to unity when each
of the traces is a sample from the target posterior. Values greater
than one indicate that one or more chains have not yet converged.
References
----------
Brooks and Gelman (1998)
Gelman and Rubin (1992)"""
if np.shape(x) < (2,):
raise ValueError(
'Gelman-Rubin diagnostic requires multiple chains of the same length.')
try:
m, n = np.shape(x)
except ValueError:
return [gelman_rubin(np.transpose(y)) for y in np.transpose(x)]
# Calculate between-chain variance
B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)
# Calculate within-chain variances
W = np.sum(
[(x[i] - xbar) ** 2 for i,
xbar in enumerate(np.mean(x,
1))]) / (m * (n - 1))
# (over) estimate of variance
s2 = W * (n - 1) / n + B_over_n
if return_var:
return s2
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return np.sqrt(R) | def function[gelman_rubin, parameter[x, return_var]]:
constant[ Returns estimate of R for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain.
If convergence has been achieved, the between-chain and within-chain
variances should be identical. To be most effective in detecting evidence
for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
x : array-like
An array containing two or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
return_var : bool
Flag for returning the marginal posterior variance instead of R-hat (defaults to False).
Returns
-------
Rhat : float
Return the potential scale reduction factor, :math:`\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \sqrt{\frac{\hat{V}}{W}}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is
the posterior variance estimate for the pooled traces. This is the
potential scale reduction factor, which converges to unity when each
of the traces is a sample from the target posterior. Values greater
than one indicate that one or more chains have not yet converged.
References
----------
Brooks and Gelman (1998)
Gelman and Rubin (1992)]
if compare[call[name[np].shape, parameter[name[x]]] less[<] tuple[[<ast.Constant object at 0x7da20c7ca350>]]] begin[:]
<ast.Raise object at 0x7da20c7c9570>
<ast.Try object at 0x7da20c7caa10>
variable[B_over_n] assign[=] binary_operation[call[name[np].sum, parameter[binary_operation[binary_operation[call[name[np].mean, parameter[name[x], constant[1]]] - call[name[np].mean, parameter[name[x]]]] ** constant[2]]]] / binary_operation[name[m] - constant[1]]]
variable[W] assign[=] binary_operation[call[name[np].sum, parameter[<ast.ListComp object at 0x7da20c7c8790>]] / binary_operation[name[m] * binary_operation[name[n] - constant[1]]]]
variable[s2] assign[=] binary_operation[binary_operation[binary_operation[name[W] * binary_operation[name[n] - constant[1]]] / name[n]] + name[B_over_n]]
if name[return_var] begin[:]
return[name[s2]]
variable[V] assign[=] binary_operation[name[s2] + binary_operation[name[B_over_n] / name[m]]]
variable[R] assign[=] binary_operation[name[V] / name[W]]
return[call[name[np].sqrt, parameter[name[R]]]] | keyword[def] identifier[gelman_rubin] ( identifier[x] , identifier[return_var] = keyword[False] ):
literal[string]
keyword[if] identifier[np] . identifier[shape] ( identifier[x] )<( literal[int] ,):
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[try] :
identifier[m] , identifier[n] = identifier[np] . identifier[shape] ( identifier[x] )
keyword[except] identifier[ValueError] :
keyword[return] [ identifier[gelman_rubin] ( identifier[np] . identifier[transpose] ( identifier[y] )) keyword[for] identifier[y] keyword[in] identifier[np] . identifier[transpose] ( identifier[x] )]
identifier[B_over_n] = identifier[np] . identifier[sum] (( identifier[np] . identifier[mean] ( identifier[x] , literal[int] )- identifier[np] . identifier[mean] ( identifier[x] ))** literal[int] )/( identifier[m] - literal[int] )
identifier[W] = identifier[np] . identifier[sum] (
[( identifier[x] [ identifier[i] ]- identifier[xbar] )** literal[int] keyword[for] identifier[i] ,
identifier[xbar] keyword[in] identifier[enumerate] ( identifier[np] . identifier[mean] ( identifier[x] ,
literal[int] ))])/( identifier[m] *( identifier[n] - literal[int] ))
identifier[s2] = identifier[W] *( identifier[n] - literal[int] )/ identifier[n] + identifier[B_over_n]
keyword[if] identifier[return_var] :
keyword[return] identifier[s2]
identifier[V] = identifier[s2] + identifier[B_over_n] / identifier[m]
identifier[R] = identifier[V] / identifier[W]
keyword[return] identifier[np] . identifier[sqrt] ( identifier[R] ) | def gelman_rubin(x, return_var=False):
""" Returns estimate of R for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain.
If convergence has been achieved, the between-chain and within-chain
variances should be identical. To be most effective in detecting evidence
for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
x : array-like
An array containing two or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
return_var : bool
Flag for returning the marginal posterior variance instead of R-hat (defaults to False).
Returns
-------
Rhat : float
Return the potential scale reduction factor, :math:`\\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \\hat{R} = \\sqrt{\\frac{\\hat{V}}{W}}
where :math:`W` is the within-chain variance and :math:`\\hat{V}` is
the posterior variance estimate for the pooled traces. This is the
potential scale reduction factor, which converges to unity when each
of the traces is a sample from the target posterior. Values greater
than one indicate that one or more chains have not yet converged.
References
----------
Brooks and Gelman (1998)
Gelman and Rubin (1992)"""
if np.shape(x) < (2,):
raise ValueError('Gelman-Rubin diagnostic requires multiple chains of the same length.') # depends on [control=['if'], data=[]]
try:
(m, n) = np.shape(x) # depends on [control=['try'], data=[]]
except ValueError:
return [gelman_rubin(np.transpose(y)) for y in np.transpose(x)] # depends on [control=['except'], data=[]]
# Calculate between-chain variance
B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)
# Calculate within-chain variances
W = np.sum([(x[i] - xbar) ** 2 for (i, xbar) in enumerate(np.mean(x, 1))]) / (m * (n - 1))
# (over) estimate of variance
s2 = W * (n - 1) / n + B_over_n
if return_var:
return s2 # depends on [control=['if'], data=[]]
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return np.sqrt(R) |
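A minimal usage sketch for the `gelman_rubin` record above, assuming `numpy` is imported as `np` (the snippet itself omits its imports). Well-mixed chains drawn from the same target give an R-hat near 1; chains with dispersed means push it above 1.

import numpy as np

rng = np.random.default_rng(0)
# Four chains of 1000 draws each from the same N(0, 1) target,
# arranged as an (m, n) array of m chains by n samples.
chains = rng.normal(size=(4, 1000))
print(gelman_rubin(chains))            # close to 1.0 for well-mixed chains

# Shifting each chain's mean mimics nonconvergence: R-hat rises above 1.
bad_chains = chains + np.arange(4)[:, None]
print(gelman_rubin(bad_chains))        # substantially greater than 1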
def convert_md_to_rst(md_path, rst_temp_path):
"""
Convert the contents of a file from Markdown to reStructuredText.
Returns the converted text as a Unicode string.
Arguments:
md_path: a path to a UTF-8 encoded Markdown file to convert.
rst_temp_path: a temporary path to which to write the converted contents.
"""
# Pandoc uses the UTF-8 character encoding for both input and output.
command = "pandoc --write=rst --output=%s %s" % (rst_temp_path, md_path)
print("converting with pandoc: %s to %s\n-->%s" % (md_path, rst_temp_path,
command))
if os.path.exists(rst_temp_path):
os.remove(rst_temp_path)
os.system(command)
if not os.path.exists(rst_temp_path):
s = ("Error running: %s\n"
" Did you install pandoc per the %s docstring?" % (command,
__file__))
sys.exit(s)
return read(rst_temp_path) | def function[convert_md_to_rst, parameter[md_path, rst_temp_path]]:
constant[
Convert the contents of a file from Markdown to reStructuredText.
Returns the converted text as a Unicode string.
Arguments:
md_path: a path to a UTF-8 encoded Markdown file to convert.
rst_temp_path: a temporary path to which to write the converted contents.
]
variable[command] assign[=] binary_operation[constant[pandoc --write=rst --output=%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ad600>, <ast.Name object at 0x7da1b26ac4c0>]]]
call[name[print], parameter[binary_operation[constant[converting with pandoc: %s to %s
-->%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26af8e0>, <ast.Name object at 0x7da1b26ae3e0>, <ast.Name object at 0x7da1b26ad3f0>]]]]]
if call[name[os].path.exists, parameter[name[rst_temp_path]]] begin[:]
call[name[os].remove, parameter[name[rst_temp_path]]]
call[name[os].system, parameter[name[command]]]
if <ast.UnaryOp object at 0x7da1b26af6a0> begin[:]
variable[s] assign[=] binary_operation[constant[Error running: %s
Did you install pandoc per the %s docstring?] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26adc30>, <ast.Name object at 0x7da1b26afca0>]]]
call[name[sys].exit, parameter[name[s]]]
return[call[name[read], parameter[name[rst_temp_path]]]] | keyword[def] identifier[convert_md_to_rst] ( identifier[md_path] , identifier[rst_temp_path] ):
literal[string]
identifier[command] = literal[string] %( identifier[rst_temp_path] , identifier[md_path] )
identifier[print] ( literal[string] %( identifier[md_path] , identifier[rst_temp_path] ,
identifier[command] ))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[rst_temp_path] ):
identifier[os] . identifier[remove] ( identifier[rst_temp_path] )
identifier[os] . identifier[system] ( identifier[command] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[rst_temp_path] ):
identifier[s] =( literal[string]
literal[string] %( identifier[command] ,
identifier[__file__] ))
identifier[sys] . identifier[exit] ( identifier[s] )
keyword[return] identifier[read] ( identifier[rst_temp_path] ) | def convert_md_to_rst(md_path, rst_temp_path):
"""
Convert the contents of a file from Markdown to reStructuredText.
Returns the converted text as a Unicode string.
Arguments:
md_path: a path to a UTF-8 encoded Markdown file to convert.
rst_temp_path: a temporary path to which to write the converted contents.
"""
# Pandoc uses the UTF-8 character encoding for both input and output.
command = 'pandoc --write=rst --output=%s %s' % (rst_temp_path, md_path)
print('converting with pandoc: %s to %s\n-->%s' % (md_path, rst_temp_path, command))
if os.path.exists(rst_temp_path):
os.remove(rst_temp_path) # depends on [control=['if'], data=[]]
os.system(command)
if not os.path.exists(rst_temp_path):
s = 'Error running: %s\n Did you install pandoc per the %s docstring?' % (command, __file__)
sys.exit(s) # depends on [control=['if'], data=[]]
return read(rst_temp_path) |
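A hedged alternative sketch to the `os.system` call above: `subprocess.run` with `check=True` surfaces a pandoc failure directly as an exception instead of relying on the output file's absence. The direct `open(...).read()` stands in for the `read` helper, which is not shown in this record.

import subprocess

def convert_md_to_rst_checked(md_path, rst_temp_path):
    # Raises CalledProcessError on a non-zero pandoc exit status.
    subprocess.run(
        ["pandoc", "--write=rst", "--output=" + rst_temp_path, md_path],
        check=True,
    )
    with open(rst_temp_path, encoding="utf-8") as f:
        return f.read()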
def grad(heads, variables, head_grads=None, retain_graph=None, create_graph=False,
train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t variables. Gradients will be
returned as new NDArrays instead of stored into `variable.grad`.
Supports recording gradient graph for computing higher order gradients.
.. note::
Currently only a very limited set of operators support higher order \
gradients.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
variables: NDArray or list of NDArray
Input variables to compute gradients for.
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
retain_graph: bool
Whether to keep computation graph to differentiate again, instead
of clearing history and release memory. Defaults to the same value
as create_graph.
create_graph: bool
Whether to record gradient graph for computing higher order gradients.
train_mode: bool, optional
Whether to do backward for training or prediction.
Returns
-------
NDArray or list of NDArray:
Gradients with respect to variables.
Examples
--------
>>> x = mx.nd.ones((1,))
>>> x.attach_grad()
>>> with mx.autograd.record():
... z = mx.nd.elemwise_add(mx.nd.exp(x), x)
>>> dx = mx.autograd.grad(z, [x], create_graph=True)
>>> print(dx)
[
[ 3.71828175]
<NDArray 1 @cpu(0)>]
"""
head_handles, hgrad_handles = _parse_head(heads, head_grads)
if isinstance(variables, NDArray):
variables = [variables]
else:
assert len(variables), "variables cannot be an empty list."
var_handles = c_handle_array(variables)
retain_graph = retain_graph if retain_graph is not None else create_graph
grad_vars = ctypes.POINTER(NDArrayHandle)()
grad_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
len(var_handles),
var_handles,
ctypes.c_int(retain_graph),
ctypes.c_int(create_graph),
ctypes.c_int(train_mode),
ctypes.byref(grad_vars),
ctypes.byref(grad_stypes)))
ret = [_ndarray_cls(ctypes.cast(grad_vars[i], NDArrayHandle),
stype=grad_stypes[i])
for i in range(len(var_handles))]
if isinstance(variables, NDArray):
return ret[0]
return ret | def function[grad, parameter[heads, variables, head_grads, retain_graph, create_graph, train_mode]]:
constant[Compute the gradients of heads w.r.t variables. Gradients will be
returned as new NDArrays instead of stored into `variable.grad`.
Supports recording gradient graph for computing higher order gradients.
.. note::
Currently only a very limited set of operators support higher order gradients.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
variables: NDArray or list of NDArray
Input variables to compute gradients for.
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
retain_graph: bool
Whether to keep computation graph to differentiate again, instead
of clearing history and release memory. Defaults to the same value
as create_graph.
create_graph: bool
Whether to record gradient graph for computing higher order gradients.
train_mode: bool, optional
Whether to do backward for training or prediction.
Returns
-------
NDArray or list of NDArray:
Gradients with respect to variables.
Examples
--------
>>> x = mx.nd.ones((1,))
>>> x.attach_grad()
>>> with mx.autograd.record():
... z = mx.nd.elemwise_add(mx.nd.exp(x), x)
>>> dx = mx.autograd.grad(z, [x], create_graph=True)
>>> print(dx)
[
[ 3.71828175]
<NDArray 1 @cpu(0)>]
]
<ast.Tuple object at 0x7da1b1ef1e40> assign[=] call[name[_parse_head], parameter[name[heads], name[head_grads]]]
if call[name[isinstance], parameter[name[variables], name[NDArray]]] begin[:]
variable[variables] assign[=] list[[<ast.Name object at 0x7da1b1ef0640>]]
variable[var_handles] assign[=] call[name[c_handle_array], parameter[name[variables]]]
variable[retain_graph] assign[=] <ast.IfExp object at 0x7da1b1e64490>
variable[grad_vars] assign[=] call[call[name[ctypes].POINTER, parameter[name[NDArrayHandle]]], parameter[]]
variable[grad_stypes] assign[=] call[call[name[ctypes].POINTER, parameter[name[ctypes].c_int]], parameter[]]
call[name[check_call], parameter[call[name[_LIB].MXAutogradBackwardEx, parameter[call[name[len], parameter[name[head_handles]]], name[head_handles], name[hgrad_handles], call[name[len], parameter[name[var_handles]]], name[var_handles], call[name[ctypes].c_int, parameter[name[retain_graph]]], call[name[ctypes].c_int, parameter[name[create_graph]]], call[name[ctypes].c_int, parameter[name[train_mode]]], call[name[ctypes].byref, parameter[name[grad_vars]]], call[name[ctypes].byref, parameter[name[grad_stypes]]]]]]]
variable[ret] assign[=] <ast.ListComp object at 0x7da1b1e645e0>
if call[name[isinstance], parameter[name[variables], name[NDArray]]] begin[:]
return[call[name[ret]][constant[0]]]
return[name[ret]] | keyword[def] identifier[grad] ( identifier[heads] , identifier[variables] , identifier[head_grads] = keyword[None] , identifier[retain_graph] = keyword[None] , identifier[create_graph] = keyword[False] ,
identifier[train_mode] = keyword[True] ):
literal[string]
identifier[head_handles] , identifier[hgrad_handles] = identifier[_parse_head] ( identifier[heads] , identifier[head_grads] )
keyword[if] identifier[isinstance] ( identifier[variables] , identifier[NDArray] ):
identifier[variables] =[ identifier[variables] ]
keyword[else] :
keyword[assert] identifier[len] ( identifier[variables] ), literal[string]
identifier[var_handles] = identifier[c_handle_array] ( identifier[variables] )
identifier[retain_graph] = identifier[retain_graph] keyword[if] identifier[retain_graph] keyword[is] keyword[not] keyword[None] keyword[else] identifier[create_graph]
identifier[grad_vars] = identifier[ctypes] . identifier[POINTER] ( identifier[NDArrayHandle] )()
identifier[grad_stypes] = identifier[ctypes] . identifier[POINTER] ( identifier[ctypes] . identifier[c_int] )()
identifier[check_call] ( identifier[_LIB] . identifier[MXAutogradBackwardEx] (
identifier[len] ( identifier[head_handles] ),
identifier[head_handles] ,
identifier[hgrad_handles] ,
identifier[len] ( identifier[var_handles] ),
identifier[var_handles] ,
identifier[ctypes] . identifier[c_int] ( identifier[retain_graph] ),
identifier[ctypes] . identifier[c_int] ( identifier[create_graph] ),
identifier[ctypes] . identifier[c_int] ( identifier[train_mode] ),
identifier[ctypes] . identifier[byref] ( identifier[grad_vars] ),
identifier[ctypes] . identifier[byref] ( identifier[grad_stypes] )))
identifier[ret] =[ identifier[_ndarray_cls] ( identifier[ctypes] . identifier[cast] ( identifier[grad_vars] [ identifier[i] ], identifier[NDArrayHandle] ),
identifier[stype] = identifier[grad_stypes] [ identifier[i] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[var_handles] ))]
keyword[if] identifier[isinstance] ( identifier[variables] , identifier[NDArray] ):
keyword[return] identifier[ret] [ literal[int] ]
keyword[return] identifier[ret] | def grad(heads, variables, head_grads=None, retain_graph=None, create_graph=False, train_mode=True): #pylint: disable=redefined-outer-name
'Compute the gradients of heads w.r.t variables. Gradients will be\n returned as new NDArrays instead of stored into `variable.grad`.\n Supports recording gradient graph for computing higher order gradients.\n\n .. note::\n\n Currently only a very limited set of operators support higher order gradients.\n\n Parameters\n ----------\n heads: NDArray or list of NDArray\n Output NDArray(s)\n variables: NDArray or list of NDArray\n Input variables to compute gradients for.\n head_grads: NDArray or list of NDArray or None\n Gradients with respect to heads.\n retain_graph: bool\n Whether to keep computation graph to differentiate again, instead\n of clearing history and release memory. Defaults to the same value\n as create_graph.\n create_graph: bool\n Whether to record gradient graph for computing higher order\n train_mode: bool, optional\n Whether to do backward for training or prediction.\n\n Returns\n -------\n NDArray or list of NDArray:\n Gradients with respect to variables.\n\n Examples\n --------\n >>> x = mx.nd.ones((1,))\n >>> x.attach_grad()\n >>> with mx.autograd.record():\n ... z = mx.nd.elemwise_add(mx.nd.exp(x), x)\n >>> dx = mx.autograd.grad(z, [x], create_graph=True)\n >>> print(dx)\n [\n [ 3.71828175]\n <NDArray 1 @cpu(0)>]\n '
(head_handles, hgrad_handles) = _parse_head(heads, head_grads)
if isinstance(variables, NDArray):
variables = [variables] # depends on [control=['if'], data=[]]
else:
assert len(variables), 'variables cannot be an empty list.'
var_handles = c_handle_array(variables)
retain_graph = retain_graph if retain_graph is not None else create_graph
grad_vars = ctypes.POINTER(NDArrayHandle)()
grad_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXAutogradBackwardEx(len(head_handles), head_handles, hgrad_handles, len(var_handles), var_handles, ctypes.c_int(retain_graph), ctypes.c_int(create_graph), ctypes.c_int(train_mode), ctypes.byref(grad_vars), ctypes.byref(grad_stypes)))
ret = [_ndarray_cls(ctypes.cast(grad_vars[i], NDArrayHandle), stype=grad_stypes[i]) for i in range(len(var_handles))]
if isinstance(variables, NDArray):
return ret[0] # depends on [control=['if'], data=[]]
return ret |
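A sketch of the higher-order use that `create_graph=True` enables in the `grad` record above, mirroring its docstring example; whether this works for a given operator depends on the limited higher-order coverage the docstring warns about. Assumes `mxnet` is imported as `mx`.

import mxnet as mx

x = mx.nd.ones((1,))
x.attach_grad()
with mx.autograd.record():
    z = mx.nd.elemwise_add(mx.nd.exp(x), x)
# create_graph=True records the gradient computation itself ...
dx = mx.autograd.grad(z, [x], create_graph=True)[0]   # dz/dx = exp(x) + 1
dx.backward()                    # ... so it can be differentiated again
print(x.grad)                    # d2z/dx2 = exp(x), roughly 2.718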
def hrscan(self, name, key_start, key_end, limit=10):
"""
Return a dict mapping key/value in the top ``limit`` keys between
``key_start`` and ``key_end`` within hash ``name`` in descending order
.. note:: The range is (``key_start``, ``key_end``]. The ``key_start``
isn't in the range, but ``key_end`` is.
:param string name: the hash name
:param string key_start: The upper bound (not included) of keys to be
returned; empty string ``''`` means +inf
:param string key_end: The lower bound (included) of keys to be
returned; empty string ``''`` means -inf
:param int limit: number of elements that will be returned.
:return: a dict mapping key/value in descending order
:rtype: OrderedDict
>>> ssdb.hrscan('hash_1', 'g', 'a', 10)
{'f': 'F', 'e': 'E', 'd': 'D', 'c': 'C', 'b': 'B', 'a': 'A'}
>>> ssdb.hrscan('hash_2', 'key7', 'key1', 3)
{'key6': 'log', 'key5': 'e', 'key4': '256'}
>>> ssdb.hrscan('hash_1', 'c', '', 10)
{'b': 'B', 'a': 'A'}
>>> ssdb.hrscan('hash_2', 'keys', '', 10)
{}
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('hrscan', name, key_start, key_end, limit) | def function[hrscan, parameter[self, name, key_start, key_end, limit]]:
constant[
Return a dict mapping key/value in the top ``limit`` keys between
``key_start`` and ``key_end`` within hash ``name`` in descending order
.. note:: The range is (``key_start``, ``key_end``]. The ``key_start``
isn't in the range, but ``key_end`` is.
:param string name: the hash name
:param string key_start: The upper bound(not included) of keys to be
returned, empty string ``''`` means +inf
:param string key_end: The lower bound(included) of keys to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a dict mapping key/value in descending order
:rtype: OrderedDict
>>> ssdb.hrscan('hash_1', 'g', 'a', 10)
{'f': 'F', 'e': 'E', 'd': 'D', 'c': 'C', 'b': 'B', 'a': 'A'}
>>> ssdb.hrscan('hash_2', 'key7', 'key1', 3)
{'key6': 'log', 'key5': 'e', 'key4': '256'}
>>> ssdb.hrscan('hash_1', 'c', '', 10)
{'b': 'B', 'a': 'A'}
>>> ssdb.hrscan('hash_2', 'keys', '', 10)
{}
]
variable[limit] assign[=] call[name[get_positive_integer], parameter[constant[limit], name[limit]]]
return[call[name[self].execute_command, parameter[constant[hrscan], name[name], name[key_start], name[key_end], name[limit]]]] | keyword[def] identifier[hrscan] ( identifier[self] , identifier[name] , identifier[key_start] , identifier[key_end] , identifier[limit] = literal[int] ):
literal[string]
identifier[limit] = identifier[get_positive_integer] ( literal[string] , identifier[limit] )
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[key_start] , identifier[key_end] , identifier[limit] ) | def hrscan(self, name, key_start, key_end, limit=10):
"""
Return a dict mapping key/value in the top ``limit`` keys between
``key_start`` and ``key_end`` within hash ``name`` in descending order
.. note:: The range is (``key_start``, ``key_end``]. The ``key_start``
isn't in the range, but ``key_end`` is.
:param string name: the hash name
:param string key_start: The upper bound(not included) of keys to be
returned, empty string ``''`` means +inf
:param string key_end: The lower bound(included) of keys to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a dict mapping key/value in descending order
:rtype: OrderedDict
>>> ssdb.hrscan('hash_1', 'g', 'a', 10)
{'f': 'F', 'e': 'E', 'd': 'D', 'c': 'C', 'b': 'B', 'a': 'A'}
>>> ssdb.hrscan('hash_2', 'key7', 'key1', 3)
{'key6': 'log', 'key5': 'e', 'key4': '256'}
>>> ssdb.hrscan('hash_1', 'c', '', 10)
{'b': 'B', 'a': 'A'}
>>> ssdb.hrscan('hash_2', 'keys', '', 10)
{}
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('hrscan', name, key_start, key_end, limit) |
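A hedged pagination sketch built on the `hrscan` record above: because the range is (`key_start`, `key_end`] and the scan is descending, feeding the last key of one page back in as the next `key_start` walks the whole hash. `ssdb` stands for a connected client like the one in the doctests.

def hrscan_all(ssdb, name, page_size=100):
    key_start = ''                   # '' means +inf: start from the top
    while True:
        page = ssdb.hrscan(name, key_start, '', page_size)
        if not page:
            break
        for key, value in page.items():
            yield key, value
        key_start = key              # smallest key seen; next page starts below it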
def ping(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
"""Send a ping request to a lambda function.
:param awsclient: client wrapper used to talk to AWS services
:param function_name: name of the lambda function to ping
:param alias_name: alias of the function to invoke
:param version: optional version of the function to invoke
:return: ping response payload
"""
log.debug('sending ping to lambda function: %s', function_name)
payload = '{"ramuda_action": "ping"}' # default to ping event
# reuse invoke
return invoke(awsclient, function_name, payload, invocation_type=None,
alias_name=alias_name, version=version) | def function[ping, parameter[awsclient, function_name, alias_name, version]]:
constant[Send a ping request to a lambda function.
:param awsclient:
:param function_name:
:param alias_name:
:param version:
:return: ping response payload
]
call[name[log].debug, parameter[constant[sending ping to lambda function: %s], name[function_name]]]
variable[payload] assign[=] constant[{"ramuda_action": "ping"}]
return[call[name[invoke], parameter[name[awsclient], name[function_name], name[payload]]]] | keyword[def] identifier[ping] ( identifier[awsclient] , identifier[function_name] , identifier[alias_name] = identifier[ALIAS_NAME] , identifier[version] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[function_name] )
identifier[payload] = literal[string]
keyword[return] identifier[invoke] ( identifier[awsclient] , identifier[function_name] , identifier[payload] , identifier[invocation_type] = keyword[None] ,
identifier[alias_name] = identifier[alias_name] , identifier[version] = identifier[version] ) | def ping(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
"""Send a ping request to a lambda function.
:param awsclient:
:param function_name:
:param alias_name:
:param version:
:return: ping response payload
"""
log.debug('sending ping to lambda function: %s', function_name)
payload = '{"ramuda_action": "ping"}' # default to ping event
# reuse invoke
return invoke(awsclient, function_name, payload, invocation_type=None, alias_name=alias_name, version=version) |
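A minimal usage sketch for the `ping` record above; `awsclient` stands for whatever AWS client wrapper the surrounding codebase passes around, and the function name is hypothetical.

payload = ping(awsclient, 'my-lambda-function')   # hypothetical function name
print(payload)                                    # raw ping response payload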
def _copy_element_meta_data_from_meta_file_data(meta_data, element_m, element_name, element_id):
"""Helper method to assign the meta of the given element
The method assigns the meta data of the elements from the given meta data dictionary. The copied meta data is
then removed from the dictionary.
:param meta_data: The loaded meta data
:param element_m: The element model that is supposed to retrieve the meta data
:param element_name: The name string of the element type in the dictionary
:param element_id: The id of the element
"""
meta_data_element_id = element_name + str(element_id)
meta_data_element = meta_data[meta_data_element_id]
# print(meta_data_element_id, element_m, meta_data_element)
element_m.meta = meta_data_element
del meta_data[meta_data_element_id] | def function[_copy_element_meta_data_from_meta_file_data, parameter[meta_data, element_m, element_name, element_id]]:
constant[Helper method to assign the meta of the given element
The method assigns the meta data of the elements from the given meta data dictionary. The copied meta data is
then removed from the dictionary.
:param meta_data: The loaded meta data
:param element_m: The element model that is supposed to retrieve the meta data
:param element_name: The name string of the element type in the dictionary
:param element_id: The id of the element
]
variable[meta_data_element_id] assign[=] binary_operation[name[element_name] + call[name[str], parameter[name[element_id]]]]
variable[meta_data_element] assign[=] call[name[meta_data]][name[meta_data_element_id]]
name[element_m].meta assign[=] name[meta_data_element]
<ast.Delete object at 0x7da20c76f5b0> | keyword[def] identifier[_copy_element_meta_data_from_meta_file_data] ( identifier[meta_data] , identifier[element_m] , identifier[element_name] , identifier[element_id] ):
literal[string]
identifier[meta_data_element_id] = identifier[element_name] + identifier[str] ( identifier[element_id] )
identifier[meta_data_element] = identifier[meta_data] [ identifier[meta_data_element_id] ]
identifier[element_m] . identifier[meta] = identifier[meta_data_element]
keyword[del] identifier[meta_data] [ identifier[meta_data_element_id] ] | def _copy_element_meta_data_from_meta_file_data(meta_data, element_m, element_name, element_id):
"""Helper method to assign the meta of the given element
The method assigns the meta data of the elements from the given meta data dictionary. The copied meta data is
then removed from the dictionary.
:param meta_data: The loaded meta data
:param element_m: The element model that is supposed to retrieve the meta data
:param element_name: The name string of the element type in the dictionary
:param element_id: The id of the element
"""
meta_data_element_id = element_name + str(element_id)
meta_data_element = meta_data[meta_data_element_id]
# print(meta_data_element_id, element_m, meta_data_element)
element_m.meta = meta_data_element
del meta_data[meta_data_element_id] |
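A worked sketch of the helper above; the `ElementModel` stub and the meta dictionary contents are illustrative only.

class ElementModel:
    def __init__(self):
        self.meta = None

meta_data = {'state42': {'gui': {'position': (10, 20)}}}
element_m = ElementModel()
_copy_element_meta_data_from_meta_file_data(meta_data, element_m, 'state', 42)
print(element_m.meta)   # {'gui': {'position': (10, 20)}}
print(meta_data)        # {} -- the consumed entry was deleted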
def create_user(self, customer_id, name, login, password, role=FastlyRoles.USER, require_new_password=True):
"""Create a user."""
body = self._formdata({
"customer_id": customer_id,
"name": name,
"login": login,
"password": password,
"role": role,
"require_new_password": require_new_password,
}, FastlyUser.FIELDS)
content = self._fetch("/user", method="POST", body=body)
return FastlyUser(self, content) | def function[create_user, parameter[self, customer_id, name, login, password, role, require_new_password]]:
constant[Create a user.]
variable[body] assign[=] call[name[self]._formdata, parameter[dictionary[[<ast.Constant object at 0x7da1b11101c0>, <ast.Constant object at 0x7da1b11125c0>, <ast.Constant object at 0x7da1b1112140>, <ast.Constant object at 0x7da1b1112ce0>, <ast.Constant object at 0x7da1b1111030>, <ast.Constant object at 0x7da1b11118d0>], [<ast.Name object at 0x7da1b1112bf0>, <ast.Name object at 0x7da1b1110460>, <ast.Name object at 0x7da1b11100a0>, <ast.Name object at 0x7da1b1113160>, <ast.Name object at 0x7da1b1113820>, <ast.Name object at 0x7da1b1111d50>]], name[FastlyUser].FIELDS]]
variable[content] assign[=] call[name[self]._fetch, parameter[constant[/user]]]
return[call[name[FastlyUser], parameter[name[self], name[content]]]] | keyword[def] identifier[create_user] ( identifier[self] , identifier[customer_id] , identifier[name] , identifier[login] , identifier[password] , identifier[role] = identifier[FastlyRoles] . identifier[USER] , identifier[require_new_password] = keyword[True] ):
literal[string]
identifier[body] = identifier[self] . identifier[_formdata] ({
literal[string] : identifier[customer_id] ,
literal[string] : identifier[name] ,
literal[string] : identifier[login] ,
literal[string] : identifier[password] ,
literal[string] : identifier[role] ,
literal[string] : identifier[require_new_password] ,
}, identifier[FastlyUser] . identifier[FIELDS] )
identifier[content] = identifier[self] . identifier[_fetch] ( literal[string] , identifier[method] = literal[string] , identifier[body] = identifier[body] )
keyword[return] identifier[FastlyUser] ( identifier[self] , identifier[content] ) | def create_user(self, customer_id, name, login, password, role=FastlyRoles.USER, require_new_password=True):
"""Create a user."""
body = self._formdata({'customer_id': customer_id, 'name': name, 'login': login, 'password': password, 'role': role, 'require_new_password': require_new_password}, FastlyUser.FIELDS)
content = self._fetch('/user', method='POST', body=body)
return FastlyUser(self, content) |
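A minimal usage sketch for the `create_user` record above; `api` stands for an instance of the client class that defines it, and all field values are hypothetical.

user = api.create_user(
    customer_id='x4xCwxxJxGCx123Rx5xTx',   # hypothetical customer id
    name='Jane Doe',
    login='jane@example.com',
    password='s3cret-example',
)
print(user)   # a FastlyUser wrapping the parsed API response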
def raise_or_lock(self, key, timeout):
"""
Checks if the task is locked and raises an exception, else locks
the task. By default, the task and the key expire after 60 minutes
(meaning the task will not be executed and the lock will clear).
"""
acquired = Lock(
self.redis,
key,
timeout=timeout,
blocking=self.blocking,
blocking_timeout=self.blocking_timeout
).acquire()
if not acquired:
# Time remaining in milliseconds
# https://redis.io/commands/pttl
ttl = self.redis.pttl(key)
raise AlreadyQueued(ttl / 1000.) | def function[raise_or_lock, parameter[self, key, timeout]]:
constant[
Checks if the task is locked and raises an exception, else locks
the task. By default, the tasks and the key expire after 60 minutes.
(meaning it will not be executed and the lock will clear).
]
variable[acquired] assign[=] call[call[name[Lock], parameter[name[self].redis, name[key]]].acquire, parameter[]]
if <ast.UnaryOp object at 0x7da1b0865930> begin[:]
variable[ttl] assign[=] call[name[self].redis.pttl, parameter[name[key]]]
<ast.Raise object at 0x7da1b08664a0> | keyword[def] identifier[raise_or_lock] ( identifier[self] , identifier[key] , identifier[timeout] ):
literal[string]
identifier[acquired] = identifier[Lock] (
identifier[self] . identifier[redis] ,
identifier[key] ,
identifier[timeout] = identifier[timeout] ,
identifier[blocking] = identifier[self] . identifier[blocking] ,
identifier[blocking_timeout] = identifier[self] . identifier[blocking_timeout]
). identifier[acquire] ()
keyword[if] keyword[not] identifier[acquired] :
identifier[ttl] = identifier[self] . identifier[redis] . identifier[pttl] ( identifier[key] )
keyword[raise] identifier[AlreadyQueued] ( identifier[ttl] / literal[int] ) | def raise_or_lock(self, key, timeout):
"""
Checks if the task is locked and raises an exception, else locks
the task. By default, the tasks and the key expire after 60 minutes.
(meaning it will not be executed and the lock will clear).
"""
acquired = Lock(self.redis, key, timeout=timeout, blocking=self.blocking, blocking_timeout=self.blocking_timeout).acquire()
if not acquired:
# Time remaining in milliseconds
# https://redis.io/commands/pttl
ttl = self.redis.pttl(key)
raise AlreadyQueued(ttl / 1000.0) # depends on [control=['if'], data=[]] |
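A hedged sketch of the locking behaviour above: the first call acquires the Redis lock, and a second call inside the timeout window raises `AlreadyQueued` carrying the remaining TTL in seconds. `backend` stands for a configured instance of the class that defines `raise_or_lock`.

backend.raise_or_lock('qo_tasks.refresh_cache', timeout=60)       # acquires the lock
try:
    backend.raise_or_lock('qo_tasks.refresh_cache', timeout=60)   # same key, still locked
except AlreadyQueued as exc:
    # exc was built from pttl / 1000, the lock's remaining lifetime in seconds
    print('duplicate task blocked:', exc)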
def send_data(socket, data, scan_parameters={}, name='ReadoutData'):
'''Sends the data of each readout (raw data and meta data) via ZeroMQ to a specified socket.
'''
if not scan_parameters:
scan_parameters = {}
data_meta_data = dict(
name=name,
dtype=str(data[0].dtype),
shape=data[0].shape,
timestamp_start=data[1], # float
timestamp_stop=data[2], # float
readout_error=data[3], # int
scan_parameters=scan_parameters # dict
)
try:
socket.send_json(data_meta_data, flags=zmq.SNDMORE | zmq.NOBLOCK)
socket.send(data[0], flags=zmq.NOBLOCK) # PyZMQ supports sending numpy arrays without copying any data
except zmq.Again:
pass | def function[send_data, parameter[socket, data, scan_parameters, name]]:
constant[Sends the data of every read out (raw data and meta data) via ZeroMQ to a specified socket
]
if <ast.UnaryOp object at 0x7da1b11a2c20> begin[:]
variable[scan_parameters] assign[=] dictionary[[], []]
variable[data_meta_data] assign[=] call[name[dict], parameter[]]
<ast.Try object at 0x7da1b11a3070> | keyword[def] identifier[send_data] ( identifier[socket] , identifier[data] , identifier[scan_parameters] ={}, identifier[name] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[scan_parameters] :
identifier[scan_parameters] ={}
identifier[data_meta_data] = identifier[dict] (
identifier[name] = identifier[name] ,
identifier[dtype] = identifier[str] ( identifier[data] [ literal[int] ]. identifier[dtype] ),
identifier[shape] = identifier[data] [ literal[int] ]. identifier[shape] ,
identifier[timestamp_start] = identifier[data] [ literal[int] ],
identifier[timestamp_stop] = identifier[data] [ literal[int] ],
identifier[readout_error] = identifier[data] [ literal[int] ],
identifier[scan_parameters] = identifier[scan_parameters]
)
keyword[try] :
identifier[socket] . identifier[send_json] ( identifier[data_meta_data] , identifier[flags] = identifier[zmq] . identifier[SNDMORE] | identifier[zmq] . identifier[NOBLOCK] )
identifier[socket] . identifier[send] ( identifier[data] [ literal[int] ], identifier[flags] = identifier[zmq] . identifier[NOBLOCK] )
keyword[except] identifier[zmq] . identifier[Again] :
keyword[pass] | def send_data(socket, data, scan_parameters={}, name='ReadoutData'):
"""Sends the data of every read out (raw data and meta data) via ZeroMQ to a specified socket
"""
if not scan_parameters:
scan_parameters = {} # depends on [control=['if'], data=[]] # float
# float
# int
# dict
data_meta_data = dict(name=name, dtype=str(data[0].dtype), shape=data[0].shape, timestamp_start=data[1], timestamp_stop=data[2], readout_error=data[3], scan_parameters=scan_parameters)
try:
socket.send_json(data_meta_data, flags=zmq.SNDMORE | zmq.NOBLOCK)
socket.send(data[0], flags=zmq.NOBLOCK) # PyZMQ supports sending numpy arrays without copying any data # depends on [control=['try'], data=[]]
except zmq.Again:
pass # depends on [control=['except'], data=[]] |
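A hedged sketch of the matching receiver for `send_data` above: the metadata arrives as a JSON message (sent with `SNDMORE`) followed by the raw array bytes, which numpy can reinterpret without copying. The socket type and endpoint are assumptions.

import numpy as np
import zmq

context = zmq.Context()
socket = context.socket(zmq.PULL)        # socket type is an assumption
socket.connect('tcp://127.0.0.1:5678')   # hypothetical endpoint

meta = socket.recv_json()                # name, dtype, shape, timestamps, ...
buf = socket.recv()                      # second frame: the raw array bytes
data = np.frombuffer(buf, dtype=meta['dtype']).reshape(meta['shape'])
print(meta['name'], data.shape, meta['readout_error'])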
def present(name,
vname=None,
vdata=None,
vtype='REG_SZ',
use_32bit_registry=False,
win_owner=None,
win_perms=None,
win_deny_perms=None,
win_inheritance=True,
win_perms_reset=False):
r'''
Ensure a registry key or value is present.
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (``vname``) is passed,
this will be the data for that value name. If not, this will be the
``(Default)`` value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary automatically. To pass binary data, use the built-in
yaml tag ``!!binary`` to denote the actual binary
characters. For example, the following lines will both set
the same data in the registry:
- ``vdata: Salty Test``
- ``vdata: !!binary U2FsdHkgVGVzdA==\n``
For more information about the ``!!binary`` tag see
`here <http://yaml.org/type/binary.html>`_
.. note::
The type for the ``(Default)`` value is always REG_SZ and cannot
be changed. This parameter is optional. If not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type for the data you wish to store in the registry. Valid
values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ (Default)
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
win_owner (str):
The owner of the registry key. If this is not passed, the account
under which Salt is running will be used.
.. note::
Owner is set for the key that contains the value/data pair. You
cannot set ownership on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms (dict):
A dictionary containing permissions to grant and their propagation.
If not passed, the ``Grant`` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
For each user specify the account name, with a sub dict for the
permissions to grant and the 'Applies to' setting. For example:
``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_key_subkeys'}}``. ``perms`` must be specified.
Registry permissions are specified using the ``perms`` key. You can
specify a single basic permission or a list of advanced perms. The
following are valid perms:
Basic (passed as a string):
- full_control
- read
- write
Advanced (passed as a list):
- delete
- query_value
- set_value
- create_subkey
- enum_subkeys
- notify
- create_link
- read_control
- write_dac
- write_owner
The 'Applies to' setting is optional. It is specified using the
``applies_to`` key. If not specified ``this_key_subkeys`` is used.
Valid options are:
Applies to settings:
- this_key_only
- this_key_subkeys
- subkeys_only
.. versionadded:: 2019.2.0
win_deny_perms (dict):
A dictionary containing permissions to deny and their propagation.
If not passed, the ``Deny`` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
Valid options are the same as those specified in ``win_perms``
.. note::
'Deny' permissions always take precedence over 'grant'
permissions.
.. versionadded:: 2019.2.0
win_inheritance (bool):
``True`` to inherit permissions from the parent key. ``False`` to
disable inheritance. Default is ``True``.
.. note::
Inheritance is set for the key that contains the value/data
pair. You cannot set inheritance on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms_reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``
.. note::
Perms are reset for the key that contains the value/data pair.
You cannot set permissions on value/data pairs themselves.
.. versionadded:: 2019.2.0
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
Example:
The following example will set the value for the ``version`` entry under
the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``. The value will be reflected in ``Wow6432Node``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
Example:
Binary data can be set in two ways. The following two examples will set
a binary value of ``Salty Test``
.. code-block:: yaml
no_conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state
- vdata: Salty Test
- vtype: REG_BINARY
conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state_with_tag
- vdata: !!binary U2FsdHkgVGVzdA==\n
- vtype: REG_BINARY
Example:
To set a ``REG_MULTI_SZ`` value:
.. code-block:: yaml
reg_multi_sz:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: reg_multi_sz
- vdata:
- list item 1
- list item 2
Example:
To ensure a key is present and has permissions:
.. code-block:: yaml
set_key_permissions:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: version
- vdata: 2016.3.1
- win_owner: Administrators
- win_perms:
jsnuffy:
perms: full_control
sjones:
perms:
- read_control
- enum_subkeys
- query_value
applies_to:
- this_key_only
- win_deny_perms:
bsimpson:
perms: full_control
applies_to: this_key_subkeys
- win_inheritance: True
- win_perms_reset: True
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_current = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
# Check if the key already exists
# If so, check perms
# We check `vdata` and `success` because `vdata` can be None
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already present' \
''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)',
salt.utils.stringutils.to_unicode(name, 'utf-8'))
return __utils__['dacl.check_perms'](
obj_name='\\'.join([hive, key]),
obj_type='registry32' if use_32bit_registry else 'registry',
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
# Cast the vdata according to the vtype
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'),
'Value': vdata_decoded,
'Owner': win_owner,
'Perms': {'Grant': win_perms,
'Deny': win_deny_perms},
'Inheritance': win_inheritance}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
# Configure the value
ret['result'] = __utils__['reg.set_value'](hive=hive,
key=key,
vname=vname,
vdata=vdata,
vtype=vtype,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
if ret['result']:
ret = __utils__['dacl.check_perms'](
obj_name='\\'.join([hive, key]),
obj_type='registry32' if use_32bit_registry else 'registry',
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
return ret | def function[present, parameter[name, vname, vdata, vtype, use_32bit_registry, win_owner, win_perms, win_deny_perms, win_inheritance, win_perms_reset]]:
constant[
Ensure a registry key or value is present.
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (``vname``) is passed,
this will be the data for that value name. If not, this will be the
``(Default)`` value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary automatically. To pass binary data, use the built-in
yaml tag ``!!binary`` to denote the actual binary
characters. For example, the following lines will both set
the same data in the registry:
- ``vdata: Salty Test``
- ``vdata: !!binary U2FsdHkgVGVzdA==\n``
For more information about the ``!!binary`` tag see
`here <http://yaml.org/type/binary.html>`_
.. note::
The type for the ``(Default)`` value is always REG_SZ and cannot
be changed. This parameter is optional. If not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type for the data you wish to store in the registry. Valid
values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ (Default)
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
win_owner (str):
The owner of the registry key. If this is not passed, the account
under which Salt is running will be used.
.. note::
Owner is set for the key that contains the value/data pair. You
cannot set ownership on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms (dict):
A dictionary containing permissions to grant and their propagation.
If not passed, the ``Grant`` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
For each user specify the account name, with a sub dict for the
permissions to grant and the 'Applies to' setting. For example:
``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_key_subkeys'}}``. ``perms`` must be specified.
Registry permissions are specified using the ``perms`` key. You can
specify a single basic permission or a list of advanced perms. The
following are valid perms:
Basic (passed as a string):
- full_control
- read
- write
Advanced (passed as a list):
- delete
- query_value
- set_value
- create_subkey
- enum_subkeys
- notify
- create_link
- read_control
- write_dac
- write_owner
The 'Applies to' setting is optional. It is specified using the
``applies_to`` key. If not specified ``this_key_subkeys`` is used.
Valid options are:
Applies to settings:
- this_key_only
- this_key_subkeys
- subkeys_only
.. versionadded:: 2019.2.0
win_deny_perms (dict):
A dictionary containing permissions to deny and their propagation.
If not passed, the ``Deny`` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
Valid options are the same as those specified in ``win_perms``
.. note::
'Deny' permissions always take precedence over 'grant'
permissions.
.. versionadded:: 2019.2.0
win_inheritance (bool):
``True`` to inherit permissions from the parent key. ``False`` to
disable inheritance. Default is ``True``.
.. note::
Inheritance is set for the key that contains the value/data
pair. You cannot set inheritance on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms_reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``
.. note::
Perms are reset for the key that contains the value/data pair.
You cannot set permissions on value/data pairs themselves.
.. versionadded:: 2019.2.0
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
Example:
The following example will set the value for the ``version`` entry under
the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``. The value will be reflected in ``Wow6432Node``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
Example:
Binary data can be set in two ways. The following two examples will set
a binary value of ``Salty Test``
.. code-block:: yaml
no_conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state
- vdata: Salty Test
- vtype: REG_BINARY
conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state_with_tag
- vdata: !!binary U2FsdHkgVGVzdA==\n
- vtype: REG_BINARY
Example:
To set a ``REG_MULTI_SZ`` value:
.. code-block:: yaml
reg_multi_sz:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: reg_multi_sz
- vdata:
- list item 1
- list item 2
Example:
To ensure a key is present and has permissions:
.. code-block:: yaml
set_key_permissions:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: version
- vdata: 2016.3.1
- win_owner: Administrators
- win_perms:
jsnuffy:
perms: full_control
sjones:
perms:
- read_control
- enum_subkeys
- query_value
applies_to:
- this_key_only
- win_deny_perms:
bsimpson:
perms: full_control
applies_to: this_key_subkeys
- win_inheritance: True
- win_perms_reset: True
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c14670>, <ast.Constant object at 0x7da1b1c146d0>, <ast.Constant object at 0x7da1b1c14880>, <ast.Constant object at 0x7da1b1c14910>], [<ast.Name object at 0x7da1b1c14760>, <ast.Constant object at 0x7da1b1c14730>, <ast.Dict object at 0x7da1b1c14a00>, <ast.Constant object at 0x7da1b1c14be0>]]
<ast.Tuple object at 0x7da1b1c156c0> assign[=] call[name[_parse_key], parameter[name[name]]]
variable[reg_current] assign[=] call[call[name[__utils__]][constant[reg.read_value]], parameter[]]
if <ast.BoolOp object at 0x7da1b1c176d0> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[{0} in {1} is already present].format, parameter[<ast.IfExp object at 0x7da1b1c14fd0>, call[name[salt].utils.stringutils.to_unicode, parameter[name[name], constant[utf-8]]]]]
return[call[call[name[__utils__]][constant[dacl.check_perms]], parameter[]]]
variable[vdata_decoded] assign[=] call[call[name[__utils__]][constant[reg.cast_vdata]], parameter[]]
variable[add_change] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c14610>, <ast.Constant object at 0x7da1b1c14d00>, <ast.Constant object at 0x7da1b1c142b0>, <ast.Constant object at 0x7da1b1c15360>, <ast.Constant object at 0x7da1b1c17fd0>, <ast.Constant object at 0x7da1b1c17f40>], [<ast.Call object at 0x7da1b1c17e80>, <ast.Call object at 0x7da1b1c154e0>, <ast.Name object at 0x7da1b1c15db0>, <ast.Name object at 0x7da1b1c15e10>, <ast.Dict object at 0x7da1b1c16890>, <ast.Name object at 0x7da1b1c14a60>]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c14df0>], [<ast.Dict object at 0x7da1b1c15ab0>]]
return[name[ret]]
call[name[ret]][constant[result]] assign[=] call[call[name[__utils__]][constant[reg.set_value]], parameter[]]
if <ast.UnaryOp object at 0x7da1b1c17be0> begin[:]
call[name[ret]][constant[changes]] assign[=] dictionary[[], []]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to add {0} to {1}\{2}].format, parameter[name[name], name[hive], name[key]]]
if call[name[ret]][constant[result]] begin[:]
variable[ret] assign[=] call[call[name[__utils__]][constant[dacl.check_perms]], parameter[]]
return[name[ret]] | keyword[def] identifier[present] ( identifier[name] ,
identifier[vname] = keyword[None] ,
identifier[vdata] = keyword[None] ,
identifier[vtype] = literal[string] ,
identifier[use_32bit_registry] = keyword[False] ,
identifier[win_owner] = keyword[None] ,
identifier[win_perms] = keyword[None] ,
identifier[win_deny_perms] = keyword[None] ,
identifier[win_inheritance] = keyword[True] ,
identifier[win_perms_reset] = keyword[False] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : keyword[True] ,
literal[string] :{},
literal[string] : literal[string] }
identifier[hive] , identifier[key] = identifier[_parse_key] ( identifier[name] )
identifier[reg_current] = identifier[__utils__] [ literal[string] ]( identifier[hive] = identifier[hive] ,
identifier[key] = identifier[key] ,
identifier[vname] = identifier[vname] ,
identifier[use_32bit_registry] = identifier[use_32bit_registry] )
keyword[if] identifier[vdata] == identifier[reg_current] [ literal[string] ] keyword[and] identifier[reg_current] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] literal[string] . identifier[format] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[vname] , literal[string] ) keyword[if] identifier[vname] keyword[else] literal[string] ,
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[name] , literal[string] ))
keyword[return] identifier[__utils__] [ literal[string] ](
identifier[obj_name] = literal[string] . identifier[join] ([ identifier[hive] , identifier[key] ]),
identifier[obj_type] = literal[string] keyword[if] identifier[use_32bit_registry] keyword[else] literal[string] ,
identifier[ret] = identifier[ret] ,
identifier[owner] = identifier[win_owner] ,
identifier[grant_perms] = identifier[win_perms] ,
identifier[deny_perms] = identifier[win_deny_perms] ,
identifier[inheritance] = identifier[win_inheritance] ,
identifier[reset] = identifier[win_perms_reset] )
identifier[vdata_decoded] = identifier[__utils__] [ literal[string] ]( identifier[vdata] = identifier[vdata] , identifier[vtype] = identifier[vtype] )
identifier[add_change] ={ literal[string] : literal[string] . identifier[format] ( identifier[hive] , identifier[key] ),
literal[string] : literal[string] . identifier[format] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[vname] , literal[string] ) keyword[if] identifier[vname] keyword[else] literal[string] ),
literal[string] : identifier[vdata_decoded] ,
literal[string] : identifier[win_owner] ,
literal[string] :{ literal[string] : identifier[win_perms] ,
literal[string] : identifier[win_deny_perms] },
literal[string] : identifier[win_inheritance] }
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]={ literal[string] :{ literal[string] : identifier[add_change] }}
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= identifier[__utils__] [ literal[string] ]( identifier[hive] = identifier[hive] ,
identifier[key] = identifier[key] ,
identifier[vname] = identifier[vname] ,
identifier[vdata] = identifier[vdata] ,
identifier[vtype] = identifier[vtype] ,
identifier[use_32bit_registry] = identifier[use_32bit_registry] )
keyword[if] keyword[not] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]={}
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] , identifier[hive] , identifier[key] )
keyword[else] :
identifier[ret] [ literal[string] ]={ literal[string] :{ literal[string] : identifier[add_change] }}
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] , identifier[hive] , identifier[key] )
keyword[if] identifier[ret] [ literal[string] ]:
identifier[ret] = identifier[__utils__] [ literal[string] ](
identifier[obj_name] = literal[string] . identifier[join] ([ identifier[hive] , identifier[key] ]),
identifier[obj_type] = literal[string] keyword[if] identifier[use_32bit_registry] keyword[else] literal[string] ,
identifier[ret] = identifier[ret] ,
identifier[owner] = identifier[win_owner] ,
identifier[grant_perms] = identifier[win_perms] ,
identifier[deny_perms] = identifier[win_deny_perms] ,
identifier[inheritance] = identifier[win_inheritance] ,
identifier[reset] = identifier[win_perms_reset] )
keyword[return] identifier[ret] | def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=True, win_perms_reset=False):
"""
Ensure a registry key or value is present.
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (``vname``) is passed,
this will be the data for that value name. If not, this will be the
``(Default)`` value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary automatically. To pass binary data, use the built-in
yaml tag ``!!binary`` to denote the actual binary
characters. For example, the following lines will both set
the same data in the registry:
- ``vdata: Salty Test``
- ``vdata: !!binary U2FsdHkgVGVzdA==\\n``
For more information about the ``!!binary`` tag see
`here <http://yaml.org/type/binary.html>`_
.. note::
The type for the ``(Default)`` value is always REG_SZ and cannot
be changed. This parameter is optional. If not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type for the data you wish to store in the registry. Valid
values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ (Default)
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
            Windows. 32bit Windows will ignore this parameter. Default is False.
win_owner (str):
The owner of the registry key. If this is not passed, the account
under which Salt is running will be used.
.. note::
Owner is set for the key that contains the value/data pair. You
cannot set ownership on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms (dict):
A dictionary containing permissions to grant and their propagation.
            If not passed, the `Grant` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
For each user specify the account name, with a sub dict for the
permissions to grant and the 'Applies to' setting. For example:
``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_key_subkeys'}}``. ``perms`` must be specified.
Registry permissions are specified using the ``perms`` key. You can
specify a single basic permission or a list of advanced perms. The
following are valid perms:
Basic (passed as a string):
- full_control
- read
- write
Advanced (passed as a list):
- delete
- query_value
- set_value
- create_subkey
- enum_subkeys
- notify
- create_link
- read_control
- write_dac
- write_owner
The 'Applies to' setting is optional. It is specified using the
``applies_to`` key. If not specified ``this_key_subkeys`` is used.
Valid options are:
Applies to settings:
- this_key_only
- this_key_subkeys
- subkeys_only
.. versionadded:: 2019.2.0
win_deny_perms (dict):
A dictionary containing permissions to deny and their propagation.
            If not passed, the `Deny` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
Valid options are the same as those specified in ``win_perms``
.. note::
                'Deny' permissions always take precedence over 'Grant'
permissions.
.. versionadded:: 2019.2.0
win_inheritance (bool):
``True`` to inherit permissions from the parent key. ``False`` to
disable inheritance. Default is ``True``.
.. note::
Inheritance is set for the key that contains the value/data
pair. You cannot set inheritance on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms_reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``
.. note::
Perms are reset for the key that contains the value/data pair.
You cannot set permissions on value/data pairs themselves.
.. versionadded:: 2019.2.0
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
The following example will set the ``(Default)`` value for the
``SOFTWARE\\\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``:
.. code-block:: yaml
HKEY_CURRENT_USER\\\\SOFTWARE\\\\Salt:
reg.present:
- vdata: 2016.3.1
Example:
The following example will set the value for the ``version`` entry under
the ``SOFTWARE\\\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``. The value will be reflected in ``Wow6432Node``:
.. code-block:: yaml
HKEY_CURRENT_USER\\\\SOFTWARE\\\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
Example:
Binary data can be set in two ways. The following two examples will set
a binary value of ``Salty Test``
.. code-block:: yaml
no_conversion:
reg.present:
- name: HKLM\\SOFTWARE\\SaltTesting
- vname: test_reg_binary_state
- vdata: Salty Test
- vtype: REG_BINARY
conversion:
reg.present:
- name: HKLM\\SOFTWARE\\SaltTesting
- vname: test_reg_binary_state_with_tag
- vdata: !!binary U2FsdHkgVGVzdA==\\n
- vtype: REG_BINARY
Example:
To set a ``REG_MULTI_SZ`` value:
.. code-block:: yaml
reg_multi_sz:
reg.present:
- name: HKLM\\SOFTWARE\\Salt
- vname: reg_multi_sz
- vdata:
- list item 1
- list item 2
Example:
To ensure a key is present and has permissions:
.. code-block:: yaml
set_key_permissions:
reg.present:
- name: HKLM\\SOFTWARE\\Salt
- vname: version
- vdata: 2016.3.1
- win_owner: Administrators
- win_perms:
jsnuffy:
perms: full_control
sjones:
perms:
- read_control
- enum_subkeys
- query_value
applies_to:
- this_key_only
- win_deny_perms:
bsimpson:
perms: full_control
applies_to: this_key_subkeys
- win_inheritance: True
- win_perms_reset: True
"""
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
# Determine what to do
reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
# Check if the key already exists
# If so, check perms
# We check `vdata` and `success` because `vdata` can be None
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already present'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)', salt.utils.stringutils.to_unicode(name, 'utf-8'))
return __utils__['dacl.check_perms'](obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) # depends on [control=['if'], data=[]]
# Cast the vdata according to the vtype
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': '{0}\\{1}'.format(hive, key), 'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), 'Value': vdata_decoded, 'Owner': win_owner, 'Perms': {'Grant': win_perms, 'Deny': win_deny_perms}, 'Inheritance': win_inheritance}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret # depends on [control=['if'], data=[]]
# Configure the value
ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = 'Failed to add {0} to {1}\\{2}'.format(name, hive, key) # depends on [control=['if'], data=[]]
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = 'Added {0} to {1}\\{2}'.format(name, hive, key)
if ret['result']:
ret = __utils__['dacl.check_perms'](obj_name='\\'.join([hive, key]), obj_type='registry32' if use_32bit_registry else 'registry', ret=ret, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance, reset=win_perms_reset) # depends on [control=['if'], data=[]]
return ret |
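A minimal sketch of the `_parse_key` helper this state relies on, assuming it simply splits the hive from the key path (Salt's real helper may also normalize hive aliases):

def _parse_key(name):
    # split 'HKLM\\SOFTWARE\\Salt' into ('HKLM', 'SOFTWARE\\Salt')
    hive, _, key = name.partition('\\')
    return hive, key

print(_parse_key('HKLM\\SOFTWARE\\Salt'))  # ('HKLM', 'SOFTWARE\\Salt')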
def get_satellite5_info(self, branch_info):
"""
Get remote_leaf for Satellite 5 Managed box
"""
logger.debug(
"Remote branch not -1 but remote leaf is -1, must be Satellite 5")
if os.path.isfile('/etc/sysconfig/rhn/systemid'):
logger.debug("Found systemid file")
sat5_conf = ET.parse('/etc/sysconfig/rhn/systemid').getroot()
leaf_id = None
            for member in sat5_conf.iter('member'):  # getiterator() was removed in Python 3.9
if member.find('name').text == 'system_id':
logger.debug("Found member 'system_id'")
leaf_id = member.find('value').find(
'string').text.split('ID-')[1]
logger.debug("Found leaf id: %s", leaf_id)
branch_info['remote_leaf'] = leaf_id
if leaf_id is None:
logger.error("Could not determine leaf_id! Exiting!")
return False | def function[get_satellite5_info, parameter[self, branch_info]]:
constant[
Get remote_leaf for Satellite 5 Managed box
]
call[name[logger].debug, parameter[constant[Remote branch not -1 but remote leaf is -1, must be Satellite 5]]]
if call[name[os].path.isfile, parameter[constant[/etc/sysconfig/rhn/systemid]]] begin[:]
call[name[logger].debug, parameter[constant[Found systemid file]]]
variable[sat5_conf] assign[=] call[call[name[ET].parse, parameter[constant[/etc/sysconfig/rhn/systemid]]].getroot, parameter[]]
variable[leaf_id] assign[=] constant[None]
for taget[name[member]] in starred[call[name[sat5_conf].getiterator, parameter[constant[member]]]] begin[:]
if compare[call[name[member].find, parameter[constant[name]]].text equal[==] constant[system_id]] begin[:]
call[name[logger].debug, parameter[constant[Found member 'system_id']]]
variable[leaf_id] assign[=] call[call[call[call[name[member].find, parameter[constant[value]]].find, parameter[constant[string]]].text.split, parameter[constant[ID-]]]][constant[1]]
call[name[logger].debug, parameter[constant[Found leaf id: %s], name[leaf_id]]]
call[name[branch_info]][constant[remote_leaf]] assign[=] name[leaf_id]
if compare[name[leaf_id] is constant[None]] begin[:]
call[name[logger].error, parameter[constant[Could not determine leaf_id! Exiting!]]]
return[constant[False]] | keyword[def] identifier[get_satellite5_info] ( identifier[self] , identifier[branch_info] ):
literal[string]
identifier[logger] . identifier[debug] (
literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[sat5_conf] = identifier[ET] . identifier[parse] ( literal[string] ). identifier[getroot] ()
identifier[leaf_id] = keyword[None]
keyword[for] identifier[member] keyword[in] identifier[sat5_conf] . identifier[getiterator] ( literal[string] ):
keyword[if] identifier[member] . identifier[find] ( literal[string] ). identifier[text] == literal[string] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[leaf_id] = identifier[member] . identifier[find] ( literal[string] ). identifier[find] (
literal[string] ). identifier[text] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[logger] . identifier[debug] ( literal[string] , identifier[leaf_id] )
identifier[branch_info] [ literal[string] ]= identifier[leaf_id]
keyword[if] identifier[leaf_id] keyword[is] keyword[None] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[False] | def get_satellite5_info(self, branch_info):
"""
Get remote_leaf for Satellite 5 Managed box
"""
logger.debug('Remote branch not -1 but remote leaf is -1, must be Satellite 5')
if os.path.isfile('/etc/sysconfig/rhn/systemid'):
logger.debug('Found systemid file')
sat5_conf = ET.parse('/etc/sysconfig/rhn/systemid').getroot()
leaf_id = None
        for member in sat5_conf.iter('member'):  # getiterator() was removed in Python 3.9
if member.find('name').text == 'system_id':
logger.debug("Found member 'system_id'")
leaf_id = member.find('value').find('string').text.split('ID-')[1]
logger.debug('Found leaf id: %s', leaf_id)
branch_info['remote_leaf'] = leaf_id # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['member']]
if leaf_id is None:
logger.error('Could not determine leaf_id! Exiting!')
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
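For illustration, the systemid file is an XML-RPC style document; a reduced, hypothetical example parsed the same way:

import xml.etree.ElementTree as ET

SYSTEMID = (
    '<params><param><value><struct>'
    '<member><name>system_id</name>'
    '<value><string>ID-1000010000</string></value></member>'
    '</struct></value></param></params>'
)
root = ET.fromstring(SYSTEMID)
for member in root.iter('member'):
    if member.find('name').text == 'system_id':
        print(member.find('value').find('string').text.split('ID-')[1])  # 1000010000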
def diff(self, dt=None, abs=True):
"""
        Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
"""
if dt is None:
dt = pendulum.now().time()
else:
dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
us1 = (
self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
) * USECS_PER_SEC
us2 = (
dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
) * USECS_PER_SEC
klass = Duration
if abs:
klass = AbsoluteDuration
return klass(microseconds=us2 - us1) | def function[diff, parameter[self, dt, abs]]:
constant[
    Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
]
if compare[name[dt] is constant[None]] begin[:]
variable[dt] assign[=] call[call[name[pendulum].now, parameter[]].time, parameter[]]
variable[us1] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[self].hour * name[SECS_PER_HOUR]] + binary_operation[name[self].minute * name[SECS_PER_MIN]]] + name[self].second] * name[USECS_PER_SEC]]
variable[us2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[dt].hour * name[SECS_PER_HOUR]] + binary_operation[name[dt].minute * name[SECS_PER_MIN]]] + name[dt].second] * name[USECS_PER_SEC]]
variable[klass] assign[=] name[Duration]
if name[abs] begin[:]
variable[klass] assign[=] name[AbsoluteDuration]
return[call[name[klass], parameter[]]] | keyword[def] identifier[diff] ( identifier[self] , identifier[dt] = keyword[None] , identifier[abs] = keyword[True] ):
literal[string]
keyword[if] identifier[dt] keyword[is] keyword[None] :
identifier[dt] = identifier[pendulum] . identifier[now] (). identifier[time] ()
keyword[else] :
identifier[dt] = identifier[self] . identifier[__class__] ( identifier[dt] . identifier[hour] , identifier[dt] . identifier[minute] , identifier[dt] . identifier[second] , identifier[dt] . identifier[microsecond] )
identifier[us1] =(
identifier[self] . identifier[hour] * identifier[SECS_PER_HOUR] + identifier[self] . identifier[minute] * identifier[SECS_PER_MIN] + identifier[self] . identifier[second]
)* identifier[USECS_PER_SEC]
identifier[us2] =(
identifier[dt] . identifier[hour] * identifier[SECS_PER_HOUR] + identifier[dt] . identifier[minute] * identifier[SECS_PER_MIN] + identifier[dt] . identifier[second]
)* identifier[USECS_PER_SEC]
identifier[klass] = identifier[Duration]
keyword[if] identifier[abs] :
identifier[klass] = identifier[AbsoluteDuration]
keyword[return] identifier[klass] ( identifier[microseconds] = identifier[us2] - identifier[us1] ) | def diff(self, dt=None, abs=True):
"""
    Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
"""
if dt is None:
dt = pendulum.now().time() # depends on [control=['if'], data=['dt']]
else:
dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
us1 = (self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second) * USECS_PER_SEC
us2 = (dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second) * USECS_PER_SEC
klass = Duration
if abs:
klass = AbsoluteDuration # depends on [control=['if'], data=[]]
return klass(microseconds=us2 - us1) |
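Typical use, assuming pendulum 2.x, which exports `Time` at the top level and implements `in_minutes` on the returned Duration:

import pendulum

t1 = pendulum.Time(10, 0, 0)
t2 = pendulum.Time(12, 30, 0)
print(t1.diff(t2).in_minutes())             # 150 (absolute by default)
print(t2.diff(t1, abs=False).in_minutes())  # -150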
def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self._detectors.values():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth')
all_filths.append(filth)
# Sort by start position. If two filths start in the same place then
# return the longer one first
all_filths.sort(key=lambda f: (f.beg, -f.end))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
        if not all_filths:
            # PEP 479 (Python 3.7+): raising StopIteration inside a generator
            # becomes a RuntimeError, so end the generator with a plain return
            return
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth
else:
filth = filth.merge(next_filth)
yield filth | def function[iter_filth, parameter[self, text]]:
constant[Iterate over the different types of filth that can exist.
]
variable[all_filths] assign[=] list[[]]
for taget[name[detector]] in starred[call[name[self]._detectors.values, parameter[]]] begin[:]
for taget[name[filth]] in starred[call[name[detector].iter_filth, parameter[name[text]]]] begin[:]
if <ast.UnaryOp object at 0x7da18bccb220> begin[:]
<ast.Raise object at 0x7da18bcc81c0>
call[name[all_filths].append, parameter[name[filth]]]
call[name[all_filths].sort, parameter[]]
if <ast.UnaryOp object at 0x7da18f720dc0> begin[:]
<ast.Raise object at 0x7da18f720a30>
variable[filth] assign[=] call[name[all_filths]][constant[0]]
for taget[name[next_filth]] in starred[call[name[all_filths]][<ast.Slice object at 0x7da18f721c90>]] begin[:]
if compare[name[filth].end less[<] name[next_filth].beg] begin[:]
<ast.Yield object at 0x7da2043471f0>
variable[filth] assign[=] name[next_filth]
<ast.Yield object at 0x7da18c4ccca0> | keyword[def] identifier[iter_filth] ( identifier[self] , identifier[text] ):
literal[string]
identifier[all_filths] =[]
keyword[for] identifier[detector] keyword[in] identifier[self] . identifier[_detectors] . identifier[values] ():
keyword[for] identifier[filth] keyword[in] identifier[detector] . identifier[iter_filth] ( identifier[text] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[filth] , identifier[Filth] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[all_filths] . identifier[append] ( identifier[filth] )
identifier[all_filths] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[f] :( identifier[f] . identifier[beg] ,- identifier[f] . identifier[end] ))
keyword[if] keyword[not] identifier[all_filths] :
keyword[raise] identifier[StopIteration]
identifier[filth] = identifier[all_filths] [ literal[int] ]
keyword[for] identifier[next_filth] keyword[in] identifier[all_filths] [ literal[int] :]:
keyword[if] identifier[filth] . identifier[end] < identifier[next_filth] . identifier[beg] :
keyword[yield] identifier[filth]
identifier[filth] = identifier[next_filth]
keyword[else] :
identifier[filth] = identifier[filth] . identifier[merge] ( identifier[next_filth] )
keyword[yield] identifier[filth] | def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self._detectors.values():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth') # depends on [control=['if'], data=[]]
all_filths.append(filth) # depends on [control=['for'], data=['filth']] # depends on [control=['for'], data=['detector']]
# Sort by start position. If two filths start in the same place then
# return the longer one first
all_filths.sort(key=lambda f: (f.beg, -f.end))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
        if not all_filths:
            return  # PEP 479: was `raise StopIteration` # depends on [control=['if'], data=[]]
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth # depends on [control=['if'], data=[]]
else:
filth = filth.merge(next_filth) # depends on [control=['for'], data=['next_filth']]
yield filth |
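The sort key `(beg, -end)` plus the merge loop is classic interval merging; the same idea on bare `(beg, end)` tuples:

spans = [(3, 8), (0, 5), (10, 12)]
spans.sort(key=lambda s: (s[0], -s[1]))    # longest span first on ties
merged, cur = [], spans[0]
for beg, end in spans[1:]:
    if cur[1] < beg:                       # disjoint: emit and restart
        merged.append(cur)
        cur = (beg, end)
    else:                                  # overlapping: extend the current span
        cur = (cur[0], max(cur[1], end))
merged.append(cur)
print(merged)  # [(0, 8), (10, 12)]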
def update(self, ns, docid, raw, **kw):
""" Perform a single update operation.
{'docid': ObjectId('4e95ae3616692111bb000001'),
'ns': u'mydb.tweets',
'raw': {u'h': -5295451122737468990L,
u'ns': u'mydb.tweets',
u'o': {u'$set': {u'content': u'Lorem ipsum'}},
u'o2': {u'_id': ObjectId('4e95ae3616692111bb000001')},
u'op': u'u',
u'ts': Timestamp(1318432339, 1)}}
"""
self._dest_coll(ns).update(raw['o2'], raw['o'], safe=True) | def function[update, parameter[self, ns, docid, raw]]:
constant[ Perform a single update operation.
{'docid': ObjectId('4e95ae3616692111bb000001'),
'ns': u'mydb.tweets',
'raw': {u'h': -5295451122737468990L,
u'ns': u'mydb.tweets',
u'o': {u'$set': {u'content': u'Lorem ipsum'}},
u'o2': {u'_id': ObjectId('4e95ae3616692111bb000001')},
u'op': u'u',
u'ts': Timestamp(1318432339, 1)}}
]
call[call[name[self]._dest_coll, parameter[name[ns]]].update, parameter[call[name[raw]][constant[o2]], call[name[raw]][constant[o]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[ns] , identifier[docid] , identifier[raw] ,** identifier[kw] ):
literal[string]
identifier[self] . identifier[_dest_coll] ( identifier[ns] ). identifier[update] ( identifier[raw] [ literal[string] ], identifier[raw] [ literal[string] ], identifier[safe] = keyword[True] ) | def update(self, ns, docid, raw, **kw):
""" Perform a single update operation.
{'docid': ObjectId('4e95ae3616692111bb000001'),
'ns': u'mydb.tweets',
'raw': {u'h': -5295451122737468990L,
u'ns': u'mydb.tweets',
u'o': {u'$set': {u'content': u'Lorem ipsum'}},
u'o2': {u'_id': ObjectId('4e95ae3616692111bb000001')},
u'op': u'u',
u'ts': Timestamp(1318432339, 1)}}
"""
self._dest_coll(ns).update(raw['o2'], raw['o'], safe=True) |
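Note that `Collection.update` and the `safe` flag were removed in PyMongo 3; a rough modern equivalent of the same oplog replay (collection object hypothetical):

def apply_oplog_update(coll, raw):
    # o2 selects the target document, o carries the {'$set': ...} modifier
    coll.update_one(raw['o2'], raw['o'])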
def process_single_message_from_queue(self):
"""
Tries to read a single message from the queue and let the associated task process it.
:return: bool: True if we processed a message, otherwise False
"""
try:
message = self.message_queue.get_nowait()
task_id, data = message
task = self.task_id_to_task[task_id]
task.on_message(data)
return True
except queue.Empty:
return False | def function[process_single_message_from_queue, parameter[self]]:
constant[
Tries to read a single message from the queue and let the associated task process it.
:return: bool: True if we processed a message, otherwise False
]
<ast.Try object at 0x7da18bc718a0> | keyword[def] identifier[process_single_message_from_queue] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[message] = identifier[self] . identifier[message_queue] . identifier[get_nowait] ()
identifier[task_id] , identifier[data] = identifier[message]
identifier[task] = identifier[self] . identifier[task_id_to_task] [ identifier[task_id] ]
identifier[task] . identifier[on_message] ( identifier[data] )
keyword[return] keyword[True]
keyword[except] identifier[queue] . identifier[Empty] :
keyword[return] keyword[False] | def process_single_message_from_queue(self):
"""
Tries to read a single message from the queue and let the associated task process it.
:return: bool: True if we processed a message, otherwise False
"""
try:
message = self.message_queue.get_nowait()
(task_id, data) = message
task = self.task_id_to_task[task_id]
task.on_message(data)
return True # depends on [control=['try'], data=[]]
except queue.Empty:
return False # depends on [control=['except'], data=[]] |
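A self-contained demonstration of the dispatch loop; the task class and id are made up:

import queue

class EchoTask:
    def on_message(self, data):
        print('received:', data)

class Worker:
    def __init__(self):
        self.message_queue = queue.Queue()
        self.task_id_to_task = {'t1': EchoTask()}

    def process_single_message_from_queue(self):
        try:
            task_id, data = self.message_queue.get_nowait()
            self.task_id_to_task[task_id].on_message(data)
            return True
        except queue.Empty:
            return False

w = Worker()
w.message_queue.put(('t1', 'hello'))
print(w.process_single_message_from_queue())  # received: hello / True
print(w.process_single_message_from_queue())  # False, queue drained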
def char_spacing(self, dots):
        '''Specifies character spacing in dots.
Args:
dots: the character spacing you desire, in dots
Returns:
None
Raises:
RuntimeError: Invalid dot amount.
'''
if dots in range(0,127):
self.send(chr(27)+chr(32)+chr(dots))
else:
            raise RuntimeError('Invalid dot amount in function char_spacing')
constant[Specifes character spacing in dots.
Args:
dots: the character spacing you desire, in dots
Returns:
None
Raises:
RuntimeError: Invalid dot amount.
]
if compare[name[dots] in call[name[range], parameter[constant[0], constant[127]]]] begin[:]
call[name[self].send, parameter[binary_operation[binary_operation[call[name[chr], parameter[constant[27]]] + call[name[chr], parameter[constant[32]]]] + call[name[chr], parameter[name[dots]]]]]] | keyword[def] identifier[char_spacing] ( identifier[self] , identifier[dots] ):
literal[string]
keyword[if] identifier[dots] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[self] . identifier[send] ( identifier[chr] ( literal[int] )+ identifier[chr] ( literal[int] )+ identifier[chr] ( identifier[dots] ))
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def char_spacing(self, dots):
"""Specifes character spacing in dots.
Args:
dots: the character spacing you desire, in dots
Returns:
None
Raises:
RuntimeError: Invalid dot amount.
"""
if dots in range(0, 127):
self.send(chr(27) + chr(32) + chr(dots)) # depends on [control=['if'], data=['dots']]
else:
            raise RuntimeError('Invalid dot amount in function char_spacing')
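The sequence sent is ESC SP n (0x1B 0x20 n), the character-spacing command on ESC/POS-style printers; note that `range(0, 127)` accepts 0 through 126 only. Serialized for 10 dots:

dots = 10
cmd = chr(27) + chr(32) + chr(dots)
print(cmd.encode('latin-1'))  # b'\x1b \n', i.e. bytes 0x1B 0x20 0x0A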
def visit(self, obj):
"""Visit a node or a list of nodes. Other values are ignored"""
if isinstance(obj, list):
return list(filter(lambda x: x is not None, map(self.visit, obj)))
elif isinstance(obj, ast.AST):
return self._visit_one(obj)
else:
return obj | def function[visit, parameter[self, obj]]:
constant[Visit a node or a list of nodes. Other values are ignored]
if call[name[isinstance], parameter[name[obj], name[list]]] begin[:]
return[call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da18dc9ab90>, call[name[map], parameter[name[self].visit, name[obj]]]]]]]] | keyword[def] identifier[visit] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[list] ):
keyword[return] identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[is] keyword[not] keyword[None] , identifier[map] ( identifier[self] . identifier[visit] , identifier[obj] )))
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[ast] . identifier[AST] ):
keyword[return] identifier[self] . identifier[_visit_one] ( identifier[obj] )
keyword[else] :
keyword[return] identifier[obj] | def visit(self, obj):
"""Visit a node or a list of nodes. Other values are ignored"""
if isinstance(obj, list):
return list(filter(lambda x: x is not None, map(self.visit, obj))) # depends on [control=['if'], data=[]]
elif isinstance(obj, ast.AST):
return self._visit_one(obj) # depends on [control=['if'], data=[]]
else:
return obj |
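A toy visitor wired to this dispatch, naming AST nodes and showing how `None` results are filtered out of lists:

import ast

class Namer:
    def visit(self, obj):
        if isinstance(obj, list):
            return list(filter(lambda x: x is not None, map(self.visit, obj)))
        elif isinstance(obj, ast.AST):
            return self._visit_one(obj)
        else:
            return obj

    def _visit_one(self, node):
        # pretend expression statements are uninteresting
        return None if isinstance(node, ast.Expr) else type(node).__name__

tree = ast.parse('x = 1\nprint(x)')
print(Namer().visit(tree.body))  # ['Assign'], the Expr node was dropped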
def linescore(self):
"""Returns the linescore for the game as a DataFrame."""
doc = self.get_main_doc()
table = doc('table#line_score')
columns = [th.text() for th in table('tr.thead').items('th')]
columns[0] = 'team_id'
data = [
[sportsref.utils.flatten_links(td) for td in tr('td').items()]
for tr in table('tr.thead').next_all('tr').items()
]
return pd.DataFrame(data, index=['away', 'home'],
columns=columns, dtype='float') | def function[linescore, parameter[self]]:
constant[Returns the linescore for the game as a DataFrame.]
variable[doc] assign[=] call[name[self].get_main_doc, parameter[]]
variable[table] assign[=] call[name[doc], parameter[constant[table#line_score]]]
variable[columns] assign[=] <ast.ListComp object at 0x7da20c76ca90>
call[name[columns]][constant[0]] assign[=] constant[team_id]
variable[data] assign[=] <ast.ListComp object at 0x7da20c76da80>
return[call[name[pd].DataFrame, parameter[name[data]]]] | keyword[def] identifier[linescore] ( identifier[self] ):
literal[string]
identifier[doc] = identifier[self] . identifier[get_main_doc] ()
identifier[table] = identifier[doc] ( literal[string] )
identifier[columns] =[ identifier[th] . identifier[text] () keyword[for] identifier[th] keyword[in] identifier[table] ( literal[string] ). identifier[items] ( literal[string] )]
identifier[columns] [ literal[int] ]= literal[string]
identifier[data] =[
[ identifier[sportsref] . identifier[utils] . identifier[flatten_links] ( identifier[td] ) keyword[for] identifier[td] keyword[in] identifier[tr] ( literal[string] ). identifier[items] ()]
keyword[for] identifier[tr] keyword[in] identifier[table] ( literal[string] ). identifier[next_all] ( literal[string] ). identifier[items] ()
]
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[data] , identifier[index] =[ literal[string] , literal[string] ],
identifier[columns] = identifier[columns] , identifier[dtype] = literal[string] ) | def linescore(self):
"""Returns the linescore for the game as a DataFrame."""
doc = self.get_main_doc()
table = doc('table#line_score')
columns = [th.text() for th in table('tr.thead').items('th')]
columns[0] = 'team_id'
data = [[sportsref.utils.flatten_links(td) for td in tr('td').items()] for tr in table('tr.thead').next_all('tr').items()]
return pd.DataFrame(data, index=['away', 'home'], columns=columns, dtype='float') |
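The resulting frame looks roughly like this (toy values; note that `dtype='float'` can raise in recent pandas because the `team_id` column holds strings):

import pandas as pd

columns = ['team_id', '1', '2', '3', '4', 'T']
data = [['CLE', 28, 25, 26, 29, 108],
        ['GSW', 35, 30, 31, 33, 129]]
df = pd.DataFrame(data, index=['away', 'home'], columns=columns)
print(df.loc['home', 'T'])  # 129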
def _handle_response(self):
"""
returns RESTBase response if appropriate
"""
content = self.cache['restbase']['info']['content-type']
if content.startswith('text/html'):
html = self.cache['restbase']['response']
if isinstance(html, bytes):
html = html.decode('utf-8')
self.data['html'] = html
return
response = self._load_response('restbase')
http_status = self.cache['restbase']['info']['status']
if http_status == 404:
raise LookupError(self.cache['restbase']['query'])
if self.params.get('endpoint') == '/page/':
msg = "RESTBase /page/ entry points: %s" % response.get('items')
utils.stderr(msg)
del self.cache['restbase']
return
return response | def function[_handle_response, parameter[self]]:
constant[
returns RESTBase response if appropriate
]
variable[content] assign[=] call[call[call[name[self].cache][constant[restbase]]][constant[info]]][constant[content-type]]
if call[name[content].startswith, parameter[constant[text/html]]] begin[:]
variable[html] assign[=] call[call[name[self].cache][constant[restbase]]][constant[response]]
if call[name[isinstance], parameter[name[html], name[bytes]]] begin[:]
variable[html] assign[=] call[name[html].decode, parameter[constant[utf-8]]]
call[name[self].data][constant[html]] assign[=] name[html]
return[None]
variable[response] assign[=] call[name[self]._load_response, parameter[constant[restbase]]]
variable[http_status] assign[=] call[call[call[name[self].cache][constant[restbase]]][constant[info]]][constant[status]]
if compare[name[http_status] equal[==] constant[404]] begin[:]
<ast.Raise object at 0x7da1b1294970>
if compare[call[name[self].params.get, parameter[constant[endpoint]]] equal[==] constant[/page/]] begin[:]
variable[msg] assign[=] binary_operation[constant[RESTBase /page/ entry points: %s] <ast.Mod object at 0x7da2590d6920> call[name[response].get, parameter[constant[items]]]]
call[name[utils].stderr, parameter[name[msg]]]
<ast.Delete object at 0x7da1b12949a0>
return[None]
return[name[response]] | keyword[def] identifier[_handle_response] ( identifier[self] ):
literal[string]
identifier[content] = identifier[self] . identifier[cache] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[if] identifier[content] . identifier[startswith] ( literal[string] ):
identifier[html] = identifier[self] . identifier[cache] [ literal[string] ][ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[html] , identifier[bytes] ):
identifier[html] = identifier[html] . identifier[decode] ( literal[string] )
identifier[self] . identifier[data] [ literal[string] ]= identifier[html]
keyword[return]
identifier[response] = identifier[self] . identifier[_load_response] ( literal[string] )
identifier[http_status] = identifier[self] . identifier[cache] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[if] identifier[http_status] == literal[int] :
keyword[raise] identifier[LookupError] ( identifier[self] . identifier[cache] [ literal[string] ][ literal[string] ])
keyword[if] identifier[self] . identifier[params] . identifier[get] ( literal[string] )== literal[string] :
identifier[msg] = literal[string] % identifier[response] . identifier[get] ( literal[string] )
identifier[utils] . identifier[stderr] ( identifier[msg] )
keyword[del] identifier[self] . identifier[cache] [ literal[string] ]
keyword[return]
keyword[return] identifier[response] | def _handle_response(self):
"""
returns RESTBase response if appropriate
"""
content = self.cache['restbase']['info']['content-type']
if content.startswith('text/html'):
html = self.cache['restbase']['response']
if isinstance(html, bytes):
html = html.decode('utf-8') # depends on [control=['if'], data=[]]
self.data['html'] = html
return # depends on [control=['if'], data=[]]
response = self._load_response('restbase')
http_status = self.cache['restbase']['info']['status']
if http_status == 404:
raise LookupError(self.cache['restbase']['query']) # depends on [control=['if'], data=[]]
if self.params.get('endpoint') == '/page/':
msg = 'RESTBase /page/ entry points: %s' % response.get('items')
utils.stderr(msg)
del self.cache['restbase']
return # depends on [control=['if'], data=[]]
return response |
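The cache layout the method reads from, reduced to the HTML branch with toy values:

cache = {'restbase': {
    'info': {'content-type': 'text/html; charset=utf-8', 'status': 200},
    'response': b'<p>Hello</p>'}}

if cache['restbase']['info']['content-type'].startswith('text/html'):
    html = cache['restbase']['response']
    if isinstance(html, bytes):
        html = html.decode('utf-8')
    print(html)  # <p>Hello</p>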
def collision_encode(self, src, id, action, threat_level, time_to_minimum_delta, altitude_minimum_delta, horizontal_minimum_delta):
'''
Information about a potential collision
src : Collision data source (uint8_t)
id : Unique identifier, domain based on src field (uint32_t)
action : Action that is being taken to avoid this collision (uint8_t)
threat_level : How concerned the aircraft is about this collision (uint8_t)
time_to_minimum_delta : Estimated time until collision occurs (seconds) (float)
altitude_minimum_delta : Closest vertical distance in meters between vehicle and object (float)
                horizontal_minimum_delta : Closest horizontal distance in meters between vehicle and object (float)
'''
return MAVLink_collision_message(src, id, action, threat_level, time_to_minimum_delta, altitude_minimum_delta, horizontal_minimum_delta) | def function[collision_encode, parameter[self, src, id, action, threat_level, time_to_minimum_delta, altitude_minimum_delta, horizontal_minimum_delta]]:
constant[
Information about a potential collision
src : Collision data source (uint8_t)
id : Unique identifier, domain based on src field (uint32_t)
action : Action that is being taken to avoid this collision (uint8_t)
threat_level : How concerned the aircraft is about this collision (uint8_t)
time_to_minimum_delta : Estimated time until collision occurs (seconds) (float)
altitude_minimum_delta : Closest vertical distance in meters between vehicle and object (float)
    horizontal_minimum_delta : Closest horizontal distance in meters between vehicle and object (float)
]
return[call[name[MAVLink_collision_message], parameter[name[src], name[id], name[action], name[threat_level], name[time_to_minimum_delta], name[altitude_minimum_delta], name[horizontal_minimum_delta]]]] | keyword[def] identifier[collision_encode] ( identifier[self] , identifier[src] , identifier[id] , identifier[action] , identifier[threat_level] , identifier[time_to_minimum_delta] , identifier[altitude_minimum_delta] , identifier[horizontal_minimum_delta] ):
literal[string]
keyword[return] identifier[MAVLink_collision_message] ( identifier[src] , identifier[id] , identifier[action] , identifier[threat_level] , identifier[time_to_minimum_delta] , identifier[altitude_minimum_delta] , identifier[horizontal_minimum_delta] ) | def collision_encode(self, src, id, action, threat_level, time_to_minimum_delta, altitude_minimum_delta, horizontal_minimum_delta):
"""
Information about a potential collision
src : Collision data source (uint8_t)
id : Unique identifier, domain based on src field (uint32_t)
action : Action that is being taken to avoid this collision (uint8_t)
threat_level : How concerned the aircraft is about this collision (uint8_t)
time_to_minimum_delta : Estimated time until collision occurs (seconds) (float)
altitude_minimum_delta : Closest vertical distance in meters between vehicle and object (float)
                horizontal_minimum_delta : Closest horizontal distance in meters between vehicle and object (float)
"""
return MAVLink_collision_message(src, id, action, threat_level, time_to_minimum_delta, altitude_minimum_delta, horizontal_minimum_delta) |
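With pymavlink this encoder is normally reached through a connection's `mav` attribute; left commented since it needs a live link, and all values are hypothetical:

# from pymavlink import mavutil
# conn = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
# msg = conn.mav.collision_encode(
#     src=0, id=42, action=2, threat_level=1,
#     time_to_minimum_delta=3.5, altitude_minimum_delta=12.0,
#     horizontal_minimum_delta=30.0)
# conn.mav.send(msg)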
def add_variable(self, v, bias, vartype=None):
"""Add variable v and/or its bias to a binary quadratic model.
Args:
v (variable):
The variable to add to the model. Can be any python object
that is a valid dict key.
bias (bias):
Linear bias associated with v. If v is already in the model, this value is added
to its current linear bias. Many methods and functions expect `bias` to be a number
but this is not explicitly checked.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates an Ising model with two variables, adds a third,
and adds to the linear biases of the initial two.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({0: 0.0, 1: 1.0}, {(0, 1): 0.5}, -0.5, dimod.SPIN)
>>> len(bqm.linear)
2
>>> bqm.add_variable(2, 2.0, vartype=dimod.SPIN) # Add a new variable
>>> bqm.add_variable(1, 0.33, vartype=dimod.SPIN)
>>> bqm.add_variable(0, 0.33, vartype=dimod.BINARY) # Binary value is converted to spin value
>>> len(bqm.linear)
3
>>> bqm.linear[1]
1.33
"""
# handle the case that a different vartype is provided
if vartype is not None and vartype is not self.vartype:
if self.vartype is Vartype.SPIN and vartype is Vartype.BINARY:
# convert from binary to spin
bias /= 2
self.offset += bias
elif self.vartype is Vartype.BINARY and vartype is Vartype.SPIN:
# convert from spin to binary
self.offset -= bias
bias *= 2
else:
raise ValueError("unknown vartype")
# we used to do this using self.linear but working directly with _adj
# is much faster
_adj = self._adj
if v in _adj:
if v in _adj[v]:
_adj[v][v] += bias
else:
_adj[v][v] = bias
else:
_adj[v] = {v: bias}
try:
self._counterpart.add_variable(v, bias, vartype=self.vartype)
except AttributeError:
pass | def function[add_variable, parameter[self, v, bias, vartype]]:
constant[Add variable v and/or its bias to a binary quadratic model.
Args:
v (variable):
The variable to add to the model. Can be any python object
that is a valid dict key.
bias (bias):
Linear bias associated with v. If v is already in the model, this value is added
to its current linear bias. Many methods and functions expect `bias` to be a number
but this is not explicitly checked.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates an Ising model with two variables, adds a third,
and adds to the linear biases of the initial two.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({0: 0.0, 1: 1.0}, {(0, 1): 0.5}, -0.5, dimod.SPIN)
>>> len(bqm.linear)
2
>>> bqm.add_variable(2, 2.0, vartype=dimod.SPIN) # Add a new variable
>>> bqm.add_variable(1, 0.33, vartype=dimod.SPIN)
>>> bqm.add_variable(0, 0.33, vartype=dimod.BINARY) # Binary value is converted to spin value
>>> len(bqm.linear)
3
>>> bqm.linear[1]
1.33
]
if <ast.BoolOp object at 0x7da1b0760c70> begin[:]
if <ast.BoolOp object at 0x7da1b0761210> begin[:]
<ast.AugAssign object at 0x7da1b0762a10>
<ast.AugAssign object at 0x7da1b0761240>
variable[_adj] assign[=] name[self]._adj
if compare[name[v] in name[_adj]] begin[:]
if compare[name[v] in call[name[_adj]][name[v]]] begin[:]
<ast.AugAssign object at 0x7da1b0763c10>
<ast.Try object at 0x7da1b07f5630> | keyword[def] identifier[add_variable] ( identifier[self] , identifier[v] , identifier[bias] , identifier[vartype] = keyword[None] ):
literal[string]
keyword[if] identifier[vartype] keyword[is] keyword[not] keyword[None] keyword[and] identifier[vartype] keyword[is] keyword[not] identifier[self] . identifier[vartype] :
keyword[if] identifier[self] . identifier[vartype] keyword[is] identifier[Vartype] . identifier[SPIN] keyword[and] identifier[vartype] keyword[is] identifier[Vartype] . identifier[BINARY] :
identifier[bias] /= literal[int]
identifier[self] . identifier[offset] += identifier[bias]
keyword[elif] identifier[self] . identifier[vartype] keyword[is] identifier[Vartype] . identifier[BINARY] keyword[and] identifier[vartype] keyword[is] identifier[Vartype] . identifier[SPIN] :
identifier[self] . identifier[offset] -= identifier[bias]
identifier[bias] *= literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[_adj] = identifier[self] . identifier[_adj]
keyword[if] identifier[v] keyword[in] identifier[_adj] :
keyword[if] identifier[v] keyword[in] identifier[_adj] [ identifier[v] ]:
identifier[_adj] [ identifier[v] ][ identifier[v] ]+= identifier[bias]
keyword[else] :
identifier[_adj] [ identifier[v] ][ identifier[v] ]= identifier[bias]
keyword[else] :
identifier[_adj] [ identifier[v] ]={ identifier[v] : identifier[bias] }
keyword[try] :
identifier[self] . identifier[_counterpart] . identifier[add_variable] ( identifier[v] , identifier[bias] , identifier[vartype] = identifier[self] . identifier[vartype] )
keyword[except] identifier[AttributeError] :
keyword[pass] | def add_variable(self, v, bias, vartype=None):
"""Add variable v and/or its bias to a binary quadratic model.
Args:
v (variable):
The variable to add to the model. Can be any python object
that is a valid dict key.
bias (bias):
Linear bias associated with v. If v is already in the model, this value is added
to its current linear bias. Many methods and functions expect `bias` to be a number
but this is not explicitly checked.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates an Ising model with two variables, adds a third,
and adds to the linear biases of the initial two.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({0: 0.0, 1: 1.0}, {(0, 1): 0.5}, -0.5, dimod.SPIN)
>>> len(bqm.linear)
2
>>> bqm.add_variable(2, 2.0, vartype=dimod.SPIN) # Add a new variable
>>> bqm.add_variable(1, 0.33, vartype=dimod.SPIN)
>>> bqm.add_variable(0, 0.33, vartype=dimod.BINARY) # Binary value is converted to spin value
>>> len(bqm.linear)
3
>>> bqm.linear[1]
1.33
"""
# handle the case that a different vartype is provided
if vartype is not None and vartype is not self.vartype:
if self.vartype is Vartype.SPIN and vartype is Vartype.BINARY:
# convert from binary to spin
bias /= 2
self.offset += bias # depends on [control=['if'], data=[]]
elif self.vartype is Vartype.BINARY and vartype is Vartype.SPIN:
# convert from spin to binary
self.offset -= bias
bias *= 2 # depends on [control=['if'], data=[]]
else:
raise ValueError('unknown vartype') # depends on [control=['if'], data=[]]
# we used to do this using self.linear but working directly with _adj
# is much faster
_adj = self._adj
if v in _adj:
if v in _adj[v]:
_adj[v][v] += bias # depends on [control=['if'], data=['v']]
else:
_adj[v][v] = bias # depends on [control=['if'], data=['v', '_adj']]
else:
_adj[v] = {v: bias}
try:
self._counterpart.add_variable(v, bias, vartype=self.vartype) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] |
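The spin-to-binary bookkeeping follows from s = 2x - 1: a linear spin bias h contributes h*(2x - 1) = 2h*x - h, so the binary bias doubles and the offset drops by h. A quick numeric check against the implementation above (assumes a dimod version exposing this legacy API):

import dimod

bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.BINARY)
bqm.add_variable('a', 1.0, vartype=dimod.SPIN)  # spin bias of 1.0
print(bqm.linear['a'], bqm.offset)  # 2.0 -1.0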
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
        and TimeInput to "date" / "time" to enable browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time' | def function[apply_widget_options, parameter[self, field_name]]:
constant[
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
]
variable[widget] assign[=] call[name[self].fields][name[field_name]].widget
if call[name[isinstance], parameter[name[widget], name[forms].DateInput]] begin[:]
name[widget].input_type assign[=] constant[date]
if call[name[isinstance], parameter[name[widget], name[forms].TimeInput]] begin[:]
name[widget].input_type assign[=] constant[time]
if call[name[isinstance], parameter[name[widget], name[forms].SplitDateTimeWidget]] begin[:]
call[name[widget].widgets][constant[0]].input_type assign[=] constant[date]
call[name[widget].widgets][constant[1]].input_type assign[=] constant[time] | keyword[def] identifier[apply_widget_options] ( identifier[self] , identifier[field_name] ):
literal[string]
identifier[widget] = identifier[self] . identifier[fields] [ identifier[field_name] ]. identifier[widget]
keyword[if] identifier[isinstance] ( identifier[widget] , identifier[forms] . identifier[DateInput] ):
identifier[widget] . identifier[input_type] = literal[string]
keyword[if] identifier[isinstance] ( identifier[widget] , identifier[forms] . identifier[TimeInput] ):
identifier[widget] . identifier[input_type] = literal[string]
keyword[if] identifier[isinstance] ( identifier[widget] , identifier[forms] . identifier[SplitDateTimeWidget] ):
identifier[widget] . identifier[widgets] [ literal[int] ]. identifier[input_type] = literal[string]
identifier[widget] . identifier[widgets] [ literal[int] ]. identifier[input_type] = literal[string] | def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
        and TimeInput to "date" / "time" to enable browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date' # depends on [control=['if'], data=[]]
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time' # depends on [control=['if'], data=[]]
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time' # depends on [control=['if'], data=[]] |
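A minimal look at the effect on a stock Django field; no rendering happens, so no settings configuration should be needed (behavior assumed stable across recent Django versions):

from django import forms

field = forms.DateField()
print(type(field.widget).__name__, field.widget.input_type)  # DateInput text
field.widget.input_type = 'date'  # what apply_widget_options does
print(field.widget.input_type)    # date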
def _cryptodome_encrypt(cipher_factory, plaintext, key, iv):
"""Use a Pycryptodome cipher factory to encrypt data.
:param cipher_factory: Factory callable that builds a Pycryptodome Cipher
instance based on the key and IV
:type cipher_factory: callable
:param bytes plaintext: Plaintext data to encrypt
:param bytes key: Encryption key
    :param bytes iv: Initialization vector
:returns: Encrypted ciphertext
:rtype: bytes
"""
encryptor = cipher_factory(key, iv)
return encryptor.encrypt(plaintext) | def function[_cryptodome_encrypt, parameter[cipher_factory, plaintext, key, iv]]:
constant[Use a Pycryptodome cipher factory to encrypt data.
:param cipher_factory: Factory callable that builds a Pycryptodome Cipher
instance based on the key and IV
:type cipher_factory: callable
:param bytes plaintext: Plaintext data to encrypt
:param bytes key: Encryption key
:param bytes IV: Initialization vector
:returns: Encrypted ciphertext
:rtype: bytes
]
variable[encryptor] assign[=] call[name[cipher_factory], parameter[name[key], name[iv]]]
return[call[name[encryptor].encrypt, parameter[name[plaintext]]]] | keyword[def] identifier[_cryptodome_encrypt] ( identifier[cipher_factory] , identifier[plaintext] , identifier[key] , identifier[iv] ):
literal[string]
identifier[encryptor] = identifier[cipher_factory] ( identifier[key] , identifier[iv] )
keyword[return] identifier[encryptor] . identifier[encrypt] ( identifier[plaintext] ) | def _cryptodome_encrypt(cipher_factory, plaintext, key, iv):
"""Use a Pycryptodome cipher factory to encrypt data.
:param cipher_factory: Factory callable that builds a Pycryptodome Cipher
instance based on the key and IV
:type cipher_factory: callable
:param bytes plaintext: Plaintext data to encrypt
:param bytes key: Encryption key
    :param bytes iv: Initialization vector
:returns: Encrypted ciphertext
:rtype: bytes
"""
encryptor = cipher_factory(key, iv)
return encryptor.encrypt(plaintext) |
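For example, pairing `_cryptodome_encrypt` (as defined above) with an AES-CBC factory from PyCryptodome; key and IV sizes must fit the cipher, and CBC needs block-aligned plaintext since no padding is applied here:

from Crypto.Cipher import AES

def aes_cbc_factory(key, iv):
    return AES.new(key, AES.MODE_CBC, iv)

key = b'0' * 16  # 128-bit key
iv = b'1' * 16   # 16-byte IV
ct = _cryptodome_encrypt(aes_cbc_factory, b'16-byte plaintxt', key, iv)
print(ct.hex())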
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '\n\n'.join(
codeobj.pretty_str(indent=indent)
for codeobj in self.children
) | def function[pretty_str, parameter[self, indent]]:
constant[Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
]
return[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da18eb57cd0>]]] | keyword[def] identifier[pretty_str] ( identifier[self] , identifier[indent] = literal[int] ):
literal[string]
keyword[return] literal[string] . identifier[join] (
identifier[codeobj] . identifier[pretty_str] ( identifier[indent] = identifier[indent] )
keyword[for] identifier[codeobj] keyword[in] identifier[self] . identifier[children]
) | def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '\n\n'.join((codeobj.pretty_str(indent=indent) for codeobj in self.children)) |
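A toy stand-in for the code objects this method walks over:

class Leaf:
    def __init__(self, text):
        self.text = text
    def pretty_str(self, indent=0):
        return ' ' * indent + self.text

class Block:
    def __init__(self, children):
        self.children = children
    def pretty_str(self, indent=0):
        return '\n\n'.join(c.pretty_str(indent=indent) for c in self.children)

print(Block([Leaf('a'), Leaf('b')]).pretty_str(indent=4))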
def create_Variable(self,name,value,dimensions,toCreate=None,createDim=None,extend=True):
"""
create_Variable : This function adds data to :class:`altimetry.data.hydro_data`
:parameter name: name of the parameter to create
:parameter value: values associated to the variable. Must be a numpy masked_array or a data structure.
:parameter dimensions: dimensional structure (cf. notes).
.. _structures:
.. note:: altimetry tools package handles the NetCDF data using specific structures.
NetCDF data is structured this way:
.. code-block:: python
:emphasize-lines: 1,3
NetCDF_data = {'_dimensions':dimension_structure, #File dimensions (COMPULSORY)
'_attributes':attribute_structure, #Global attributes
'dimension_1':data_structure, #Data associated to the dimensions. (COMPULSORY)
...,
'variable_1':data_structure, #Variables
...
}
In standard NetCDF files, dimensions are always associated to a variable.
If it is not the case, an array of indices the length of the dimension is generated and a warning is issued.
Moreover, dimensions MUST be defined to be accepted by :class:`altimetry.tools.nctools.nc` (empty NetCDF files would fail).
* a dimensional structure should be of the form :
.. code-block:: python
dimension_structure = {'_ndims':N, #Attribute setting the number of dimensions.
'dims':{'dim_A':A, #Structure containing the name
'dim_B':B, #of the dimensions and their size.
...,
'dim_N':N
}
}
* an attribute structure is a very simple structure containing the attribute names and values:
.. code-block:: python
data_structure = {'attribute_1':attribute_1,
...,
'attribute_N':attribute_N}
* a data structure should be of the form :
.. code-block:: python
:emphasize-lines: 1-2
                data_structure = {'_dimensions':dimension_structure, #dimensions of the variable (COMPULSORY)
'data':data, #data associated to the variable (COMPULSORY)
'long_name':long_name, #Variable attributes
'units':units,
...
}
DATA and _DIMENSIONS fields are compulsory.
Other fields are optional and will be treated as attributes.
        Furthermore, the code pays special attention to **scale**, **scale_factor** and **add_offset** while reading and writing data, and to **_FillValue** and **missing_value** while reading (_FillValue being automatically filled by :class:`NetCDF4.Dataset` when writing)
"""
#Check variable name
####################
        #This replaces characters that would make the variable name invalid
#!!!! This is not a good solution
name=name.replace('.','_')
#Check if data is structured or not
        isStructure = isinstance(value, dict) #more idiomatic than `True if ... else False`
#Get dimensions
        dimName = np.array(list(dimensions.keys())) #list() needed on Python 3, where dict views are not sequences
        dimVal = np.array(list(dimensions.values()))
        keys=np.array(list(self._dimensions.keys()))
#        if createDim is None : createDim = (dimName[0] not in self._dimensions)
        createDim = np.array([dim not in self._dimensions for dim in dimName]) if createDim is None else np.array(createDim) #dict.has_key() was removed in Python 3
if toCreate is None : toCreate = np.sum(self.par_list == name) == 0
self.message(3,'Loading {0} ({1}:{2}) from {3}'.format(name,dimName,dimVal,os.path.basename(self._filename)))
#Cast variable into masked array first
######################################
if (not isinstance(value['data'],np.ma.core.MaskedArray) if isStructure else not isinstance(value,np.ma.core.MaskedArray)) :
value['data'] = np.ma.masked_array(value['data'],mask=np.zeros(tuple(dimVal),dtype='bool')) if isStructure else np.ma.masked_array(value,mask=np.zeros(tuple(dimVal),dtype='bool'))
self.message(4,'Casting variable to np.ma.MaskedArray')
#Restructure dataset if structure
if isStructure :
dumvalue=value.pop('data')
if value.has_key('_attributes'):
for a in value['_attributes'].keys():
self.message(4, "copying attribute %s" % a)
dumvalue.__setattr__(a,value['_attributes'][a])
value=copy.deepcopy(dumvalue)
curDim, nself=self.get_currentDim()
curInd=np.array(where_list(dimName,curDim[0]))
curDimVal=np.array(where_list(dimVal,curDim[1]))
existDims= (curInd != -1)
createDim = (curInd == -1)
createInd = np.where(createDim)[0]
appendDim=existDims & (curDimVal == -1)
appendInd=curInd[appendDim]
# curInd = set(atools.where_list(dimVal,curDim[1])).intersection(set(atools.where_list(dimName,curDim[0])))
#Get dims to be created
#######################
#Choose case between all different solutions :
##############################################
# 1: create a new variable with at least 1 new dimension
# 2: extend -> create a new variable using existing dimensions
# 3: append existing variable with data
# 4: impossible case ?
#1) Create variable
if createDim.any() & toCreate :
#Create Variable
self.message(4,'Create variable '+name)
# self.__setattr__(name,value)
# cmd='self.'+name+'=value'
#Append variable infos to object
self.par_list=np.append(self.par_list,name)
dimlist_cp=self.dim_list.tolist()
dimlist_cp.append(dimName.tolist())
self.dim_list=np.array(dimlist_cp) #np.append(self.dim_list,dimName.tolist())
updateDim=False
#2) Extend
elif (not createDim.any()) & toCreate :
#extend variable
if extend :
dumvalue = np.ma.masked_array(np.append(np.zeros(curDim[1][curInd]),value.data),mask=np.append(np.ones(curDim[1][curInd],dtype='bool'),value.mask))
for a in set(value.__dict__.keys()).difference(dumvalue.__dict__.keys()) :
dumvalue.__setattr__(a,value.__dict__[a] if hasattr(value, a) else self.__getattribute__(name).__getattribute__(a))
value=copy.deepcopy(dumvalue)
self.message(4,'Extend variable '+name)
# self.__setattr__(name,value)
# cmd='self.'+name+'=value'
# self.message(4,'exec : '+cmd)
#Append variable infos to object
self.par_list=np.append(self.par_list,name)
dimlist_cp=self.dim_list.tolist()
dimlist_cp.append(dimName.tolist())
self.dim_list=np.array(dimlist_cp)
# self.dim_list=np.append(self.dim_list,dimName)
updateDim=True
#3) Append
elif (not createDim.any()) & (not toCreate) :
#append variable
self.message(4,'Append data to variable '+name)
dumvalue = np.ma.masked_array(np.append(self.__getattribute__(name).data,value.data),mask=np.append(self.__getattribute__(name).mask,value.mask))
#We gather a list of attributes :
# - already in data structure,
# - in current data file
# - and not in output structure
attributes=set(self.__getattribute__(name).__dict__.keys())
attributes=attributes.union(value.__dict__.keys())
# attributes=attributes.difference(self.__getattribute__(name).__dict__.keys())
attributes=attributes.difference(dumvalue.__dict__.keys())
#Then :
# - we add attributes of current file not in data structure
# - we keep attributes of current data structure if they exist
for a in attributes :
dumvalue.__setattr__(a,value.__dict__[a] if hasattr(value, a) else self.__getattribute__(name).__getattribute__(a))
value=copy.deepcopy(dumvalue)
updateDim=True
elif createDim.any() & (not toCreate) :
#Impossible case ?
self.Error('Impossible case : create dimensions and variable {0} already existing'.format(name))
#Append dimensions to variable
if not dimensions.has_key('_ndims') :
dumDim=dimStr(dimensions)
dimensions=dumDim.copy()
#Update variable dimensions
if updateDim :
for k in dimensions.keys(): dimensions.update({k:self._dimensions[k]})
value.__setattr__('_dimensions',dimensions)
try : self.__setattr__(name,value)
except np.ma.core.MaskError : raise np.ma.core.MaskError('mask error')
#
# try : exec(cmd)
# except np.ma.core.MaskError :
# raise 'mask error'
# exec(cmd)
return updateDim | def function[create_Variable, parameter[self, name, value, dimensions, toCreate, createDim, extend]]:
constant[
create_Variable : This function adds data to :class:`altimetry.data.hydro_data`
:parameter name: name of the parameter to create
:parameter value: values associated to the variable. Must be a numpy masked_array or a data structure.
:parameter dimensions: dimensional structure (cf. notes).
.. _structures:
.. note:: altimetry tools package handles the NetCDF data using specific structures.
NetCDF data is structured this way:
.. code-block:: python
:emphasize-lines: 1,3
NetCDF_data = {'_dimensions':dimension_structure, #File dimensions (COMPULSORY)
'_attributes':attribute_structure, #Global attributes
'dimension_1':data_structure, #Data associated to the dimensions. (COMPULSORY)
...,
'variable_1':data_structure, #Variables
...
}
In standard NetCDF files, dimensions are always associated to a variable.
If it is not the case, an array of indices the length of the dimension is generated and a warning is issued.
Moreover, dimensions MUST be defined to be accepted by :class:`altimetry.tools.nctools.nc` (empty NetCDF files would fail).
* a dimensional structure should be of the form :
.. code-block:: python
dimension_structure = {'_ndims':N, #Attribute setting the number of dimensions.
'dims':{'dim_A':A, #Structure containing the name
'dim_B':B, #of the dimensions and their size.
...,
'dim_N':N
}
}
* an attribute structure is a very simple structure containing the attribute names and values:
.. code-block:: python
data_structure = {'attribute_1':attribute_1,
...,
'attribute_N':attribute_N}
* a data structure should be of the form :
.. code-block:: python
:emphasize-lines: 1-2
data_structure = {'_dimensions':dimension_structure, #dimensions of the variable (COMPULSORY)
'data':data, #data associated to the variable (COMPULSORY)
'long_name':long_name, #Variable attributes
'units':units,
...
}
DATA and _DIMENSIONS fields are compulsory.
Other fields are optional and will be treated as attributes.
Furthermore, the code treats **scale**, **scale_factor** and **add_offset** specially while reading and writing data, and **_FillValue** and missing_value while reading (_FillValue being automatically filled by :class:`NetCDF4.Dataset` when writing)
]
variable[name] assign[=] call[name[name].replace, parameter[constant[.], constant[_]]]
variable[isStructure] assign[=] <ast.IfExp object at 0x7da1b081bb20>
variable[dimName] assign[=] call[name[np].array, parameter[call[name[dimensions].keys, parameter[]]]]
variable[dimVal] assign[=] call[name[np].array, parameter[call[name[dimensions].values, parameter[]]]]
variable[keys] assign[=] call[name[np].array, parameter[call[name[self]._dimensions.keys, parameter[]]]]
variable[createDim] assign[=] <ast.IfExp object at 0x7da1b081b4c0>
if compare[name[toCreate] is constant[None]] begin[:]
variable[toCreate] assign[=] compare[call[name[np].sum, parameter[compare[name[self].par_list equal[==] name[name]]]] equal[==] constant[0]]
call[name[self].message, parameter[constant[3], call[constant[Loading {0} ({1}:{2}) from {3}].format, parameter[name[name], name[dimName], name[dimVal], call[name[os].path.basename, parameter[name[self]._filename]]]]]]
if <ast.IfExp object at 0x7da1b081aa40> begin[:]
call[name[value]][constant[data]] assign[=] <ast.IfExp object at 0x7da1b081a5c0>
call[name[self].message, parameter[constant[4], constant[Casting variable to np.ma.MaskedArray]]]
if name[isStructure] begin[:]
variable[dumvalue] assign[=] call[name[value].pop, parameter[constant[data]]]
if call[name[value].has_key, parameter[constant[_attributes]]] begin[:]
for taget[name[a]] in starred[call[call[name[value]][constant[_attributes]].keys, parameter[]]] begin[:]
call[name[self].message, parameter[constant[4], binary_operation[constant[copying attribute %s] <ast.Mod object at 0x7da2590d6920> name[a]]]]
call[name[dumvalue].__setattr__, parameter[name[a], call[call[name[value]][constant[_attributes]]][name[a]]]]
variable[value] assign[=] call[name[copy].deepcopy, parameter[name[dumvalue]]]
<ast.Tuple object at 0x7da1b08195a0> assign[=] call[name[self].get_currentDim, parameter[]]
variable[curInd] assign[=] call[name[np].array, parameter[call[name[where_list], parameter[name[dimName], call[name[curDim]][constant[0]]]]]]
variable[curDimVal] assign[=] call[name[np].array, parameter[call[name[where_list], parameter[name[dimVal], call[name[curDim]][constant[1]]]]]]
variable[existDims] assign[=] compare[name[curInd] not_equal[!=] <ast.UnaryOp object at 0x7da1b0818fa0>]
variable[createDim] assign[=] compare[name[curInd] equal[==] <ast.UnaryOp object at 0x7da1b0818e80>]
variable[createInd] assign[=] call[call[name[np].where, parameter[name[createDim]]]][constant[0]]
variable[appendDim] assign[=] binary_operation[name[existDims] <ast.BitAnd object at 0x7da2590d6b60> compare[name[curDimVal] equal[==] <ast.UnaryOp object at 0x7da1b0818b80>]]
variable[appendInd] assign[=] call[name[curInd]][name[appendDim]]
if binary_operation[call[name[createDim].any, parameter[]] <ast.BitAnd object at 0x7da2590d6b60> name[toCreate]] begin[:]
call[name[self].message, parameter[constant[4], binary_operation[constant[Create variable ] + name[name]]]]
name[self].par_list assign[=] call[name[np].append, parameter[name[self].par_list, name[name]]]
variable[dimlist_cp] assign[=] call[name[self].dim_list.tolist, parameter[]]
call[name[dimlist_cp].append, parameter[call[name[dimName].tolist, parameter[]]]]
name[self].dim_list assign[=] call[name[np].array, parameter[name[dimlist_cp]]]
variable[updateDim] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b0930d60> begin[:]
variable[dumDim] assign[=] call[name[dimStr], parameter[name[dimensions]]]
variable[dimensions] assign[=] call[name[dumDim].copy, parameter[]]
if name[updateDim] begin[:]
for taget[name[k]] in starred[call[name[dimensions].keys, parameter[]]] begin[:]
call[name[dimensions].update, parameter[dictionary[[<ast.Name object at 0x7da1b0930850>], [<ast.Subscript object at 0x7da1b0930820>]]]]
call[name[value].__setattr__, parameter[constant[_dimensions], name[dimensions]]]
<ast.Try object at 0x7da1b09318a0>
return[name[updateDim]] | keyword[def] identifier[create_Variable] ( identifier[self] , identifier[name] , identifier[value] , identifier[dimensions] , identifier[toCreate] = keyword[None] , identifier[createDim] = keyword[None] , identifier[extend] = keyword[True] ):
literal[string]
identifier[name] = identifier[name] . identifier[replace] ( literal[string] , literal[string] )
identifier[isStructure] = keyword[True] keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[else] keyword[False]
identifier[dimName] = identifier[np] . identifier[array] ( identifier[dimensions] . identifier[keys] ())
identifier[dimVal] = identifier[np] . identifier[array] ( identifier[dimensions] . identifier[values] ())
identifier[keys] = identifier[np] . identifier[array] ( identifier[self] . identifier[_dimensions] . identifier[keys] ())
identifier[createDim] = identifier[np] . identifier[array] ([ keyword[not] identifier[self] . identifier[_dimensions] . identifier[has_key] ( identifier[dim] ) keyword[for] identifier[dim] keyword[in] identifier[dimName] ]) keyword[if] identifier[createDim] keyword[is] keyword[None] keyword[else] identifier[np] . identifier[array] ( identifier[createDim] )
keyword[if] identifier[toCreate] keyword[is] keyword[None] : identifier[toCreate] = identifier[np] . identifier[sum] ( identifier[self] . identifier[par_list] == identifier[name] )== literal[int]
identifier[self] . identifier[message] ( literal[int] , literal[string] . identifier[format] ( identifier[name] , identifier[dimName] , identifier[dimVal] , identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[_filename] )))
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[value] [ literal[string] ], identifier[np] . identifier[ma] . identifier[core] . identifier[MaskedArray] ) keyword[if] identifier[isStructure] keyword[else] keyword[not] identifier[isinstance] ( identifier[value] , identifier[np] . identifier[ma] . identifier[core] . identifier[MaskedArray] )):
identifier[value] [ literal[string] ]= identifier[np] . identifier[ma] . identifier[masked_array] ( identifier[value] [ literal[string] ], identifier[mask] = identifier[np] . identifier[zeros] ( identifier[tuple] ( identifier[dimVal] ), identifier[dtype] = literal[string] )) keyword[if] identifier[isStructure] keyword[else] identifier[np] . identifier[ma] . identifier[masked_array] ( identifier[value] , identifier[mask] = identifier[np] . identifier[zeros] ( identifier[tuple] ( identifier[dimVal] ), identifier[dtype] = literal[string] ))
identifier[self] . identifier[message] ( literal[int] , literal[string] )
keyword[if] identifier[isStructure] :
identifier[dumvalue] = identifier[value] . identifier[pop] ( literal[string] )
keyword[if] identifier[value] . identifier[has_key] ( literal[string] ):
keyword[for] identifier[a] keyword[in] identifier[value] [ literal[string] ]. identifier[keys] ():
identifier[self] . identifier[message] ( literal[int] , literal[string] % identifier[a] )
identifier[dumvalue] . identifier[__setattr__] ( identifier[a] , identifier[value] [ literal[string] ][ identifier[a] ])
identifier[value] = identifier[copy] . identifier[deepcopy] ( identifier[dumvalue] )
identifier[curDim] , identifier[nself] = identifier[self] . identifier[get_currentDim] ()
identifier[curInd] = identifier[np] . identifier[array] ( identifier[where_list] ( identifier[dimName] , identifier[curDim] [ literal[int] ]))
identifier[curDimVal] = identifier[np] . identifier[array] ( identifier[where_list] ( identifier[dimVal] , identifier[curDim] [ literal[int] ]))
identifier[existDims] =( identifier[curInd] !=- literal[int] )
identifier[createDim] =( identifier[curInd] ==- literal[int] )
identifier[createInd] = identifier[np] . identifier[where] ( identifier[createDim] )[ literal[int] ]
identifier[appendDim] = identifier[existDims] &( identifier[curDimVal] ==- literal[int] )
identifier[appendInd] = identifier[curInd] [ identifier[appendDim] ]
keyword[if] identifier[createDim] . identifier[any] ()& identifier[toCreate] :
identifier[self] . identifier[message] ( literal[int] , literal[string] + identifier[name] )
identifier[self] . identifier[par_list] = identifier[np] . identifier[append] ( identifier[self] . identifier[par_list] , identifier[name] )
identifier[dimlist_cp] = identifier[self] . identifier[dim_list] . identifier[tolist] ()
identifier[dimlist_cp] . identifier[append] ( identifier[dimName] . identifier[tolist] ())
identifier[self] . identifier[dim_list] = identifier[np] . identifier[array] ( identifier[dimlist_cp] )
identifier[updateDim] = keyword[False]
keyword[elif] ( keyword[not] identifier[createDim] . identifier[any] ())& identifier[toCreate] :
keyword[if] identifier[extend] :
identifier[dumvalue] = identifier[np] . identifier[ma] . identifier[masked_array] ( identifier[np] . identifier[append] ( identifier[np] . identifier[zeros] ( identifier[curDim] [ literal[int] ][ identifier[curInd] ]), identifier[value] . identifier[data] ), identifier[mask] = identifier[np] . identifier[append] ( identifier[np] . identifier[ones] ( identifier[curDim] [ literal[int] ][ identifier[curInd] ], identifier[dtype] = literal[string] ), identifier[value] . identifier[mask] ))
keyword[for] identifier[a] keyword[in] identifier[set] ( identifier[value] . identifier[__dict__] . identifier[keys] ()). identifier[difference] ( identifier[dumvalue] . identifier[__dict__] . identifier[keys] ()):
identifier[dumvalue] . identifier[__setattr__] ( identifier[a] , identifier[value] . identifier[__dict__] [ identifier[a] ] keyword[if] identifier[hasattr] ( identifier[value] , identifier[a] ) keyword[else] identifier[self] . identifier[__getattribute__] ( identifier[name] ). identifier[__getattribute__] ( identifier[a] ))
identifier[value] = identifier[copy] . identifier[deepcopy] ( identifier[dumvalue] )
identifier[self] . identifier[message] ( literal[int] , literal[string] + identifier[name] )
identifier[self] . identifier[par_list] = identifier[np] . identifier[append] ( identifier[self] . identifier[par_list] , identifier[name] )
identifier[dimlist_cp] = identifier[self] . identifier[dim_list] . identifier[tolist] ()
identifier[dimlist_cp] . identifier[append] ( identifier[dimName] . identifier[tolist] ())
identifier[self] . identifier[dim_list] = identifier[np] . identifier[array] ( identifier[dimlist_cp] )
identifier[updateDim] = keyword[True]
keyword[elif] ( keyword[not] identifier[createDim] . identifier[any] ())&( keyword[not] identifier[toCreate] ):
identifier[self] . identifier[message] ( literal[int] , literal[string] + identifier[name] )
identifier[dumvalue] = identifier[np] . identifier[ma] . identifier[masked_array] ( identifier[np] . identifier[append] ( identifier[self] . identifier[__getattribute__] ( identifier[name] ). identifier[data] , identifier[value] . identifier[data] ), identifier[mask] = identifier[np] . identifier[append] ( identifier[self] . identifier[__getattribute__] ( identifier[name] ). identifier[mask] , identifier[value] . identifier[mask] ))
identifier[attributes] = identifier[set] ( identifier[self] . identifier[__getattribute__] ( identifier[name] ). identifier[__dict__] . identifier[keys] ())
identifier[attributes] = identifier[attributes] . identifier[union] ( identifier[value] . identifier[__dict__] . identifier[keys] ())
identifier[attributes] = identifier[attributes] . identifier[difference] ( identifier[dumvalue] . identifier[__dict__] . identifier[keys] ())
keyword[for] identifier[a] keyword[in] identifier[attributes] :
identifier[dumvalue] . identifier[__setattr__] ( identifier[a] , identifier[value] . identifier[__dict__] [ identifier[a] ] keyword[if] identifier[hasattr] ( identifier[value] , identifier[a] ) keyword[else] identifier[self] . identifier[__getattribute__] ( identifier[name] ). identifier[__getattribute__] ( identifier[a] ))
identifier[value] = identifier[copy] . identifier[deepcopy] ( identifier[dumvalue] )
identifier[updateDim] = keyword[True]
keyword[elif] identifier[createDim] . identifier[any] ()&( keyword[not] identifier[toCreate] ):
identifier[self] . identifier[Error] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] keyword[not] identifier[dimensions] . identifier[has_key] ( literal[string] ):
identifier[dumDim] = identifier[dimStr] ( identifier[dimensions] )
identifier[dimensions] = identifier[dumDim] . identifier[copy] ()
keyword[if] identifier[updateDim] :
keyword[for] identifier[k] keyword[in] identifier[dimensions] . identifier[keys] (): identifier[dimensions] . identifier[update] ({ identifier[k] : identifier[self] . identifier[_dimensions] [ identifier[k] ]})
identifier[value] . identifier[__setattr__] ( literal[string] , identifier[dimensions] )
keyword[try] : identifier[self] . identifier[__setattr__] ( identifier[name] , identifier[value] )
keyword[except] identifier[np] . identifier[ma] . identifier[core] . identifier[MaskError] : keyword[raise] identifier[np] . identifier[ma] . identifier[core] . identifier[MaskError] ( literal[string] )
keyword[return] identifier[updateDim] | def create_Variable(self, name, value, dimensions, toCreate=None, createDim=None, extend=True):
"""
create_Variable : This function adds data to :class:`altimetry.data.hydro_data`
:parameter name: name of the parameter to create
:parameter value: values associated to the variable. Must be a numpy masked_array or a data structure.
:parameter dimensions: dimensional structure (cf. notes).
.. _structures:
.. note:: altimetry tools package handles the NetCDF data using specific structures.
NetCDF data is structured this way:
.. code-block:: python
:emphasize-lines: 1,3
NetCDF_data = {'_dimensions':dimension_structure, #File dimensions (COMPULSORY)
'_attributes':attribute_structure, #Global attributes
'dimension_1':data_structure, #Data associated to the dimensions. (COMPULSORY)
...,
'variable_1':data_structure, #Variables
...
}
In standard NetCDF files, dimensions are always associated to a variable.
If it is not the case, an array of indices the length of the dimension is generated and a warning is issued.
Moreover, dimensions MUST be defined to be accepted by :class:`altimetry.tools.nctools.nc` (empty NetCDF files would fail).
* a dimensional structure should be of the form :
.. code-block:: python
dimension_structure = {'_ndims':N, #Attribute setting the number of dimensions.
'dims':{'dim_A':A, #Structure containing the name
'dim_B':B, #of the dimensions and their size.
...,
'dim_N':N
}
}
* an attribute structure is a very simple structure containing the attribute names and values:
.. code-block:: python
data_structure = {'attribute_1':attribute_1,
...,
'attribute_N':attribute_N}
* a data structure should be of the form :
.. code-block:: python
:emphasize-lines: 1-2
data_structure = {'_dimensions':dimension_structure, #dimensions of the variable (COMPULSORY)
'data':data, #data associated to the variable (COMPULSORY)
'long_name':long_name, #Variable attributes
'units':units,
...
}
DATA and _DIMENSIONS fields are compulsory.
Other fields are optional and will be treated as attributes.
Furthermore, the code treats **scale**, **scale_factor** and **add_offset** specially while reading and writing data, and **_FillValue** and missing_value while reading (_FillValue being automatically filled by :class:`NetCDF4.Dataset` when writing)
""" #Check variable name
####################
#This sanitizes otherwise invalid variable names
#!!!! This is not a good solution
name = name.replace('.', '_') #Check if data is structured or not
isStructure = True if isinstance(value, dict) else False #Get dimensions
dimName = np.array(dimensions.keys())
dimVal = np.array(dimensions.values())
keys = np.array(self._dimensions.keys()) # if createDim is None : createDim = self._dimensions.has_key(dimName[0])
createDim = np.array([not self._dimensions.has_key(dim) for dim in dimName]) if createDim is None else np.array(createDim)
if toCreate is None:
toCreate = np.sum(self.par_list == name) == 0 # depends on [control=['if'], data=['toCreate']]
self.message(3, 'Loading {0} ({1}:{2}) from {3}'.format(name, dimName, dimVal, os.path.basename(self._filename))) #Cast variable into masked array first
######################################
if not isinstance(value['data'], np.ma.core.MaskedArray) if isStructure else not isinstance(value, np.ma.core.MaskedArray):
value['data'] = np.ma.masked_array(value['data'], mask=np.zeros(tuple(dimVal), dtype='bool')) if isStructure else np.ma.masked_array(value, mask=np.zeros(tuple(dimVal), dtype='bool'))
self.message(4, 'Casting variable to np.ma.MaskedArray') # depends on [control=['if'], data=[]] #Restructure dataset if structure
if isStructure:
dumvalue = value.pop('data')
if value.has_key('_attributes'):
for a in value['_attributes'].keys():
self.message(4, 'copying attribute %s' % a)
dumvalue.__setattr__(a, value['_attributes'][a]) # depends on [control=['for'], data=['a']] # depends on [control=['if'], data=[]]
value = copy.deepcopy(dumvalue) # depends on [control=['if'], data=[]]
(curDim, nself) = self.get_currentDim()
curInd = np.array(where_list(dimName, curDim[0]))
curDimVal = np.array(where_list(dimVal, curDim[1]))
existDims = curInd != -1
createDim = curInd == -1
createInd = np.where(createDim)[0]
appendDim = existDims & (curDimVal == -1)
appendInd = curInd[appendDim] # curInd = set(atools.where_list(dimVal,curDim[1])).intersection(set(atools.where_list(dimName,curDim[0])))
#Get dims to be created
#######################
#Choose case between all different solutions :
##############################################
# 1: create a new variable with at least 1 new dimension
# 2: extend -> create a new variable using existing dimensions
# 3: append existing variable with data
# 4: impossible case ?
#1) Create variable
if createDim.any() & toCreate: #Create Variable
self.message(4, 'Create variable ' + name) # self.__setattr__(name,value)
# cmd='self.'+name+'=value'
#Append variable infos to object
self.par_list = np.append(self.par_list, name)
dimlist_cp = self.dim_list.tolist()
dimlist_cp.append(dimName.tolist())
self.dim_list = np.array(dimlist_cp) #np.append(self.dim_list,dimName.tolist())
updateDim = False # depends on [control=['if'], data=[]] #2) Extend
elif (not createDim.any()) & toCreate: #extend variable
if extend:
dumvalue = np.ma.masked_array(np.append(np.zeros(curDim[1][curInd]), value.data), mask=np.append(np.ones(curDim[1][curInd], dtype='bool'), value.mask))
for a in set(value.__dict__.keys()).difference(dumvalue.__dict__.keys()):
dumvalue.__setattr__(a, value.__dict__[a] if hasattr(value, a) else self.__getattribute__(name).__getattribute__(a)) # depends on [control=['for'], data=['a']]
value = copy.deepcopy(dumvalue) # depends on [control=['if'], data=[]]
self.message(4, 'Extend variable ' + name) # self.__setattr__(name,value)
# cmd='self.'+name+'=value'
# self.message(4,'exec : '+cmd)
#Append variable infos to object
self.par_list = np.append(self.par_list, name)
dimlist_cp = self.dim_list.tolist()
dimlist_cp.append(dimName.tolist())
self.dim_list = np.array(dimlist_cp) # self.dim_list=np.append(self.dim_list,dimName)
updateDim = True # depends on [control=['if'], data=[]] #3) Append
elif (not createDim.any()) & (not toCreate): #append variable
self.message(4, 'Append data to variable ' + name)
dumvalue = np.ma.masked_array(np.append(self.__getattribute__(name).data, value.data), mask=np.append(self.__getattribute__(name).mask, value.mask)) #We gather a list of attributes :
# - already in data structure,
# - in current data file
# - and not in output structure
attributes = set(self.__getattribute__(name).__dict__.keys())
attributes = attributes.union(value.__dict__.keys()) # attributes=attributes.difference(self.__getattribute__(name).__dict__.keys())
attributes = attributes.difference(dumvalue.__dict__.keys()) #Then :
# - we add attributes of current file not in data structure
# - we keep attributes of current data structure if they exist
for a in attributes:
dumvalue.__setattr__(a, value.__dict__[a] if hasattr(value, a) else self.__getattribute__(name).__getattribute__(a)) # depends on [control=['for'], data=['a']]
value = copy.deepcopy(dumvalue)
updateDim = True # depends on [control=['if'], data=[]]
elif createDim.any() & (not toCreate): #Impossible case ?
self.Error('Impossible case : create dimensions and variable {0} already existing'.format(name)) # depends on [control=['if'], data=[]] #Append dimensions to variable
if not dimensions.has_key('_ndims'):
dumDim = dimStr(dimensions)
dimensions = dumDim.copy() # depends on [control=['if'], data=[]] #Update variable dimensions
if updateDim:
for k in dimensions.keys():
dimensions.update({k: self._dimensions[k]}) # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
value.__setattr__('_dimensions', dimensions)
try:
self.__setattr__(name, value) # depends on [control=['try'], data=[]]
except np.ma.core.MaskError:
raise np.ma.core.MaskError('mask error') # depends on [control=['except'], data=[]] #
# try : exec(cmd)
# except np.ma.core.MaskError :
# raise 'mask error'
# exec(cmd)
return updateDim |
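To make the structures in the create_Variable docstring concrete, a minimal sketch of building its inputs (only the dict layout comes from the docstring; the hydro_data-like object `d` is assumed to exist):
import numpy as np

# Data structure: '_dimensions' and 'data' are compulsory, the rest become attributes.
sst = {'_dimensions': {'_ndims': 1, 'dims': {'time': 100}},
       'data': np.ma.masked_array(np.zeros(100), mask=np.zeros(100, dtype='bool')),
       'long_name': 'sea surface temperature',
       'units': 'degC'}

# A plain name->size mapping is accepted too: create_Variable wraps it with
# dimStr() when the '_ndims' key is missing (see the code above).
dims = {'time': 100}

# d.create_Variable('sst', sst, dims)  # d: an existing hydro_data-like object (assumed)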
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000}) | def function[implicitly_wait, parameter[self, time_to_wait]]:
constant[
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
]
if name[self].w3c begin[:]
call[name[self].execute, parameter[name[Command].SET_TIMEOUTS, dictionary[[<ast.Constant object at 0x7da20e9b0c40>], [<ast.Call object at 0x7da20e9b3ca0>]]]] | keyword[def] identifier[implicitly_wait] ( identifier[self] , identifier[time_to_wait] ):
literal[string]
keyword[if] identifier[self] . identifier[w3c] :
identifier[self] . identifier[execute] ( identifier[Command] . identifier[SET_TIMEOUTS] ,{
literal[string] : identifier[int] ( identifier[float] ( identifier[time_to_wait] )* literal[int] )})
keyword[else] :
identifier[self] . identifier[execute] ( identifier[Command] . identifier[IMPLICIT_WAIT] ,{
literal[string] : identifier[float] ( identifier[time_to_wait] )* literal[int] }) | def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {'implicit': int(float(time_to_wait) * 1000)}) # depends on [control=['if'], data=[]]
else:
self.execute(Command.IMPLICIT_WAIT, {'ms': float(time_to_wait) * 1000}) |
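A typical usage sketch for the implicit wait above, following the docstring's own example (selenium-3-era API; note the W3C branch sends integer milliseconds while the legacy branch sends a float):
from selenium import webdriver

driver = webdriver.Firefox()
driver.implicitly_wait(30)  # poll up to 30 s when locating elements
driver.get('http://somedomain/url_that_delays_loading')
element = driver.find_element_by_id('myDynamicElement')  # waits implicitly
driver.quit()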
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr | def function[originalTextFor, parameter[expr, asString]]:
constant[Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
]
variable[locMarker] assign[=] call[call[name[Empty], parameter[]].setParseAction, parameter[<ast.Lambda object at 0x7da2054a4cd0>]]
variable[endlocMarker] assign[=] call[name[locMarker].copy, parameter[]]
name[endlocMarker].callPreparse assign[=] constant[False]
variable[matchExpr] assign[=] binary_operation[binary_operation[call[name[locMarker], parameter[constant[_original_start]]] + name[expr]] + call[name[endlocMarker], parameter[constant[_original_end]]]]
if name[asString] begin[:]
variable[extractText] assign[=] <ast.Lambda object at 0x7da2054a6cb0>
call[name[matchExpr].setParseAction, parameter[name[extractText]]]
name[matchExpr].ignoreExprs assign[=] name[expr].ignoreExprs
return[name[matchExpr]] | keyword[def] identifier[originalTextFor] ( identifier[expr] , identifier[asString] = keyword[True] ):
literal[string]
identifier[locMarker] = identifier[Empty] (). identifier[setParseAction] ( keyword[lambda] identifier[s] , identifier[loc] , identifier[t] : identifier[loc] )
identifier[endlocMarker] = identifier[locMarker] . identifier[copy] ()
identifier[endlocMarker] . identifier[callPreparse] = keyword[False]
identifier[matchExpr] = identifier[locMarker] ( literal[string] )+ identifier[expr] + identifier[endlocMarker] ( literal[string] )
keyword[if] identifier[asString] :
identifier[extractText] = keyword[lambda] identifier[s] , identifier[l] , identifier[t] : identifier[s] [ identifier[t] . identifier[_original_start] : identifier[t] . identifier[_original_end] ]
keyword[else] :
keyword[def] identifier[extractText] ( identifier[s] , identifier[l] , identifier[t] ):
identifier[t] [:]=[ identifier[s] [ identifier[t] . identifier[pop] ( literal[string] ): identifier[t] . identifier[pop] ( literal[string] )]]
identifier[matchExpr] . identifier[setParseAction] ( identifier[extractText] )
identifier[matchExpr] . identifier[ignoreExprs] = identifier[expr] . identifier[ignoreExprs]
keyword[return] identifier[matchExpr] | def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker('_original_start') + expr + endlocMarker('_original_end')
if asString:
extractText = lambda s, l, t: s[t._original_start:t._original_end] # depends on [control=['if'], data=[]]
else:
def extractText(s, l, t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr |
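The asString=False path above keeps results names intact; a short sketch building on the docstring's own example (pyparsing public API; the 'body' name is illustrative):
from pyparsing import SkipTo, makeHTMLTags, originalTextFor

src = "this is test <b> bold <i>text</i> </b> normal text "
opener, closer = makeHTMLTags("b")
patt = originalTextFor(opener + SkipTo(closer)("body") + closer, asString=False)
result = patt.searchString(src)[0]
print(result[0])    # the single untokenized token: '<b> bold <i>text</i> </b>'
print(result.body)  # per the docstring, the named result is preserved when asString=False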
def is_admin(self):
"""Is the user a system administrator"""
return self.role == self.roles.administrator.value and self.state == State.approved | def function[is_admin, parameter[self]]:
constant[Is the user a system administrator]
return[<ast.BoolOp object at 0x7da204347850>] | keyword[def] identifier[is_admin] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[role] == identifier[self] . identifier[roles] . identifier[administrator] . identifier[value] keyword[and] identifier[self] . identifier[state] == identifier[State] . identifier[approved] | def is_admin(self):
"""Is the user a system administrator"""
return self.role == self.roles.administrator.value and self.state == State.approved |
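A hypothetical sketch of the enum-backed role/state pattern is_admin relies on (the real `roles` and `State` definitions are outside the sample; only the comparison logic is taken from it):
from enum import Enum

class Roles(Enum):
    administrator = 1
    member = 2

class State(Enum):
    pending = 'pending'
    approved = 'approved'

class User:
    roles = Roles  # class-level alias, as self.roles in the sample suggests

    def __init__(self, role, state):
        self.role = role    # stored as the enum's value, per the comparison above
        self.state = state

    def is_admin(self):
        """Is the user a system administrator"""
        return self.role == self.roles.administrator.value and self.state == State.approved

print(User(Roles.administrator.value, State.approved).is_admin())  # True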
def _Liquid(T, P=0.1):
"""Supplementary release on properties of liquid water at 0.1 MPa
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Although this relation is for P=0.1 MPa, it can be extrapolated to
pressures up to 0.3 MPa
Returns
-------
prop : dict
Dict with calculated properties of water. The available properties are:
* h: Specific enthalpy, [kJ/kg]
* u: Specific internal energy, [kJ/kg]
* a: Specific Helmholtz energy, [kJ/kg]
* g: Specific Gibbs energy, [kJ/kg]
* s: Specific entropy, [kJ/kgK]
* cp: Specific isobaric heat capacity, [kJ/kgK]
* cv: Specific isochoric heat capacity, [kJ/kgK]
* w: Speed of sound, [m/s]
* rho: Density, [kg/m³]
* v: Specific volume, [m³/kg]
* vt: [∂v/∂T]P, [m³/kgK]
* vtt: [∂²v/∂T²]P, [m³/kgK²]
* vp: [∂v/∂P]T, [m³/kg/MPa]
* vtp: [∂²v/∂T∂P], [m³/kg/MPa]
* alfav: Cubic expansion coefficient, [1/K]
* xkappa : Isothermal compressibility, [1/MPa]
* ks: Isentropic compressibility, [1/MPa]
* mu: Viscosity, [mPas]
* k: Thermal conductivity, [W/mK]
* epsilon: Dielectric constant, [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 253.15 ≤ T ≤ 383.15
* 0.1 ≤ P ≤ 0.3
Examples
--------
>>> st1 = _Liquid(260)
>>> st1["rho"], st1["h"], st1["s"]
997.0683602710492 -55.86223174460868 -0.20998554842619535
References
----------
IAPWS, Revised Supplementary Release on Properties of Liquid Water at 0.1
MPa, http://www.iapws.org/relguide/LiquidWater.html
"""
# Check input in range of validity
if T <= 253.15 or T >= 383.15 or P < 0.1 or P > 0.3:
raise NotImplementedError("Input out of bounds")
elif P != 0.1:
# Raise a warning if the P value is extrapolated
warnings.warn("Using extrapolated values")
R = 0.46151805 # kJ/kgK
Po = 0.1
Tr = 10
tau = T/Tr
alfa = Tr/(593-T)
beta = Tr/(T-232)
a = [None, -1.661470539e5, 2.708781640e6, -1.557191544e8, None,
1.93763157e-2, 6.74458446e3, -2.22521604e5, 1.00231247e8,
-1.63552118e9, 8.32299658e9, -7.5245878e-6, -1.3767418e-2,
1.0627293e1, -2.0457795e2, 1.2037414e3]
b = [None, -8.237426256e-1, 1.908956353, -2.017597384, 8.546361348e-1,
5.78545292e-3, -1.53195665E-2, 3.11337859e-2, -4.23546241e-2,
3.38713507e-2, -1.19946761e-2, -3.1091470e-6, 2.8964919e-5,
-1.3112763e-4, 3.0410453e-4, -3.9034594e-4, 2.3403117e-4,
-4.8510101e-5]
c = [None, -2.452093414e2, 3.869269598e1, -8.983025854]
n = [None, 4, 5, 7, None, None, 4, 5, 7, 8, 9, 1, 3, 5, 6, 7]
m = [None, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 1, 3, 4, 5, 6, 7, 9]
suma1 = sum([a[i]*alfa**n[i] for i in range(1, 4)])
suma2 = sum([b[i]*beta**m[i] for i in range(1, 5)])
go = R*Tr*(c[1]+c[2]*tau+c[3]*tau*log(tau)+suma1+suma2)
suma1 = sum([a[i]*alfa**n[i] for i in range(6, 11)])
suma2 = sum([b[i]*beta**m[i] for i in range(5, 11)])
vo = R*Tr/Po/1000*(a[5]+suma1+suma2)
suma1 = sum([a[i]*alfa**n[i] for i in range(11, 16)])
suma2 = sum([b[i]*beta**m[i] for i in range(11, 18)])
vpo = R*Tr/Po**2/1000*(suma1+suma2)
suma1 = sum([n[i]*a[i]*alfa**(n[i]+1) for i in range(1, 4)])
suma2 = sum([m[i]*b[i]*beta**(m[i]+1) for i in range(1, 5)])
so = -R*(c[2]+c[3]*(1+log(tau))+suma1-suma2)
suma1 = sum([n[i]*(n[i]+1)*a[i]*alfa**(n[i]+2) for i in range(1, 4)])
suma2 = sum([m[i]*(m[i]+1)*b[i]*beta**(m[i]+2) for i in range(1, 5)])
cpo = -R*(c[3]+tau*suma1+tau*suma2)
suma1 = sum([n[i]*a[i]*alfa**(n[i]+1) for i in range(6, 11)])
suma2 = sum([m[i]*b[i]*beta**(m[i]+1) for i in range(5, 11)])
vto = R/Po/1000*(suma1-suma2)
# These properties are only necessary for computing thermodynamic
# properties at pressures different from 0.1 MPa
suma1 = sum([n[i]*(n[i]+1)*a[i]*alfa**(n[i]+2) for i in range(6, 11)])
suma2 = sum([m[i]*(m[i]+1)*b[i]*beta**(m[i]+2) for i in range(5, 11)])
vtto = R/Tr/Po/1000*(suma1+suma2)
suma1 = sum([n[i]*a[i]*alfa**(n[i]+1) for i in range(11, 16)])
suma2 = sum([m[i]*b[i]*beta**(m[i]+1) for i in range(11, 18)])
vpto = R/Po**2/1000*(suma1-suma2)
if P != 0.1:
go += vo*(P-0.1)
so -= vto*(P-0.1)
cpo -= T*vtto*(P-0.1)
vo -= vpo*(P-0.1)
vto += vpto*(P-0.1)
vppo = 3.24e-10*R*Tr/0.1**3
vpo += vppo*(P-0.1)
h = go+T*so
u = h-P*vo
a = go-P*vo
cv = cpo+T*vto**2/vpo
xkappa = -vpo/vo
alfa = vto/vo
ks = -(T*vto**2/cpo+vpo)/vo
w = (-vo**2*1e9/(vpo*1e3+T*vto**2*1e6/cpo))**0.5
propiedades = {}
propiedades["g"] = go
propiedades["T"] = T
propiedades["P"] = P
propiedades["v"] = vo
propiedades["vt"] = vto
propiedades["vp"] = vpo
propiedades["vpt"] = vpto
propiedades["vtt"] = vtto
propiedades["rho"] = 1/vo
propiedades["h"] = h
propiedades["s"] = so
propiedades["cp"] = cpo
propiedades["cv"] = cv
propiedades["u"] = u
propiedades["a"] = a
propiedades["xkappa"] = xkappa
propiedades["alfav"] = vto/vo
propiedades["ks"] = ks
propiedades["w"] = w
# Viscosity correlation, Eq 7
a = [None, 280.68, 511.45, 61.131, 0.45903]
b = [None, -1.9, -7.7, -19.6, -40]
T_ = T/300
mu = sum([a[i]*T_**b[i] for i in range(1, 5)])/1e6
propiedades["mu"] = mu
# Thermal conductivity correlation, Eq 8
c = [None, 1.6630, -1.7781, 1.1567, -0.432115]
d = [None, -1.15, -3.4, -6.0, -7.6]
k = sum([c[i]*T_**d[i] for i in range(1, 5)])
propiedades["k"] = k
# Dielectric constant correlation, Eq 9
e = [None, -43.7527, 299.504, -399.364, 221.327]
f = [None, -0.05, -1.47, -2.11, -2.31]
epsilon = sum([e[i]*T_**f[i] for i in range(1, 5)])
propiedades["epsilon"] = epsilon
return propiedades | def function[_Liquid, parameter[T, P]]:
constant[Supplementary release on properties of liquid water at 0.1 MPa
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Although this relation is for P=0.1 MPa, it can be extrapolated to
pressures up to 0.3 MPa
Returns
-------
prop : dict
Dict with calculated properties of water. The available properties are:
* h: Specific enthalpy, [kJ/kg]
* u: Specific internal energy, [kJ/kg]
* a: Specific Helmholtz energy, [kJ/kg]
* g: Specific Gibbs energy, [kJ/kg]
* s: Specific entropy, [kJ/kgK]
* cp: Specific isobaric heat capacity, [kJ/kgK]
* cv: Specific isochoric heat capacity, [kJ/kgK]
* w: Speed of sound, [m/s]
* rho: Density, [kg/m³]
* v: Specific volume, [m³/kg]
* vt: [∂v/∂T]P, [m³/kgK]
* vtt: [∂²v/∂T²]P, [m³/kgK²]
* vp: [∂v/∂P]T, [m³/kg/MPa]
* vtp: [∂²v/∂T∂P], [m³/kg/MPa]
* alfav: Cubic expansion coefficient, [1/K]
* xkappa : Isothermal compressibility, [1/MPa]
* ks: Isentropic compressibility, [1/MPa]
* mu: Viscosity, [mPas]
* k: Thermal conductivity, [W/mK]
* epsilon: Dielectric constant, [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 253.15 ≤ T ≤ 383.15
* 0.1 ≤ P ≤ 0.3
Examples
--------
>>> st1 = _Liquid(260)
>>> st1["rho"], st1["h"], st1["s"]
997.0683602710492 -55.86223174460868 -0.20998554842619535
References
----------
IAPWS, Revised Supplementary Release on Properties of Liquid Water at 0.1
MPa, http://www.iapws.org/relguide/LiquidWater.html
]
if <ast.BoolOp object at 0x7da1b06c9c30> begin[:]
<ast.Raise object at 0x7da1b06c9ea0>
variable[R] assign[=] constant[0.46151805]
variable[Po] assign[=] constant[0.1]
variable[Tr] assign[=] constant[10]
variable[tau] assign[=] binary_operation[name[T] / name[Tr]]
variable[alfa] assign[=] binary_operation[name[Tr] / binary_operation[constant[593] - name[T]]]
variable[beta] assign[=] binary_operation[name[Tr] / binary_operation[name[T] - constant[232]]]
variable[a] assign[=] list[[<ast.Constant object at 0x7da1b06ca710>, <ast.UnaryOp object at 0x7da1b06ca740>, <ast.Constant object at 0x7da1b06ca7a0>, <ast.UnaryOp object at 0x7da1b06ca7d0>, <ast.Constant object at 0x7da1b06ca830>, <ast.Constant object at 0x7da1b06ca860>, <ast.Constant object at 0x7da1b06ca890>, <ast.UnaryOp object at 0x7da1b06ca8c0>, <ast.Constant object at 0x7da1b06ca920>, <ast.UnaryOp object at 0x7da1b06ca950>, <ast.Constant object at 0x7da1b06ca9b0>, <ast.UnaryOp object at 0x7da1b06ca9e0>, <ast.UnaryOp object at 0x7da1b06caa40>, <ast.Constant object at 0x7da1b06caaa0>, <ast.UnaryOp object at 0x7da1b06caad0>, <ast.Constant object at 0x7da1b06cab30>]]
variable[b] assign[=] list[[<ast.Constant object at 0x7da1b06cabf0>, <ast.UnaryOp object at 0x7da1b06cac20>, <ast.Constant object at 0x7da1b06cac80>, <ast.UnaryOp object at 0x7da1b06cacb0>, <ast.Constant object at 0x7da1b06cad10>, <ast.Constant object at 0x7da1b06cad40>, <ast.UnaryOp object at 0x7da1b06cad70>, <ast.Constant object at 0x7da1b06cadd0>, <ast.UnaryOp object at 0x7da1b06cae00>, <ast.Constant object at 0x7da1b06cae60>, <ast.UnaryOp object at 0x7da1b06cae90>, <ast.UnaryOp object at 0x7da1b06caef0>, <ast.Constant object at 0x7da1b06caf50>, <ast.UnaryOp object at 0x7da1b06caf80>, <ast.Constant object at 0x7da1b06cafe0>, <ast.UnaryOp object at 0x7da1b06cb010>, <ast.Constant object at 0x7da1b06cb070>, <ast.UnaryOp object at 0x7da1b06cb0a0>]]
variable[c] assign[=] list[[<ast.Constant object at 0x7da1b06cb190>, <ast.UnaryOp object at 0x7da1b06cb1c0>, <ast.Constant object at 0x7da1b06cb220>, <ast.UnaryOp object at 0x7da1b06cb250>]]
variable[n] assign[=] list[[<ast.Constant object at 0x7da1b06cb340>, <ast.Constant object at 0x7da1b06cb370>, <ast.Constant object at 0x7da1b06cb3a0>, <ast.Constant object at 0x7da1b06cb3d0>, <ast.Constant object at 0x7da1b06cb400>, <ast.Constant object at 0x7da1b06cb430>, <ast.Constant object at 0x7da1b06cb460>, <ast.Constant object at 0x7da1b06cb490>, <ast.Constant object at 0x7da1b06cb4c0>, <ast.Constant object at 0x7da1b06cb4f0>, <ast.Constant object at 0x7da1b06cb520>, <ast.Constant object at 0x7da1b06cb550>, <ast.Constant object at 0x7da1b06cb580>, <ast.Constant object at 0x7da1b06cb5b0>, <ast.Constant object at 0x7da1b06cb5e0>, <ast.Constant object at 0x7da1b06cb610>]]
variable[m] assign[=] list[[<ast.Constant object at 0x7da1b06cb6d0>, <ast.Constant object at 0x7da1b06cb700>, <ast.Constant object at 0x7da1b06cb730>, <ast.Constant object at 0x7da1b06cb760>, <ast.Constant object at 0x7da1b06cb790>, <ast.Constant object at 0x7da1b06cb7c0>, <ast.Constant object at 0x7da1b06cb7f0>, <ast.Constant object at 0x7da1b06cb820>, <ast.Constant object at 0x7da1b06cb850>, <ast.Constant object at 0x7da1b06cb880>, <ast.Constant object at 0x7da1b06cb8b0>, <ast.Constant object at 0x7da1b06cb8e0>, <ast.Constant object at 0x7da1b06cb910>, <ast.Constant object at 0x7da1b06cb940>, <ast.Constant object at 0x7da1b06cb970>, <ast.Constant object at 0x7da1b06cb9a0>, <ast.Constant object at 0x7da1b06cb9d0>, <ast.Constant object at 0x7da1b06cba00>]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b06cbaf0>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b06cbee0>]]
variable[go] assign[=] binary_operation[binary_operation[name[R] * name[Tr]] * binary_operation[binary_operation[binary_operation[binary_operation[call[name[c]][constant[1]] + binary_operation[call[name[c]][constant[2]] * name[tau]]] + binary_operation[binary_operation[call[name[c]][constant[3]] * name[tau]] * call[name[log], parameter[name[tau]]]]] + name[suma1]] + name[suma2]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b0687790>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b06873a0>]]
variable[vo] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[R] * name[Tr]] / name[Po]] / constant[1000]] * binary_operation[binary_operation[call[name[a]][constant[5]] + name[suma1]] + name[suma2]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b0686c80>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b0686890>]]
variable[vpo] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[R] * name[Tr]] / binary_operation[name[Po] ** constant[2]]] / constant[1000]] * binary_operation[name[suma1] + name[suma2]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b0684d30>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b0684820>]]
variable[so] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b0684340> * binary_operation[binary_operation[binary_operation[call[name[c]][constant[2]] + binary_operation[call[name[c]][constant[3]] * binary_operation[constant[1] + call[name[log], parameter[name[tau]]]]]] + name[suma1]] - name[suma2]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c3a30>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c07c0>]]
variable[cpo] assign[=] binary_operation[<ast.UnaryOp object at 0x7da2044c21a0> * binary_operation[binary_operation[call[name[c]][constant[3]] + binary_operation[name[tau] * name[suma1]]] + binary_operation[name[tau] * name[suma2]]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c38e0>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c0e80>]]
variable[vto] assign[=] binary_operation[binary_operation[binary_operation[name[R] / name[Po]] / constant[1000]] * binary_operation[name[suma1] - name[suma2]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c18a0>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c37c0>]]
variable[vtto] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[R] / name[Tr]] / name[Po]] / constant[1000]] * binary_operation[name[suma1] + name[suma2]]]
variable[suma1] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c3460>]]
variable[suma2] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da2044c2650>]]
variable[vpto] assign[=] binary_operation[binary_operation[binary_operation[name[R] / binary_operation[name[Po] ** constant[2]]] / constant[1000]] * binary_operation[name[suma1] - name[suma2]]]
if compare[name[P] not_equal[!=] constant[0.1]] begin[:]
<ast.AugAssign object at 0x7da2044c2b60>
<ast.AugAssign object at 0x7da2044c2ce0>
<ast.AugAssign object at 0x7da2044c2ef0>
<ast.AugAssign object at 0x7da2044c09a0>
<ast.AugAssign object at 0x7da2044c2110>
variable[vppo] assign[=] binary_operation[binary_operation[binary_operation[constant[3.24e-10] * name[R]] * name[Tr]] / binary_operation[constant[0.1] ** constant[3]]]
<ast.AugAssign object at 0x7da20c6c7460>
variable[h] assign[=] binary_operation[name[go] + binary_operation[name[T] * name[so]]]
variable[u] assign[=] binary_operation[name[h] - binary_operation[name[P] * name[vo]]]
variable[a] assign[=] binary_operation[name[go] - binary_operation[name[P] * name[vo]]]
variable[cv] assign[=] binary_operation[name[cpo] + binary_operation[binary_operation[name[T] * binary_operation[name[vto] ** constant[2]]] / name[vpo]]]
variable[xkappa] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20c6c4190> / name[vo]]
variable[alfa] assign[=] binary_operation[name[vto] / name[vo]]
variable[ks] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20c6c7ee0> / name[vo]]
variable[w] assign[=] binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20c6c76a0> * constant[1000000000.0]] / binary_operation[binary_operation[name[vpo] * constant[1000.0]] + binary_operation[binary_operation[binary_operation[name[T] * binary_operation[name[vto] ** constant[2]]] * constant[1000000.0]] / name[cpo]]]] ** constant[0.5]]
variable[propiedades] assign[=] dictionary[[], []]
call[name[propiedades]][constant[g]] assign[=] name[go]
call[name[propiedades]][constant[T]] assign[=] name[T]
call[name[propiedades]][constant[P]] assign[=] name[P]
call[name[propiedades]][constant[v]] assign[=] name[vo]
call[name[propiedades]][constant[vt]] assign[=] name[vto]
call[name[propiedades]][constant[vp]] assign[=] name[vpo]
call[name[propiedades]][constant[vpt]] assign[=] name[vpto]
call[name[propiedades]][constant[vtt]] assign[=] name[vtto]
call[name[propiedades]][constant[rho]] assign[=] binary_operation[constant[1] / name[vo]]
call[name[propiedades]][constant[h]] assign[=] name[h]
call[name[propiedades]][constant[s]] assign[=] name[so]
call[name[propiedades]][constant[cp]] assign[=] name[cpo]
call[name[propiedades]][constant[cv]] assign[=] name[cv]
call[name[propiedades]][constant[u]] assign[=] name[u]
call[name[propiedades]][constant[a]] assign[=] name[a]
call[name[propiedades]][constant[xkappa]] assign[=] name[xkappa]
call[name[propiedades]][constant[alfav]] assign[=] binary_operation[name[vto] / name[vo]]
call[name[propiedades]][constant[ks]] assign[=] name[ks]
call[name[propiedades]][constant[w]] assign[=] name[w]
variable[a] assign[=] list[[<ast.Constant object at 0x7da20c6c61d0>, <ast.Constant object at 0x7da20c6c53f0>, <ast.Constant object at 0x7da20c6c66e0>, <ast.Constant object at 0x7da20c6c6a70>, <ast.Constant object at 0x7da20c6c73d0>]]
variable[b] assign[=] list[[<ast.Constant object at 0x7da20c6c57b0>, <ast.UnaryOp object at 0x7da20c6c7970>, <ast.UnaryOp object at 0x7da204564550>, <ast.UnaryOp object at 0x7da204567250>, <ast.UnaryOp object at 0x7da204565780>]]
variable[T_] assign[=] binary_operation[name[T] / constant[300]]
variable[mu] assign[=] binary_operation[call[name[sum], parameter[<ast.ListComp object at 0x7da204564e80>]] / constant[1000000.0]]
call[name[propiedades]][constant[mu]] assign[=] name[mu]
variable[c] assign[=] list[[<ast.Constant object at 0x7da204566620>, <ast.Constant object at 0x7da204564910>, <ast.UnaryOp object at 0x7da20e9548b0>, <ast.Constant object at 0x7da20e957fd0>, <ast.UnaryOp object at 0x7da20e956560>]]
variable[d] assign[=] list[[<ast.Constant object at 0x7da20e957e50>, <ast.UnaryOp object at 0x7da20e957040>, <ast.UnaryOp object at 0x7da20e957df0>, <ast.UnaryOp object at 0x7da20e9563e0>, <ast.UnaryOp object at 0x7da20e956c80>]]
variable[k] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20e954c70>]]
call[name[propiedades]][constant[k]] assign[=] name[k]
variable[e] assign[=] list[[<ast.Constant object at 0x7da20c76d870>, <ast.UnaryOp object at 0x7da20c76da50>, <ast.Constant object at 0x7da20c76e7a0>, <ast.UnaryOp object at 0x7da20c76ed70>, <ast.Constant object at 0x7da20c76d060>]]
variable[f] assign[=] list[[<ast.Constant object at 0x7da20c76d300>, <ast.UnaryOp object at 0x7da20c76f400>, <ast.UnaryOp object at 0x7da20c76e650>, <ast.UnaryOp object at 0x7da20c76dd50>, <ast.UnaryOp object at 0x7da20c76eaa0>]]
variable[epsilon] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da20c76de10>]]
call[name[propiedades]][constant[epsilon]] assign[=] name[epsilon]
return[name[propiedades]] | keyword[def] identifier[_Liquid] ( identifier[T] , identifier[P] = literal[int] ):
literal[string]
keyword[if] identifier[T] <= literal[int] keyword[or] identifier[T] >= literal[int] keyword[or] identifier[P] < literal[int] keyword[or] identifier[P] > literal[int] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[elif] identifier[P] != literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[R] = literal[int]
identifier[Po] = literal[int]
identifier[Tr] = literal[int]
identifier[tau] = identifier[T] / identifier[Tr]
identifier[alfa] = identifier[Tr] /( literal[int] - identifier[T] )
identifier[beta] = identifier[Tr] /( identifier[T] - literal[int] )
identifier[a] =[ keyword[None] ,- literal[int] , literal[int] ,- literal[int] , keyword[None] ,
literal[int] , literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] ,- literal[int] ,
literal[int] ,- literal[int] , literal[int] ]
identifier[b] =[ keyword[None] ,- literal[int] , literal[int] ,- literal[int] , literal[int] ,
literal[int] ,- literal[int] , literal[int] ,- literal[int] ,
literal[int] ,- literal[int] ,- literal[int] , literal[int] ,
- literal[int] , literal[int] ,- literal[int] , literal[int] ,
- literal[int] ]
identifier[c] =[ keyword[None] ,- literal[int] , literal[int] ,- literal[int] ]
identifier[n] =[ keyword[None] , literal[int] , literal[int] , literal[int] , keyword[None] , keyword[None] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[m] =[ keyword[None] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[suma1] = identifier[sum] ([ identifier[a] [ identifier[i] ]* identifier[alfa] ** identifier[n] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[b] [ identifier[i] ]* identifier[beta] ** identifier[m] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[go] = identifier[R] * identifier[Tr] *( identifier[c] [ literal[int] ]+ identifier[c] [ literal[int] ]* identifier[tau] + identifier[c] [ literal[int] ]* identifier[tau] * identifier[log] ( identifier[tau] )+ identifier[suma1] + identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[a] [ identifier[i] ]* identifier[alfa] ** identifier[n] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[b] [ identifier[i] ]* identifier[beta] ** identifier[m] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[vo] = identifier[R] * identifier[Tr] / identifier[Po] / literal[int] *( identifier[a] [ literal[int] ]+ identifier[suma1] + identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[a] [ identifier[i] ]* identifier[alfa] ** identifier[n] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[b] [ identifier[i] ]* identifier[beta] ** identifier[m] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[vpo] = identifier[R] * identifier[Tr] / identifier[Po] ** literal[int] / literal[int] *( identifier[suma1] + identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[n] [ identifier[i] ]* identifier[a] [ identifier[i] ]* identifier[alfa] **( identifier[n] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[m] [ identifier[i] ]* identifier[b] [ identifier[i] ]* identifier[beta] **( identifier[m] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[so] =- identifier[R] *( identifier[c] [ literal[int] ]+ identifier[c] [ literal[int] ]*( literal[int] + identifier[log] ( identifier[tau] ))+ identifier[suma1] - identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[n] [ identifier[i] ]*( identifier[n] [ identifier[i] ]+ literal[int] )* identifier[a] [ identifier[i] ]* identifier[alfa] **( identifier[n] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[m] [ identifier[i] ]*( identifier[m] [ identifier[i] ]+ literal[int] )* identifier[b] [ identifier[i] ]* identifier[beta] **( identifier[m] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[cpo] =- identifier[R] *( identifier[c] [ literal[int] ]+ identifier[tau] * identifier[suma1] + identifier[tau] * identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[n] [ identifier[i] ]* identifier[a] [ identifier[i] ]* identifier[alfa] **( identifier[n] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[m] [ identifier[i] ]* identifier[b] [ identifier[i] ]* identifier[beta] **( identifier[m] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[vto] = identifier[R] / identifier[Po] / literal[int] *( identifier[suma1] - identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[n] [ identifier[i] ]*( identifier[n] [ identifier[i] ]+ literal[int] )* identifier[a] [ identifier[i] ]* identifier[alfa] **( identifier[n] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[m] [ identifier[i] ]*( identifier[m] [ identifier[i] ]+ literal[int] )* identifier[b] [ identifier[i] ]* identifier[beta] **( identifier[m] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[vtto] = identifier[R] / identifier[Tr] / identifier[Po] / literal[int] *( identifier[suma1] + identifier[suma2] )
identifier[suma1] = identifier[sum] ([ identifier[n] [ identifier[i] ]* identifier[a] [ identifier[i] ]* identifier[alfa] **( identifier[n] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[suma2] = identifier[sum] ([ identifier[m] [ identifier[i] ]* identifier[b] [ identifier[i] ]* identifier[beta] **( identifier[m] [ identifier[i] ]+ literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[vpto] = identifier[R] / identifier[Po] ** literal[int] / literal[int] *( identifier[suma1] - identifier[suma2] )
keyword[if] identifier[P] != literal[int] :
identifier[go] += identifier[vo] *( identifier[P] - literal[int] )
identifier[so] -= identifier[vto] *( identifier[P] - literal[int] )
identifier[cpo] -= identifier[T] * identifier[vtto] *( identifier[P] - literal[int] )
identifier[vo] -= identifier[vpo] *( identifier[P] - literal[int] )
identifier[vto] += identifier[vpto] *( identifier[P] - literal[int] )
identifier[vppo] = literal[int] * identifier[R] * identifier[Tr] / literal[int] ** literal[int]
identifier[vpo] += identifier[vppo] *( identifier[P] - literal[int] )
identifier[h] = identifier[go] + identifier[T] * identifier[so]
identifier[u] = identifier[h] - identifier[P] * identifier[vo]
identifier[a] = identifier[go] - identifier[P] * identifier[vo]
identifier[cv] = identifier[cpo] + identifier[T] * identifier[vto] ** literal[int] / identifier[vpo]
identifier[xkappa] =- identifier[vpo] / identifier[vo]
identifier[alfa] = identifier[vto] / identifier[vo]
identifier[ks] =-( identifier[T] * identifier[vto] ** literal[int] / identifier[cpo] + identifier[vpo] )/ identifier[vo]
identifier[w] =(- identifier[vo] ** literal[int] * literal[int] /( identifier[vpo] * literal[int] + identifier[T] * identifier[vto] ** literal[int] * literal[int] / identifier[cpo] ))** literal[int]
identifier[propiedades] ={}
identifier[propiedades] [ literal[string] ]= identifier[go]
identifier[propiedades] [ literal[string] ]= identifier[T]
identifier[propiedades] [ literal[string] ]= identifier[P]
identifier[propiedades] [ literal[string] ]= identifier[vo]
identifier[propiedades] [ literal[string] ]= identifier[vto]
identifier[propiedades] [ literal[string] ]= identifier[vpo]
identifier[propiedades] [ literal[string] ]= identifier[vpto]
identifier[propiedades] [ literal[string] ]= identifier[vtto]
identifier[propiedades] [ literal[string] ]= literal[int] / identifier[vo]
identifier[propiedades] [ literal[string] ]= identifier[h]
identifier[propiedades] [ literal[string] ]= identifier[so]
identifier[propiedades] [ literal[string] ]= identifier[cpo]
identifier[propiedades] [ literal[string] ]= identifier[cv]
identifier[propiedades] [ literal[string] ]= identifier[u]
identifier[propiedades] [ literal[string] ]= identifier[a]
identifier[propiedades] [ literal[string] ]= identifier[xkappa]
identifier[propiedades] [ literal[string] ]= identifier[vto] / identifier[vo]
identifier[propiedades] [ literal[string] ]= identifier[ks]
identifier[propiedades] [ literal[string] ]= identifier[w]
identifier[a] =[ keyword[None] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[b] =[ keyword[None] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ]
identifier[T_] = identifier[T] / literal[int]
identifier[mu] = identifier[sum] ([ identifier[a] [ identifier[i] ]* identifier[T_] ** identifier[b] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])/ literal[int]
identifier[propiedades] [ literal[string] ]= identifier[mu]
identifier[c] =[ keyword[None] , literal[int] ,- literal[int] , literal[int] ,- literal[int] ]
identifier[d] =[ keyword[None] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ]
identifier[k] = identifier[sum] ([ identifier[c] [ identifier[i] ]* identifier[T_] ** identifier[d] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[propiedades] [ literal[string] ]= identifier[k]
identifier[e] =[ keyword[None] ,- literal[int] , literal[int] ,- literal[int] , literal[int] ]
identifier[f] =[ keyword[None] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ]
identifier[epsilon] = identifier[sum] ([ identifier[e] [ identifier[i] ]* identifier[T_] ** identifier[f] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[propiedades] [ literal[string] ]= identifier[epsilon]
keyword[return] identifier[propiedades] | def _Liquid(T, P=0.1):
"""Supplementary release on properties of liquid water at 0.1 MPa
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Although this relation is for P=0.1 MPa, it can be extrapolated to
pressures up to 0.3 MPa
Returns
-------
prop : dict
Dict with calculated properties of water. The available properties are:
* h: Specific enthalpy, [kJ/kg]
* u: Specific internal energy, [kJ/kg]
* a: Specific Helmholtz energy, [kJ/kg]
* g: Specific Gibbs energy, [kJ/kg]
* s: Specific entropy, [kJ/kgK]
* cp: Specific isobaric heat capacity, [kJ/kgK]
* cv: Specific isochoric heat capacity, [kJ/kgK]
* w: Speed of sound, [m/s]
* rho: Density, [kg/m³]
* v: Specific volume, [m³/kg]
* vt: [∂v/∂T]P, [m³/kgK]
* vtt: [∂²v/∂T²]P, [m³/kgK²]
* vp: [∂v/∂P]T, [m³/kg/MPa]
* vtp: [∂²v/∂T∂P], [m³/kg/MPa]
* alfav: Cubic expansion coefficient, [1/K]
* xkappa : Isothermal compressibility, [1/MPa]
* ks: Isentropic compressibility, [1/MPa]
* mu: Viscosity, [mPas]
* k: Thermal conductivity, [W/mK]
* epsilon: Dielectric constant, [-]
Notes
------
Raise :class:`NotImplementedError` if the input is outside these limits:
* 253.15 ≤ T ≤ 383.15
* 0.1 ≤ P ≤ 0.3
Examples
--------
>>> st1 = _Liquid(260)
>>> st1["rho"], st1["h"], st1["s"]
997.0683602710492 -55.86223174460868 -0.20998554842619535
References
----------
IAPWS, Revised Supplementary Release on Properties of Liquid Water at 0.1
MPa, http://www.iapws.org/relguide/LiquidWater.html
"""
# Check input in range of validity
if T <= 253.15 or T >= 383.15 or P < 0.1 or (P > 0.3):
raise NotImplementedError('Incoming out of bound') # depends on [control=['if'], data=[]]
elif P != 0.1:
# Raise a warning if the P value is extrapolated
warnings.warn('Using extrapolated values') # depends on [control=['if'], data=[]]
R = 0.46151805 # kJ/kgK
Po = 0.1
Tr = 10
tau = T / Tr
alfa = Tr / (593 - T)
beta = Tr / (T - 232)
a = [None, -166147.0539, 2708781.64, -155719154.4, None, 0.0193763157, 6744.58446, -222521.604, 100231247.0, -1635521180.0, 8322996580.0, -7.5245878e-06, -0.013767418, 10.627293, -204.57795, 1203.7414]
b = [None, -0.8237426256, 1.908956353, -2.017597384, 0.8546361348, 0.00578545292, -0.0153195665, 0.0311337859, -0.0423546241, 0.0338713507, -0.0119946761, -3.109147e-06, 2.8964919e-05, -0.00013112763, 0.00030410453, -0.00039034594, 0.00023403117, -4.8510101e-05]
c = [None, -245.2093414, 38.69269598, -8.983025854]
n = [None, 4, 5, 7, None, None, 4, 5, 7, 8, 9, 1, 3, 5, 6, 7]
m = [None, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 1, 3, 4, 5, 6, 7, 9]
suma1 = sum([a[i] * alfa ** n[i] for i in range(1, 4)])
suma2 = sum([b[i] * beta ** m[i] for i in range(1, 5)])
go = R * Tr * (c[1] + c[2] * tau + c[3] * tau * log(tau) + suma1 + suma2)
suma1 = sum([a[i] * alfa ** n[i] for i in range(6, 11)])
suma2 = sum([b[i] * beta ** m[i] for i in range(5, 11)])
vo = R * Tr / Po / 1000 * (a[5] + suma1 + suma2)
suma1 = sum([a[i] * alfa ** n[i] for i in range(11, 16)])
suma2 = sum([b[i] * beta ** m[i] for i in range(11, 18)])
vpo = R * Tr / Po ** 2 / 1000 * (suma1 + suma2)
suma1 = sum([n[i] * a[i] * alfa ** (n[i] + 1) for i in range(1, 4)])
suma2 = sum([m[i] * b[i] * beta ** (m[i] + 1) for i in range(1, 5)])
so = -R * (c[2] + c[3] * (1 + log(tau)) + suma1 - suma2)
suma1 = sum([n[i] * (n[i] + 1) * a[i] * alfa ** (n[i] + 2) for i in range(1, 4)])
suma2 = sum([m[i] * (m[i] + 1) * b[i] * beta ** (m[i] + 2) for i in range(1, 5)])
cpo = -R * (c[3] + tau * suma1 + tau * suma2)
suma1 = sum([n[i] * a[i] * alfa ** (n[i] + 1) for i in range(6, 11)])
suma2 = sum([m[i] * b[i] * beta ** (m[i] + 1) for i in range(5, 11)])
vto = R / Po / 1000 * (suma1 - suma2)
# These properties are only necessary for computing thermodynamic
# properties at pressures different from 0.1 MPa
suma1 = sum([n[i] * (n[i] + 1) * a[i] * alfa ** (n[i] + 2) for i in range(6, 11)])
suma2 = sum([m[i] * (m[i] + 1) * b[i] * beta ** (m[i] + 2) for i in range(5, 11)])
vtto = R / Tr / Po / 1000 * (suma1 + suma2)
suma1 = sum([n[i] * a[i] * alfa ** (n[i] + 1) for i in range(11, 16)])
suma2 = sum([m[i] * b[i] * beta ** (m[i] + 1) for i in range(11, 18)])
vpto = R / Po ** 2 / 1000 * (suma1 - suma2)
if P != 0.1:
go += vo * (P - 0.1)
so -= vto * (P - 0.1)
cpo -= T * vtto * (P - 0.1)
vo -= vpo * (P - 0.1)
vto += vpto * (P - 0.1)
vppo = 3.24e-10 * R * Tr / 0.1 ** 3
vpo += vppo * (P - 0.1) # depends on [control=['if'], data=['P']]
h = go + T * so
u = h - P * vo
a = go - P * vo
cv = cpo + T * vto ** 2 / vpo
xkappa = -vpo / vo
alfa = vto / vo
ks = -(T * vto ** 2 / cpo + vpo) / vo
w = (-vo ** 2 * 1000000000.0 / (vpo * 1000.0 + T * vto ** 2 * 1000000.0 / cpo)) ** 0.5
propiedades = {}
propiedades['g'] = go
propiedades['T'] = T
propiedades['P'] = P
propiedades['v'] = vo
propiedades['vt'] = vto
propiedades['vp'] = vpo
propiedades['vpt'] = vpto
propiedades['vtt'] = vtto
propiedades['rho'] = 1 / vo
propiedades['h'] = h
propiedades['s'] = so
propiedades['cp'] = cpo
propiedades['cv'] = cv
propiedades['u'] = u
propiedades['a'] = a
propiedades['xkappa'] = xkappa
propiedades['alfav'] = vto / vo
propiedades['ks'] = ks
propiedades['w'] = w
# Viscosity correlation, Eq 7
a = [None, 280.68, 511.45, 61.131, 0.45903]
b = [None, -1.9, -7.7, -19.6, -40]
T_ = T / 300
mu = sum([a[i] * T_ ** b[i] for i in range(1, 5)]) / 1000000.0
propiedades['mu'] = mu
# Thermal conductivity correlation, Eq 8
c = [None, 1.663, -1.7781, 1.1567, -0.432115]
d = [None, -1.15, -3.4, -6.0, -7.6]
k = sum([c[i] * T_ ** d[i] for i in range(1, 5)])
propiedades['k'] = k
# Dielectric constant correlation, Eq 9
e = [None, -43.7527, 299.504, -399.364, 221.327]
f = [None, -0.05, -1.47, -2.11, -2.31]
epsilon = sum([e[i] * T_ ** f[i] for i in range(1, 5)])
propiedades['epsilon'] = epsilon
return propiedades |
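# Usage sketch for _Liquid above (function assumed in scope; the query values
# are illustrative, not reference data):
props = _Liquid(280.0)                        # liquid water at 280 K, 0.1 MPa
print(props["rho"], props["h"], props["mu"])  # density, enthalpy, viscosity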
def get_alias(self,
alias=None,
manifest=None,
verify=True,
sizes=False,
dcd=None):
# pylint: disable=too-many-arguments
"""
Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.
"""
return self._get_alias(alias, manifest, verify, sizes, dcd, False) | def function[get_alias, parameter[self, alias, manifest, verify, sizes, dcd]]:
constant[
Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.
]
return[call[name[self]._get_alias, parameter[name[alias], name[manifest], name[verify], name[sizes], name[dcd], constant[False]]]] | keyword[def] identifier[get_alias] ( identifier[self] ,
identifier[alias] = keyword[None] ,
identifier[manifest] = keyword[None] ,
identifier[verify] = keyword[True] ,
identifier[sizes] = keyword[False] ,
identifier[dcd] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_get_alias] ( identifier[alias] , identifier[manifest] , identifier[verify] , identifier[sizes] , identifier[dcd] , keyword[False] ) | def get_alias(self, alias=None, manifest=None, verify=True, sizes=False, dcd=None):
# pylint: disable=too-many-arguments
"\n Get the blob hashes assigned to an alias.\n\n :param alias: Alias name. You almost definitely will only need to pass this argument.\n :type alias: str\n\n :param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.\n :type manifest: str\n\n :param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).\n :type verify: bool\n\n :param sizes: Whether to return sizes of the blobs along with their hashes\n :type sizes: bool\n\n :param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.\n :type dcd: str\n\n :rtype: list\n :returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.\n "
return self._get_alias(alias, manifest, verify, sizes, dcd, False) |
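# Hypothetical call pattern for get_alias (the registry client `dxf` and the
# alias name "latest" are assumptions):
hashes = dxf.get_alias("latest")              # list of blob hashes
for digest, size in dxf.get_alias("latest", sizes=True):
    print(digest, size)                       # hash plus blob size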
def find_coordinates(hmms, bit_thresh):
"""
find 16S rRNA gene sequence coordinates
"""
# get coordinates from cmsearch output
seq2hmm = parse_hmm(hmms, bit_thresh)
seq2hmm = best_model(seq2hmm)
group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
for seq, info in list(seq2hmm.items()):
group2hmm[seq] = {}
# info = [model, [[hit1], [hit2], ...]]
for group_num, group in enumerate(hit_groups(info[1])):
# group is a group of hits to a single 16S gene
# determine matching strand based on best hit
best = sorted(group, reverse = True, key = itemgetter(-1))[0]
strand = best[5]
coordinates = [i[0] for i in group] + [i[1] for i in group]
coordinates = [min(coordinates), max(coordinates), strand]
# make sure all hits are to the same strand
matches = [i for i in group if i[5] == strand]
# gaps = [[gstart, gend], [gstart2, gend2]]
gaps = check_gaps(matches)
group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps]
return group2hmm | def function[find_coordinates, parameter[hmms, bit_thresh]]:
constant[
find 16S rRNA gene sequence coordinates
]
variable[seq2hmm] assign[=] call[name[parse_hmm], parameter[name[hmms], name[bit_thresh]]]
variable[seq2hmm] assign[=] call[name[best_model], parameter[name[seq2hmm]]]
variable[group2hmm] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2041d8af0>, <ast.Name object at 0x7da2041da260>]]] in starred[call[name[list], parameter[call[name[seq2hmm].items, parameter[]]]]] begin[:]
call[name[group2hmm]][name[seq]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2041d8820>, <ast.Name object at 0x7da2041d9420>]]] in starred[call[name[enumerate], parameter[call[name[hit_groups], parameter[call[name[info]][constant[1]]]]]]] begin[:]
variable[best] assign[=] call[call[name[sorted], parameter[name[group]]]][constant[0]]
variable[strand] assign[=] call[name[best]][constant[5]]
variable[coordinates] assign[=] binary_operation[<ast.ListComp object at 0x7da2041d9480> + <ast.ListComp object at 0x7da2041da770>]
variable[coordinates] assign[=] list[[<ast.Call object at 0x7da2041dbb80>, <ast.Call object at 0x7da2041db8b0>, <ast.Name object at 0x7da2041db5b0>]]
variable[matches] assign[=] <ast.ListComp object at 0x7da2041dbb20>
variable[gaps] assign[=] call[name[check_gaps], parameter[name[matches]]]
call[call[name[group2hmm]][name[seq]]][name[group_num]] assign[=] list[[<ast.Subscript object at 0x7da2041d9150>, <ast.Name object at 0x7da2041d9ed0>, <ast.Name object at 0x7da2041dbf70>, <ast.Name object at 0x7da2041d8cd0>, <ast.Name object at 0x7da2041da740>]]
return[name[group2hmm]] | keyword[def] identifier[find_coordinates] ( identifier[hmms] , identifier[bit_thresh] ):
literal[string]
identifier[seq2hmm] = identifier[parse_hmm] ( identifier[hmms] , identifier[bit_thresh] )
identifier[seq2hmm] = identifier[best_model] ( identifier[seq2hmm] )
identifier[group2hmm] ={}
keyword[for] identifier[seq] , identifier[info] keyword[in] identifier[list] ( identifier[seq2hmm] . identifier[items] ()):
identifier[group2hmm] [ identifier[seq] ]={}
keyword[for] identifier[group_num] , identifier[group] keyword[in] identifier[enumerate] ( identifier[hit_groups] ( identifier[info] [ literal[int] ])):
identifier[best] = identifier[sorted] ( identifier[group] , identifier[reverse] = keyword[True] , identifier[key] = identifier[itemgetter] (- literal[int] ))[ literal[int] ]
identifier[strand] = identifier[best] [ literal[int] ]
identifier[coordinates] =[ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[group] ]+[ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[group] ]
identifier[coordinates] =[ identifier[min] ( identifier[coordinates] ), identifier[max] ( identifier[coordinates] ), identifier[strand] ]
identifier[matches] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[group] keyword[if] identifier[i] [ literal[int] ]== identifier[strand] ]
identifier[gaps] = identifier[check_gaps] ( identifier[matches] )
identifier[group2hmm] [ identifier[seq] ][ identifier[group_num] ]=[ identifier[info] [ literal[int] ], identifier[strand] , identifier[coordinates] , identifier[matches] , identifier[gaps] ]
keyword[return] identifier[group2hmm] | def find_coordinates(hmms, bit_thresh):
"""
find 16S rRNA gene sequence coordinates
"""
# get coordinates from cmsearch output
seq2hmm = parse_hmm(hmms, bit_thresh)
seq2hmm = best_model(seq2hmm)
group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
for (seq, info) in list(seq2hmm.items()):
group2hmm[seq] = {}
# info = [model, [[hit1], [hit2], ...]]
for (group_num, group) in enumerate(hit_groups(info[1])):
# group is a group of hits to a single 16S gene
# determine matching strand based on best hit
best = sorted(group, reverse=True, key=itemgetter(-1))[0]
strand = best[5]
coordinates = [i[0] for i in group] + [i[1] for i in group]
coordinates = [min(coordinates), max(coordinates), strand]
# make sure all hits are to the same strand
matches = [i for i in group if i[5] == strand]
# gaps = [[gstart, gend], [gstart2, gend2]]
gaps = check_gaps(matches)
group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return group2hmm |
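# Sketch of consuming find_coordinates output (the input file name and the
# bit-score threshold are assumptions):
group2hmm = find_coordinates("cmsearch.out", bit_thresh=40)
for seq, groups in group2hmm.items():
    for num, (model, strand, coords, matches, gaps) in groups.items():
        print(seq, num, model, strand, coords)  # one line per 16S gene hit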
def totz(when, tz=None):
"""
Return a date, time, or datetime converted to a datetime in the given timezone. If when is a
datetime and has no timezone it is assumed to be local time. Date and time objects are also
assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to
a datetime.
"""
if when is None:
return None
when = to_datetime(when)
if when.tzinfo is None:
when = when.replace(tzinfo=localtz)
return when.astimezone(tz or utc) | def function[totz, parameter[when, tz]]:
constant[
Return a date, time, or datetime converted to a datetime in the given timezone. If when is a
datetime and has no timezone it is assumed to be local time. Date and time objects are also
assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to
a datetime.
]
if compare[name[when] is constant[None]] begin[:]
return[constant[None]]
variable[when] assign[=] call[name[to_datetime], parameter[name[when]]]
if compare[name[when].tzinfo is constant[None]] begin[:]
variable[when] assign[=] call[name[when].replace, parameter[]]
return[call[name[when].astimezone, parameter[<ast.BoolOp object at 0x7da20e957430>]]] | keyword[def] identifier[totz] ( identifier[when] , identifier[tz] = keyword[None] ):
literal[string]
keyword[if] identifier[when] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[when] = identifier[to_datetime] ( identifier[when] )
keyword[if] identifier[when] . identifier[tzinfo] keyword[is] keyword[None] :
identifier[when] = identifier[when] . identifier[replace] ( identifier[tzinfo] = identifier[localtz] )
keyword[return] identifier[when] . identifier[astimezone] ( identifier[tz] keyword[or] identifier[utc] ) | def totz(when, tz=None):
"""
Return a date, time, or datetime converted to a datetime in the given timezone. If when is a
datetime and has no timezone it is assumed to be local time. Date and time objects are also
assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to
a datetime.
"""
if when is None:
return None # depends on [control=['if'], data=[]]
when = to_datetime(when)
if when.tzinfo is None:
when = when.replace(tzinfo=localtz) # depends on [control=['if'], data=[]]
return when.astimezone(tz or utc) |
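# Minimal sketch of totz semantics (totz, localtz and utc assumed importable
# from the module above):
from datetime import datetime
naive = datetime(2020, 1, 1, 12, 0)  # no tzinfo: treated as local time
print(totz(naive))                   # same instant expressed in UTC
print(totz(naive, tz=localtz))       # or converted to an explicit zone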
def delegate_method(other, method, name=None):
"""Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic in the protocol, while saving the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method)
"""
frame = sys._getframe(1)
classdict = frame.f_locals
@functools.wraps(method)
def delegate(self, *args, **kwargs):
other_self = other.__get__(self)
return method(other_self, *args, **kwargs)
if getattr(method, '__switchpoint__', False):
delegate.__switchpoint__ = True
if name is None:
name = method.__name__
propname = None
for key in classdict:
if classdict[key] is other:
propname = key
break
# If we know the property name, replace the docstring with a small
# reference instead of copying the function docstring.
if propname:
qname = getattr(method, '__qualname__', method.__name__)
if '.' in qname:
delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \
.format(name=name, propname=propname)
else:
delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \
.format(name=name, propname=propname)
classdict[name] = delegate | def function[delegate_method, parameter[other, method, name]]:
constant[Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic in the protocol, while saving the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method)
]
variable[frame] assign[=] call[name[sys]._getframe, parameter[constant[1]]]
variable[classdict] assign[=] name[frame].f_locals
def function[delegate, parameter[self]]:
variable[other_self] assign[=] call[name[other].__get__, parameter[name[self]]]
return[call[name[method], parameter[name[other_self], <ast.Starred object at 0x7da1b0274a60>]]]
if call[name[getattr], parameter[name[method], constant[__switchpoint__], constant[False]]] begin[:]
name[delegate].__switchpoint__ assign[=] constant[True]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] name[method].__name__
variable[propname] assign[=] constant[None]
for taget[name[key]] in starred[name[classdict]] begin[:]
if compare[call[name[classdict]][name[key]] is name[other]] begin[:]
variable[propname] assign[=] name[key]
break
if name[propname] begin[:]
variable[qname] assign[=] call[name[getattr], parameter[name[method], constant[__qualname__], name[method].__name__]]
if compare[constant[.] in name[qname]] begin[:]
name[delegate].__doc__ assign[=] call[constant[A shorthand for ``self.{propname}.{name}()``.].format, parameter[]]
call[name[classdict]][name[name]] assign[=] name[delegate] | keyword[def] identifier[delegate_method] ( identifier[other] , identifier[method] , identifier[name] = keyword[None] ):
literal[string]
identifier[frame] = identifier[sys] . identifier[_getframe] ( literal[int] )
identifier[classdict] = identifier[frame] . identifier[f_locals]
@ identifier[functools] . identifier[wraps] ( identifier[method] )
keyword[def] identifier[delegate] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[other_self] = identifier[other] . identifier[__get__] ( identifier[self] )
keyword[return] identifier[method] ( identifier[other_self] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[getattr] ( identifier[method] , literal[string] , keyword[False] ):
identifier[delegate] . identifier[__switchpoint__] = keyword[True]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[method] . identifier[__name__]
identifier[propname] = keyword[None]
keyword[for] identifier[key] keyword[in] identifier[classdict] :
keyword[if] identifier[classdict] [ identifier[key] ] keyword[is] identifier[other] :
identifier[propname] = identifier[key]
keyword[break]
keyword[if] identifier[propname] :
identifier[qname] = identifier[getattr] ( identifier[method] , literal[string] , identifier[method] . identifier[__name__] )
keyword[if] literal[string] keyword[in] identifier[qname] :
identifier[delegate] . identifier[__doc__] = literal[string] . identifier[format] ( identifier[name] = identifier[name] , identifier[propname] = identifier[propname] )
keyword[else] :
identifier[delegate] . identifier[__doc__] = literal[string] . identifier[format] ( identifier[name] = identifier[name] , identifier[propname] = identifier[propname] )
identifier[classdict] [ identifier[name] ]= identifier[delegate] | def delegate_method(other, method, name=None):
"""Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic in the protocol, while saving the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method)
"""
frame = sys._getframe(1)
classdict = frame.f_locals
@functools.wraps(method)
def delegate(self, *args, **kwargs):
other_self = other.__get__(self)
return method(other_self, *args, **kwargs)
if getattr(method, '__switchpoint__', False):
delegate.__switchpoint__ = True # depends on [control=['if'], data=[]]
if name is None:
name = method.__name__ # depends on [control=['if'], data=['name']]
propname = None
for key in classdict:
if classdict[key] is other:
propname = key
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
# If we know the property name, replace the docstring with a small
# reference instead of copying the function docstring.
if propname:
qname = getattr(method, '__qualname__', method.__name__)
if '.' in qname:
delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.'.format(name=name, propname=propname) # depends on [control=['if'], data=[]]
else:
delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.'.format(name=name, propname=propname) # depends on [control=['if'], data=[]]
classdict[name] = delegate |
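# Illustration of the pattern from the docstring above (MyProtocol and
# MyClient are hypothetical names; delegate_method assumed importable):
class MyProtocol(object):
    def ping(self):
        return 'pong'

class MyClient(object):
    protocol = property(lambda self: self._protocol)
    delegate_method(protocol, MyProtocol.ping)  # adds MyClient.ping
    def __init__(self):
        self._protocol = MyProtocol()

print(MyClient().ping())  # 'pong', forwarded to the protocol instance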
def create_table(self, model):
"""Create model and table in database.
>> migrator.create_table(model)
"""
self.orm[model._meta.table_name] = model
model._meta.database = self.database
self.ops.append(model.create_table)
return model | def function[create_table, parameter[self, model]]:
constant[Create model and table in database.
>> migrator.create_table(model)
]
call[name[self].orm][name[model]._meta.table_name] assign[=] name[model]
name[model]._meta.database assign[=] name[self].database
call[name[self].ops.append, parameter[name[model].create_table]]
return[name[model]] | keyword[def] identifier[create_table] ( identifier[self] , identifier[model] ):
literal[string]
identifier[self] . identifier[orm] [ identifier[model] . identifier[_meta] . identifier[table_name] ]= identifier[model]
identifier[model] . identifier[_meta] . identifier[database] = identifier[self] . identifier[database]
identifier[self] . identifier[ops] . identifier[append] ( identifier[model] . identifier[create_table] )
keyword[return] identifier[model] | def create_table(self, model):
"""Create model and table in database.
>> migrator.create_table(model)
"""
self.orm[model._meta.table_name] = model
model._meta.database = self.database
self.ops.append(model.create_table)
return model |
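# Hypothetical migration snippet (a peewee-style model and a configured
# Migrator instance are assumptions; create_table only queues the DDL):
import peewee as pw

class User(pw.Model):
    name = pw.CharField()

User = migrator.create_table(User)  # bind model to migrator.database, queue DDL
migrator.run()                      # assumed runner that applies queued ops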
def labels(self):
"""
Get field labels for the visible fields
"""
if type(self.object_list) == type([]):
model = self.formset.model
else:
model = self.object_list.model
for field in self.visible_fields:
name = None
if self.formset:
f = self.formset.empty_form.fields.get(field, None)
if f:
name = f.label
if name is None:
name = label_for_field(field, model)
if name == model._meta.verbose_name:
name = self.model_name and self.model_name or \
model._meta.verbose_name
stype = None
cur_sorted = False
sortable = False
if self.order_type:
sortable = get_sort_field(field, model)
stype = self.ASC
# change order_type so that next sorting on the same
# field will give reversed results
if sortable and field == self.sort_field:
cur_sorted = True
if self.order_type == self.ASC:
stype = self.DESC
elif self.order_type == self.DESC:
stype = self.ASC
else:
stype = self.ASC
yield AdminListLabel(name, field, stype, cur_sorted, bool(sortable)) | def function[labels, parameter[self]]:
constant[
Get field labels for the visible fields
]
if compare[call[name[type], parameter[name[self].object_list]] equal[==] call[name[type], parameter[list[[]]]]] begin[:]
variable[model] assign[=] name[self].formset.model
for taget[name[field]] in starred[name[self].visible_fields] begin[:]
variable[name] assign[=] constant[None]
if name[self].formset begin[:]
variable[f] assign[=] call[name[self].formset.empty_form.fields.get, parameter[name[field], constant[None]]]
if name[f] begin[:]
variable[name] assign[=] name[f].label
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] call[name[label_for_field], parameter[name[field], name[model]]]
if compare[name[name] equal[==] name[model]._meta.verbose_name] begin[:]
variable[name] assign[=] <ast.BoolOp object at 0x7da20e957e50>
variable[stype] assign[=] constant[None]
variable[cur_sorted] assign[=] constant[False]
variable[sortable] assign[=] constant[False]
if name[self].order_type begin[:]
variable[sortable] assign[=] call[name[get_sort_field], parameter[name[field], name[model]]]
variable[stype] assign[=] name[self].ASC
if <ast.BoolOp object at 0x7da20e9557b0> begin[:]
variable[cur_sorted] assign[=] constant[True]
if compare[name[self].order_type equal[==] name[self].ASC] begin[:]
variable[stype] assign[=] name[self].DESC
<ast.Yield object at 0x7da20e956d10> | keyword[def] identifier[labels] ( identifier[self] ):
literal[string]
keyword[if] identifier[type] ( identifier[self] . identifier[object_list] )== identifier[type] ([]):
identifier[model] = identifier[self] . identifier[formset] . identifier[model]
keyword[else] :
identifier[model] = identifier[self] . identifier[object_list] . identifier[model]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[visible_fields] :
identifier[name] = keyword[None]
keyword[if] identifier[self] . identifier[formset] :
identifier[f] = identifier[self] . identifier[formset] . identifier[empty_form] . identifier[fields] . identifier[get] ( identifier[field] , keyword[None] )
keyword[if] identifier[f] :
identifier[name] = identifier[f] . identifier[label]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[label_for_field] ( identifier[field] , identifier[model] )
keyword[if] identifier[name] == identifier[model] . identifier[_meta] . identifier[verbose_name] :
identifier[name] = identifier[self] . identifier[model_name] keyword[and] identifier[self] . identifier[model_name] keyword[or] identifier[model] . identifier[_meta] . identifier[verbose_name]
identifier[stype] = keyword[None]
identifier[cur_sorted] = keyword[False]
identifier[sortable] = keyword[False]
keyword[if] identifier[self] . identifier[order_type] :
identifier[sortable] = identifier[get_sort_field] ( identifier[field] , identifier[model] )
identifier[stype] = identifier[self] . identifier[ASC]
keyword[if] identifier[sortable] keyword[and] identifier[field] == identifier[self] . identifier[sort_field] :
identifier[cur_sorted] = keyword[True]
keyword[if] identifier[self] . identifier[order_type] == identifier[self] . identifier[ASC] :
identifier[stype] = identifier[self] . identifier[DESC]
keyword[elif] identifier[self] . identifier[order_type] == identifier[self] . identifier[DESC] :
identifier[stype] = identifier[self] . identifier[ASC]
keyword[else] :
identifier[stype] = identifier[self] . identifier[ASC]
keyword[yield] identifier[AdminListLabel] ( identifier[name] , identifier[field] , identifier[stype] , identifier[cur_sorted] , identifier[bool] ( identifier[sortable] )) | def labels(self):
"""
Get field label for fields
"""
if type(self.object_list) == type([]):
model = self.formset.model # depends on [control=['if'], data=[]]
else:
model = self.object_list.model
for field in self.visible_fields:
name = None
if self.formset:
f = self.formset.empty_form.fields.get(field, None)
if f:
name = f.label # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name is None:
name = label_for_field(field, model) # depends on [control=['if'], data=['name']]
if name == model._meta.verbose_name:
name = self.model_name and self.model_name or model._meta.verbose_name # depends on [control=['if'], data=['name']]
stype = None
cur_sorted = False
sortable = False
if self.order_type:
sortable = get_sort_field(field, model)
stype = self.ASC
# change order_type so that next sorting on the same
# field will give reversed results
if sortable and field == self.sort_field:
cur_sorted = True
if self.order_type == self.ASC:
stype = self.DESC # depends on [control=['if'], data=[]]
elif self.order_type == self.DESC:
stype = self.ASC # depends on [control=['if'], data=[]]
else:
stype = self.ASC # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
yield AdminListLabel(name, field, stype, cur_sorted, bool(sortable)) # depends on [control=['for'], data=['field']] |
def parse(readDataInstance):
"""
Returns a new L{NETDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
"""
nd = NETDirectory()
nd.directory = NetDirectory.parse(readDataInstance)
nd.netMetaDataHeader = NetMetaDataHeader.parse(readDataInstance)
nd.netMetaDataStreams = NetMetaDataStreams.parse(readDataInstance)
return nd | def function[parse, parameter[readDataInstance]]:
constant[
Returns a new L{NETDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
]
variable[nd] assign[=] call[name[NETDirectory], parameter[]]
name[nd].directory assign[=] call[name[NetDirectory].parse, parameter[name[readDataInstance]]]
name[nd].netMetaDataHeader assign[=] call[name[NetMetaDataHeader].parse, parameter[name[readDataInstance]]]
name[nd].netMetaDataStreams assign[=] call[name[NetMetaDataStreams].parse, parameter[name[readDataInstance]]]
return[name[nd]] | keyword[def] identifier[parse] ( identifier[readDataInstance] ):
literal[string]
identifier[nd] = identifier[NETDirectory] ()
identifier[nd] . identifier[directory] = identifier[NetDirectory] . identifier[parse] ( identifier[readDataInstance] )
identifier[nd] . identifier[netMetaDataHeader] = identifier[NetMetaDataHeader] . identifier[parse] ( identifier[readDataInstance] )
identifier[nd] . identifier[netMetaDataStreams] = identifier[NetMetaDataStreams] . identifier[parse] ( identifier[readDataInstance] )
keyword[return] identifier[nd] | def parse(readDataInstance):
"""
Returns a new L{NETDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NETDirectory} object.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
"""
nd = NETDirectory()
nd.directory = NetDirectory.parse(readDataInstance)
nd.netMetaDataHeader = NetMetaDataHeader.parse(readDataInstance)
nd.netMetaDataStreams = NetMetaDataStreams.parse(readDataInstance)
return nd |
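# Sketch: parsing a .NET directory from raw bytes (ReadData comes from the
# docstring above; `raw_bytes` is an assumption):
rd = ReadData(raw_bytes)
net_dir = NETDirectory.parse(rd)
print(net_dir.directory, net_dir.netMetaDataHeader)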
def _compute_median_z1pt0(self, vs30):
"""
Compute and return median z1pt0 (in m), equation 17, page 79.
"""
z1pt0_median = np.zeros_like(vs30) + 6.745
idx = np.where((vs30 >= 180.0) & (vs30 <= 500.0))
z1pt0_median[idx] = 6.745 - 1.35 * np.log(vs30[idx] / 180.0)
idx = vs30 > 500.0
z1pt0_median[idx] = 5.394 - 4.48 * np.log(vs30[idx] / 500.0)
return np.exp(z1pt0_median) | def function[_compute_median_z1pt0, parameter[self, vs30]]:
constant[
Compute and return median z1pt0 (in m), equation 17, page 79.
]
variable[z1pt0_median] assign[=] binary_operation[call[name[np].zeros_like, parameter[name[vs30]]] + constant[6.745]]
variable[idx] assign[=] call[name[np].where, parameter[binary_operation[compare[name[vs30] greater_or_equal[>=] constant[180.0]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[vs30] less_or_equal[<=] constant[500.0]]]]]
call[name[z1pt0_median]][name[idx]] assign[=] binary_operation[constant[6.745] - binary_operation[constant[1.35] * call[name[np].log, parameter[binary_operation[call[name[vs30]][name[idx]] / constant[180.0]]]]]]
variable[idx] assign[=] compare[name[vs30] greater[>] constant[500.0]]
call[name[z1pt0_median]][name[idx]] assign[=] binary_operation[constant[5.394] - binary_operation[constant[4.48] * call[name[np].log, parameter[binary_operation[call[name[vs30]][name[idx]] / constant[500.0]]]]]]
return[call[name[np].exp, parameter[name[z1pt0_median]]]] | keyword[def] identifier[_compute_median_z1pt0] ( identifier[self] , identifier[vs30] ):
literal[string]
identifier[z1pt0_median] = identifier[np] . identifier[zeros_like] ( identifier[vs30] )+ literal[int]
identifier[idx] = identifier[np] . identifier[where] (( identifier[vs30] >= literal[int] )&( identifier[vs30] <= literal[int] ))
identifier[z1pt0_median] [ identifier[idx] ]= literal[int] - literal[int] * identifier[np] . identifier[log] ( identifier[vs30] [ identifier[idx] ]/ literal[int] )
identifier[idx] = identifier[vs30] > literal[int]
identifier[z1pt0_median] [ identifier[idx] ]= literal[int] - literal[int] * identifier[np] . identifier[log] ( identifier[vs30] [ identifier[idx] ]/ literal[int] )
keyword[return] identifier[np] . identifier[exp] ( identifier[z1pt0_median] ) | def _compute_median_z1pt0(self, vs30):
"""
Compute and return median z1pt0 (in m), equation 17, page 79.
"""
z1pt0_median = np.zeros_like(vs30) + 6.745
idx = np.where((vs30 >= 180.0) & (vs30 <= 500.0))
z1pt0_median[idx] = 6.745 - 1.35 * np.log(vs30[idx] / 180.0)
idx = vs30 > 500.0
z1pt0_median[idx] = 5.394 - 4.48 * np.log(vs30[idx] / 500.0)
return np.exp(z1pt0_median) |
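# Quick numerical check of the z1pt0 relation (numpy assumed as in the method
# above; `gmpe` stands in for an instance of the enclosing GMPE class):
import numpy as np
vs30 = np.array([150.0, 300.0, 760.0])
print(gmpe._compute_median_z1pt0(vs30))  # median depth to Vs=1 km/s, in m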
def sync(self):
"""Retrieve areas from ElkM1"""
self.elk.send(as_encode())
self.get_descriptions(TextDescriptions.AREA.value) | def function[sync, parameter[self]]:
constant[Retrieve areas from ElkM1]
call[name[self].elk.send, parameter[call[name[as_encode], parameter[]]]]
call[name[self].get_descriptions, parameter[name[TextDescriptions].AREA.value]] | keyword[def] identifier[sync] ( identifier[self] ):
literal[string]
identifier[self] . identifier[elk] . identifier[send] ( identifier[as_encode] ())
identifier[self] . identifier[get_descriptions] ( identifier[TextDescriptions] . identifier[AREA] . identifier[value] ) | def sync(self):
"""Retrieve areas from ElkM1"""
self.elk.send(as_encode())
self.get_descriptions(TextDescriptions.AREA.value) |
def get_duration(self):
"""Get game duration."""
postgame = self.get_postgame()
if postgame:
return postgame.duration_int * 1000
duration = self._header.initial.restore_time
try:
while self._handle.tell() < self.size:
operation = mgz.body.operation.parse_stream(self._handle)
if operation.type == 'sync':
duration += operation.time_increment
elif operation.type == 'action':
if operation.action.type == 'resign':
self._cache['resigned'].add(operation.action.player_id)
self._handle.seek(self.body_position)
except (construct.core.ConstructError, zlib.error, ValueError):
raise RuntimeError("invalid mgz file")
return duration | def function[get_duration, parameter[self]]:
constant[Get game duration.]
variable[postgame] assign[=] call[name[self].get_postgame, parameter[]]
if name[postgame] begin[:]
return[binary_operation[name[postgame].duration_int * constant[1000]]]
variable[duration] assign[=] name[self]._header.initial.restore_time
<ast.Try object at 0x7da1b25e8790>
return[name[duration]] | keyword[def] identifier[get_duration] ( identifier[self] ):
literal[string]
identifier[postgame] = identifier[self] . identifier[get_postgame] ()
keyword[if] identifier[postgame] :
keyword[return] identifier[postgame] . identifier[duration_int] * literal[int]
identifier[duration] = identifier[self] . identifier[_header] . identifier[initial] . identifier[restore_time]
keyword[try] :
keyword[while] identifier[self] . identifier[_handle] . identifier[tell] ()< identifier[self] . identifier[size] :
identifier[operation] = identifier[mgz] . identifier[body] . identifier[operation] . identifier[parse_stream] ( identifier[self] . identifier[_handle] )
keyword[if] identifier[operation] . identifier[type] == literal[string] :
identifier[duration] += identifier[operation] . identifier[time_increment]
keyword[elif] identifier[operation] . identifier[type] == literal[string] :
keyword[if] identifier[operation] . identifier[action] . identifier[type] == literal[string] :
identifier[self] . identifier[_cache] [ literal[string] ]. identifier[add] ( identifier[operation] . identifier[action] . identifier[player_id] )
identifier[self] . identifier[_handle] . identifier[seek] ( identifier[self] . identifier[body_position] )
keyword[except] ( identifier[construct] . identifier[core] . identifier[ConstructError] , identifier[zlib] . identifier[error] , identifier[ValueError] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[duration] | def get_duration(self):
"""Get game duration."""
postgame = self.get_postgame()
if postgame:
return postgame.duration_int * 1000 # depends on [control=['if'], data=[]]
duration = self._header.initial.restore_time
try:
while self._handle.tell() < self.size:
operation = mgz.body.operation.parse_stream(self._handle)
if operation.type == 'sync':
duration += operation.time_increment # depends on [control=['if'], data=[]]
elif operation.type == 'action':
if operation.action.type == 'resign':
self._cache['resigned'].add(operation.action.player_id) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
self._handle.seek(self.body_position) # depends on [control=['try'], data=[]]
except (construct.core.ConstructError, zlib.error, ValueError):
raise RuntimeError('invalid mgz file') # depends on [control=['except'], data=[]]
return duration |
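# Hedged usage sketch (a parsed replay `summary` object exposing the method
# above is an assumption):
ms = summary.get_duration()                   # duration in milliseconds
print('game length: %.1f min' % (ms / 60000.0))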
def current_boost_dir():
"""Returns the (relative) path to the Boost source-directory this file is located in (if any)."""
# Path to directory containing this script.
path = os.path.dirname( os.path.realpath(__file__) )
# Making sure it is located in "${boost-dir}/libs/mpl/preprocessed".
for directory in reversed( ["libs", "mpl", "preprocessed"] ):
(head, tail) = os.path.split(path)
if tail == directory:
path = head
else:
return None
return os.path.relpath( path ) | def function[current_boost_dir, parameter[]]:
constant[Returns the (relative) path to the Boost source-directory this file is located in (if any).]
variable[path] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.realpath, parameter[name[__file__]]]]]
for taget[name[directory]] in starred[call[name[reversed], parameter[list[[<ast.Constant object at 0x7da1b1fa5930>, <ast.Constant object at 0x7da1b1fa54b0>, <ast.Constant object at 0x7da1b1fa73a0>]]]]] begin[:]
<ast.Tuple object at 0x7da1b1fa7ee0> assign[=] call[name[os].path.split, parameter[name[path]]]
if compare[name[tail] equal[==] name[directory]] begin[:]
variable[path] assign[=] name[head]
return[call[name[os].path.relpath, parameter[name[path]]]] | keyword[def] identifier[current_boost_dir] ():
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] ))
keyword[for] identifier[directory] keyword[in] identifier[reversed] ([ literal[string] , literal[string] , literal[string] ]):
( identifier[head] , identifier[tail] )= identifier[os] . identifier[path] . identifier[split] ( identifier[path] )
keyword[if] identifier[tail] == identifier[directory] :
identifier[path] = identifier[head]
keyword[else] :
keyword[return] keyword[None]
keyword[return] identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] ) | def current_boost_dir():
"""Returns the (relative) path to the Boost source-directory this file is located in (if any)."""
# Path to directory containing this script.
path = os.path.dirname(os.path.realpath(__file__))
# Making sure it is located in "${boost-dir}/libs/mpl/preprocessed".
for directory in reversed(['libs', 'mpl', 'preprocessed']):
(head, tail) = os.path.split(path)
if tail == directory:
path = head # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['for'], data=['directory']]
return os.path.relpath(path) |
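# Behaviour sketch: relative path when run from inside a Boost tree, else None.
boost_dir = current_boost_dir()
if boost_dir is None:
    print('script is not inside a Boost source tree')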
def get_mem(self, id):
"""Fetch the memory with the supplied id"""
for m in self.mems:
if m.id == id:
return m
return None | def function[get_mem, parameter[self, id]]:
constant[Fetch the memory with the supplied id]
for taget[name[m]] in starred[name[self].mems] begin[:]
if compare[name[m].id equal[==] name[id]] begin[:]
return[name[m]]
return[constant[None]] | keyword[def] identifier[get_mem] ( identifier[self] , identifier[id] ):
literal[string]
keyword[for] identifier[m] keyword[in] identifier[self] . identifier[mems] :
keyword[if] identifier[m] . identifier[id] == identifier[id] :
keyword[return] identifier[m]
keyword[return] keyword[None] | def get_mem(self, id):
"""Fetch the memory with the supplied id"""
for m in self.mems:
if m.id == id:
return m # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']]
return None |
def set(self, lo, hi):
"""
Set the fractional values of the slider position.
:param lo: lower end of the scrollbar (between 0 and 1)
:type lo: float
:param hi: upper end of the scrollbar (between 0 and 1)
:type hi: float
"""
if float(lo) <= 0.0 and float(hi) >= 1.0:
if self._layout == 'place':
self.place_forget()
elif self._layout == 'pack':
self.pack_forget()
else:
self.grid_remove()
else:
if self._layout == 'place':
self.place(**self._place_kw)
elif self._layout == 'pack':
self.pack(**self._pack_kw)
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi) | def function[set, parameter[self, lo, hi]]:
constant[
Set the fractional values of the slider position.
:param lo: lower end of the scrollbar (between 0 and 1)
:type lo: float
:param hi: upper end of the scrollbar (between 0 and 1)
:type hi: float
]
if <ast.BoolOp object at 0x7da1b236feb0> begin[:]
if compare[name[self]._layout equal[==] constant[place]] begin[:]
call[name[self].place_forget, parameter[]]
call[name[ttk].Scrollbar.set, parameter[name[self], name[lo], name[hi]]] | keyword[def] identifier[set] ( identifier[self] , identifier[lo] , identifier[hi] ):
literal[string]
keyword[if] identifier[float] ( identifier[lo] )<= literal[int] keyword[and] identifier[float] ( identifier[hi] )>= literal[int] :
keyword[if] identifier[self] . identifier[_layout] == literal[string] :
identifier[self] . identifier[place_forget] ()
keyword[elif] identifier[self] . identifier[_layout] == literal[string] :
identifier[self] . identifier[pack_forget] ()
keyword[else] :
identifier[self] . identifier[grid_remove] ()
keyword[else] :
keyword[if] identifier[self] . identifier[_layout] == literal[string] :
identifier[self] . identifier[place] (** identifier[self] . identifier[_place_kw] )
keyword[elif] identifier[self] . identifier[_layout] == literal[string] :
identifier[self] . identifier[pack] (** identifier[self] . identifier[_pack_kw] )
keyword[else] :
identifier[self] . identifier[grid] ()
identifier[ttk] . identifier[Scrollbar] . identifier[set] ( identifier[self] , identifier[lo] , identifier[hi] ) | def set(self, lo, hi):
"""
Set the fractional values of the slider position.
:param lo: lower end of the scrollbar (between 0 and 1)
:type lo: float
:param hi: upper end of the scrollbar (between 0 and 1)
:type hi: float
"""
if float(lo) <= 0.0 and float(hi) >= 1.0:
if self._layout == 'place':
self.place_forget() # depends on [control=['if'], data=[]]
elif self._layout == 'pack':
self.pack_forget() # depends on [control=['if'], data=[]]
else:
self.grid_remove() # depends on [control=['if'], data=[]]
elif self._layout == 'place':
self.place(**self._place_kw) # depends on [control=['if'], data=[]]
elif self._layout == 'pack':
self.pack(**self._pack_kw) # depends on [control=['if'], data=[]]
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi) |
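# Typical auto-hide wiring (tkinter/ttk only; AutoHideScrollbar is the assumed
# name of the class hosting the `set` override above):
import tkinter as tk
root = tk.Tk()
text = tk.Text(root)
sb = AutoHideScrollbar(root, orient='vertical', command=text.yview)
text.configure(yscrollcommand=sb.set)  # sb hides itself when lo<=0 and hi>=1
text.grid(row=0, column=0)
sb.grid(row=0, column=1, sticky='ns')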
def remove_dirs(dpath, verbose=None, ignore_errors=True, dryrun=False,
quiet=QUIET):
r"""
Recursively removes a single directory (need to change function name)
DEPRECATE
Args:
dpath (str): directory path
dryrun (bool): (default = False)
ignore_errors (bool): (default = True)
quiet (bool): (default = False)
Returns:
bool: False
CommandLine:
python -m utool.util_path --test-remove_dirs
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_resource_dir('utool', 'testremovedir')
>>> assert exists(dpath), 'nothing to remove'
>>> flag = remove_dirs(dpath, verbose=True)
>>> print('flag = %r' % (flag,))
>>> assert not exists(dpath), 'did not remove dpath'
>>> assert flag is True
"""
if verbose is None:
verbose = not quiet
if verbose:
print('[util_path] Removing directory: %r' % dpath)
if dryrun:
return False
try:
shutil.rmtree(dpath)
except OSError as e:
warnings.warn('OSError: %s,\n Could not delete %s' % (str(e), dpath))
if not ignore_errors:
raise
return False
return True | def function[remove_dirs, parameter[dpath, verbose, ignore_errors, dryrun, quiet]]:
constant[
Recursively removes a single directory (need to change function name)
DEPRECATE
Args:
dpath (str): directory path
dryrun (bool): (default = False)
ignore_errors (bool): (default = True)
quiet (bool): (default = False)
Returns:
bool: True if the directory was removed, False if dryrun is set or removal failed
CommandLine:
python -m utool.util_path --test-remove_dirs
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_resource_dir('utool', 'testremovedir')
>>> assert exists(dpath), 'nothing to remove'
>>> flag = remove_dirs(dpath, verbose=True)
>>> print('flag = %r' % (flag,))
>>> assert not exists(dpath), 'did not remove dpath'
>>> assert flag is True
]
if compare[name[verbose] is constant[None]] begin[:]
variable[verbose] assign[=] <ast.UnaryOp object at 0x7da1b24caf80>
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[util_path] Removing directory: %r] <ast.Mod object at 0x7da2590d6920> name[dpath]]]]
if name[dryrun] begin[:]
return[constant[False]]
<ast.Try object at 0x7da1b24ca2f0>
return[constant[True]] | keyword[def] identifier[remove_dirs] ( identifier[dpath] , identifier[verbose] = keyword[None] , identifier[ignore_errors] = keyword[True] , identifier[dryrun] = keyword[False] ,
identifier[quiet] = identifier[QUIET] ):
literal[string]
keyword[if] identifier[verbose] keyword[is] keyword[None] :
identifier[verbose] = keyword[not] identifier[quiet]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] % identifier[dpath] )
keyword[if] identifier[dryrun] :
keyword[return] keyword[False]
keyword[try] :
identifier[shutil] . identifier[rmtree] ( identifier[dpath] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[warnings] . identifier[warn] ( literal[string] %( identifier[str] ( identifier[e] ), identifier[dpath] ))
keyword[if] keyword[not] identifier[ignore_errors] :
keyword[raise]
keyword[return] keyword[False]
keyword[return] keyword[True] | def remove_dirs(dpath, verbose=None, ignore_errors=True, dryrun=False, quiet=QUIET):
"""
Recursively removes a single directory (need to change function name)
DEPRECATE
Args:
dpath (str): directory path
dryrun (bool): (default = False)
ignore_errors (bool): (default = True)
quiet (bool): (default = False)
Returns:
bool: True if the directory was removed, False if dryrun is set or removal failed
CommandLine:
python -m utool.util_path --test-remove_dirs
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_resource_dir('utool', 'testremovedir')
>>> assert exists(dpath), 'nothing to remove'
>>> flag = remove_dirs(dpath, verbose=True)
>>> print('flag = %r' % (flag,))
>>> assert not exists(dpath), 'did not remove dpath'
>>> assert flag is True
"""
if verbose is None:
verbose = not quiet # depends on [control=['if'], data=['verbose']]
if verbose:
print('[util_path] Removing directory: %r' % dpath) # depends on [control=['if'], data=[]]
if dryrun:
return False # depends on [control=['if'], data=[]]
try:
shutil.rmtree(dpath) # depends on [control=['try'], data=[]]
except OSError as e:
warnings.warn('OSError: %s,\n Could not delete %s' % (str(e), dpath))
if not ignore_errors:
raise # depends on [control=['if'], data=[]]
return False # depends on [control=['except'], data=['e']]
return True |
def evaluate_method(method, events, aux=0.):
"""Evaluate a TMVA::MethodBase over a NumPy array.
.. warning:: TMVA::Reader has known problems with thread safety in versions
of ROOT earlier than 6.03. There will potentially be a crash if you call
``method = reader.FindMVA(name)`` in Python and then pass this
``method`` here. Consider using ``evaluate_reader`` instead if you are
affected by this crash.
Parameters
----------
method : TMVA::MethodBase
A TMVA::MethodBase instance with variables booked in exactly the same
order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
aux : float, optional (default=0.)
Auxiliary value used by MethodCuts to set the desired signal
efficiency.
Returns
-------
output : numpy array of shape [n_events]
The method output value for each event
See Also
--------
evaluate_reader
"""
if not isinstance(method, TMVA.MethodBase):
raise TypeError("reader must be a TMVA.MethodBase instance")
events = np.ascontiguousarray(events, dtype=np.float64)
if events.ndim == 1:
# convert to 2D
events = events[:, np.newaxis]
elif events.ndim != 2:
raise ValueError(
"events must be a two-dimensional array "
"with one event per row")
return _libtmvanumpy.evaluate_method(ROOT.AsCObject(method), events, aux) | def function[evaluate_method, parameter[method, events, aux]]:
constant[Evaluate a TMVA::MethodBase over a NumPy array.
.. warning:: TMVA::Reader has known problems with thread safety in versions
of ROOT earlier than 6.03. There will potentially be a crash if you call
``method = reader.FindMVA(name)`` in Python and then pass this
``method`` here. Consider using ``evaluate_reader`` instead if you are
affected by this crash.
Parameters
----------
method : TMVA::MethodBase
A TMVA::MethodBase instance with variables booked in exactly the same
order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
aux : float, optional (default=0.)
Auxiliary value used by MethodCuts to set the desired signal
efficiency.
Returns
-------
output : numpy array of shape [n_events]
The method output value for each event
See Also
--------
evaluate_reader
]
if <ast.UnaryOp object at 0x7da18dc042b0> begin[:]
<ast.Raise object at 0x7da18dc07d00>
variable[events] assign[=] call[name[np].ascontiguousarray, parameter[name[events]]]
if compare[name[events].ndim equal[==] constant[1]] begin[:]
variable[events] assign[=] call[name[events]][tuple[[<ast.Slice object at 0x7da18dc04c70>, <ast.Attribute object at 0x7da18dc06ad0>]]]
return[call[name[_libtmvanumpy].evaluate_method, parameter[call[name[ROOT].AsCObject, parameter[name[method]]], name[events], name[aux]]]] | keyword[def] identifier[evaluate_method] ( identifier[method] , identifier[events] , identifier[aux] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[method] , identifier[TMVA] . identifier[MethodBase] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[events] = identifier[np] . identifier[ascontiguousarray] ( identifier[events] , identifier[dtype] = identifier[np] . identifier[float64] )
keyword[if] identifier[events] . identifier[ndim] == literal[int] :
identifier[events] = identifier[events] [:, identifier[np] . identifier[newaxis] ]
keyword[elif] identifier[events] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
keyword[return] identifier[_libtmvanumpy] . identifier[evaluate_method] ( identifier[ROOT] . identifier[AsCObject] ( identifier[method] ), identifier[events] , identifier[aux] ) | def evaluate_method(method, events, aux=0.0):
"""Evaluate a TMVA::MethodBase over a NumPy array.
.. warning:: TMVA::Reader has known problems with thread safety in versions
of ROOT earlier than 6.03. There will potentially be a crash if you call
``method = reader.FindMVA(name)`` in Python and then pass this
``method`` here. Consider using ``evaluate_reader`` instead if you are
affected by this crash.
Parameters
----------
method : TMVA::MethodBase
A TMVA::MethodBase instance with variables booked in exactly the same
order as the columns in ``events``.
events : numpy array of shape [n_events, n_variables]
A two-dimensional NumPy array containing the rows of events and columns
of variables. The order of the columns must match the order in which
you called ``AddVariable()`` for each variable.
aux : float, optional (default=0.)
Auxiliary value used by MethodCuts to set the desired signal
efficiency.
Returns
-------
output : numpy array of shape [n_events]
The method output value for each event
See Also
--------
evaluate_reader
"""
if not isinstance(method, TMVA.MethodBase):
raise TypeError('method must be a TMVA.MethodBase instance') # depends on [control=['if'], data=[]]
events = np.ascontiguousarray(events, dtype=np.float64)
if events.ndim == 1:
# convert to 2D
events = events[:, np.newaxis] # depends on [control=['if'], data=[]]
elif events.ndim != 2:
raise ValueError('events must be a two-dimensional array with one event per row') # depends on [control=['if'], data=[]]
return _libtmvanumpy.evaluate_method(ROOT.AsCObject(method), events, aux) |
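A hedged usage sketch for evaluate_method, following the root_numpy TMVA workflow; the import path, variable names, and weights file are assumptions, and the method handle comes from BookMVA rather than FindMVA to sidestep the thread-safety issue noted in the warning above.

import numpy as np
from array import array
from ROOT import TMVA
from root_numpy.tmva import evaluate_method  # assumed import location

reader = TMVA.Reader()
x, y = array('f', [0.]), array('f', [0.])
reader.AddVariable('x', x)  # booking order must match the event columns
reader.AddVariable('y', y)
method = reader.BookMVA('BDT', 'weights/classifier_BDT.weights.xml')  # assumed file

events = np.random.rand(100, 2)           # 100 events, 2 variables
scores = evaluate_method(method, events)  # one output value per event
print(scores.shape)                       # (100,)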
def bibtex_run(self):
'''
Start bibtex run.
'''
self.log.info('Running bibtex...')
try:
with open(os.devnull, 'w') as null:
Popen(['bibtex', self.project_name], stdout=null).wait()
except OSError:
self.log.error(NO_LATEX_ERROR % 'bibtex')
sys.exit(1)
shutil.copy('%s.bib' % self.bib_file,
'%s.bib.old' % self.bib_file) | def function[bibtex_run, parameter[self]]:
constant[
Start bibtex run.
]
call[name[self].log.info, parameter[constant[Running bibtex...]]]
<ast.Try object at 0x7da18dc05630>
call[name[shutil].copy, parameter[binary_operation[constant[%s.bib] <ast.Mod object at 0x7da2590d6920> name[self].bib_file], binary_operation[constant[%s.bib.old] <ast.Mod object at 0x7da2590d6920> name[self].bib_file]]] | keyword[def] identifier[bibtex_run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[try] :
keyword[with] identifier[open] ( identifier[os] . identifier[devnull] , literal[string] ) keyword[as] identifier[null] :
identifier[Popen] ([ literal[string] , identifier[self] . identifier[project_name] ], identifier[stdout] = identifier[null] ). identifier[wait] ()
keyword[except] identifier[OSError] :
identifier[self] . identifier[log] . identifier[error] ( identifier[NO_LATEX_ERROR] % literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[shutil] . identifier[copy] ( literal[string] % identifier[self] . identifier[bib_file] ,
literal[string] % identifier[self] . identifier[bib_file] ) | def bibtex_run(self):
"""
Start bibtex run.
"""
self.log.info('Running bibtex...')
try:
with open(os.devnull, 'w') as null:
Popen(['bibtex', self.project_name], stdout=null).wait() # depends on [control=['with'], data=['null']] # depends on [control=['try'], data=[]]
except OSError:
self.log.error(NO_LATEX_ERROR % 'bibtex')
sys.exit(1) # depends on [control=['except'], data=[]]
shutil.copy('%s.bib' % self.bib_file, '%s.bib.old' % self.bib_file) |
def edit_rules(self, description, key, value):
'''
Edit all rules that match a specified description.
@description - The description to match against.
@key - The key to change for each matching rule.
@value - The new key value for each matching rule.
Returns the number of rules modified.
'''
count = 0
description = description.lower()
for i in range(0, len(self.extract_rules)):
if self.extract_rules[i]['regex'].search(description):
if has_key(self.extract_rules[i], key):
self.extract_rules[i][key] = value
count += 1
return count | def function[edit_rules, parameter[self, description, key, value]]:
constant[
Edit all rules that match a specified description.
@description - The description to match against.
@key - The key to change for each matching rule.
@value - The new key value for each matching rule.
Returns the number of rules modified.
]
variable[count] assign[=] constant[0]
variable[description] assign[=] call[name[description].lower, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[self].extract_rules]]]]] begin[:]
if call[call[call[name[self].extract_rules][name[i]]][constant[regex]].search, parameter[name[description]]] begin[:]
if call[name[has_key], parameter[call[name[self].extract_rules][name[i]], name[key]]] begin[:]
call[call[name[self].extract_rules][name[i]]][name[key]] assign[=] name[value]
<ast.AugAssign object at 0x7da1b1c89ba0>
return[name[count]] | keyword[def] identifier[edit_rules] ( identifier[self] , identifier[description] , identifier[key] , identifier[value] ):
literal[string]
identifier[count] = literal[int]
identifier[description] = identifier[description] . identifier[lower] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[self] . identifier[extract_rules] )):
keyword[if] identifier[self] . identifier[extract_rules] [ identifier[i] ][ literal[string] ]. identifier[search] ( identifier[description] ):
keyword[if] identifier[has_key] ( identifier[self] . identifier[extract_rules] [ identifier[i] ], identifier[key] ):
identifier[self] . identifier[extract_rules] [ identifier[i] ][ identifier[key] ]= identifier[value]
identifier[count] += literal[int]
keyword[return] identifier[count] | def edit_rules(self, description, key, value):
"""
Edit all rules that match a specified description.
@description - The description to match against.
@key - The key to change for each matching rule.
@value - The new key value for each matching rule.
Returns the number of rules modified.
"""
count = 0
description = description.lower()
for i in range(0, len(self.extract_rules)):
if self.extract_rules[i]['regex'].search(description):
if has_key(self.extract_rules[i], key):
self.extract_rules[i][key] = value
count += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return count |
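To make the matching logic above concrete, here is a self-contained sketch with plain dict rules standing in for the extractor's rule table; the rule fields and sample descriptions are made up for illustration.

import re

extract_rules = [
    {'regex': re.compile(r'zip archive'), 'cmd': 'unzip %e', 'extension': 'zip'},
    {'regex': re.compile(r'tar archive'), 'cmd': 'tar -xf %e', 'extension': 'tar'},
]

def edit_rules(rules, description, key, value):
    # Set rule[key] = value on every rule whose regex matches the description.
    count = 0
    description = description.lower()
    for rule in rules:
        if rule['regex'].search(description) and key in rule:
            rule[key] = value
            count += 1
    return count

print(edit_rules(extract_rules, 'Zip archive data', 'cmd', None))  # 1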
def get_argument_parser():
"""Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx.
"""
file_mv = cli.file_mv
desc = 'Find all runs (SRR..) associated with an SRA experiment (SRX...).'
parser = cli.get_argument_parser(desc=desc)
parser.add_argument(
'-e', '--experiment-file', type=str, required=True, metavar=file_mv,
help='File with SRA experiment IDs (starting with "SRX").'
)
parser.add_argument(
'-o', '--output-file', type=str, required=True, metavar=file_mv,
help='The output file.'
)
cli.add_reporting_args(parser)
return parser | def function[get_argument_parser, parameter[]]:
constant[Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx.
]
variable[file_mv] assign[=] name[cli].file_mv
variable[desc] assign[=] constant[Find all runs (SRR..) associated with an SRA experiment (SRX...).]
variable[parser] assign[=] call[name[cli].get_argument_parser, parameter[]]
call[name[parser].add_argument, parameter[constant[-e], constant[--experiment-file]]]
call[name[parser].add_argument, parameter[constant[-o], constant[--output-file]]]
call[name[cli].add_reporting_args, parameter[name[parser]]]
return[name[parser]] | keyword[def] identifier[get_argument_parser] ():
literal[string]
identifier[file_mv] = identifier[cli] . identifier[file_mv]
identifier[desc] = literal[string]
identifier[parser] = identifier[cli] . identifier[get_argument_parser] ( identifier[desc] = identifier[desc] )
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[type] = identifier[str] , identifier[required] = keyword[True] , identifier[metavar] = identifier[file_mv] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] , identifier[type] = identifier[str] , identifier[required] = keyword[True] , identifier[metavar] = identifier[file_mv] ,
identifier[help] = literal[string]
)
identifier[cli] . identifier[add_reporting_args] ( identifier[parser] )
keyword[return] identifier[parser] | def get_argument_parser():
"""Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx.
"""
file_mv = cli.file_mv
desc = 'Find all runs (SRR..) associated with an SRA experiment (SRX...).'
parser = cli.get_argument_parser(desc=desc)
parser.add_argument('-e', '--experiment-file', type=str, required=True, metavar=file_mv, help='File with SRA experiment IDs (starting with "SRX").')
parser.add_argument('-o', '--output-file', type=str, required=True, metavar=file_mv, help='The output file.')
cli.add_reporting_args(parser)
return parser |
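Since the cli helper module used above is not shown, here is a self-contained approximation of the same parser with plain argparse; the metavar text is an assumption.

import argparse

parser = argparse.ArgumentParser(
    description='Find all runs (SRR..) associated with an SRA experiment (SRX...).')
parser.add_argument('-e', '--experiment-file', type=str, required=True, metavar='<file>',
                    help='File with SRA experiment IDs (starting with "SRX").')
parser.add_argument('-o', '--output-file', type=str, required=True, metavar='<file>',
                    help='The output file.')

args = parser.parse_args(['-e', 'srx_ids.txt', '-o', 'srr_runs.txt'])
print(args.experiment_file, args.output_file)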
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
cls = self.__class__
# Internal checks before executing inherited function...
# There is a case where there is no nw (notification way): when no
# special_properties are defined at all!
if not self.notificationways:
for prop in self.special_properties:
if not hasattr(self, prop):
msg = "[contact::%s] %s property is missing" % (self.get_name(), prop)
self.add_error(msg)
state = False
if not hasattr(self, 'contact_name'):
if hasattr(self, 'alias'):
# Use the alias if we miss the contact_name
self.contact_name = self.alias
for char in cls.illegal_object_name_chars:
if char not in self.contact_name:
continue
msg = "[contact::%s] %s character not allowed in contact_name" \
% (self.get_name(), char)
self.add_error(msg)
state = False
return super(Contact, self).is_correct() and state | def function[is_correct, parameter[self]]:
constant[Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
]
variable[state] assign[=] constant[True]
variable[cls] assign[=] name[self].__class__
if <ast.UnaryOp object at 0x7da18fe90100> begin[:]
for taget[name[prop]] in starred[name[self].special_properties] begin[:]
if <ast.UnaryOp object at 0x7da18fe934c0> begin[:]
variable[msg] assign[=] binary_operation[constant[[contact::%s] %s property is missing] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18bc72230>, <ast.Name object at 0x7da18bc726b0>]]]
call[name[self].add_error, parameter[name[msg]]]
variable[state] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da18bc71ae0> begin[:]
if call[name[hasattr], parameter[name[self], constant[alias]]] begin[:]
name[self].contact_name assign[=] name[self].alias
for taget[name[char]] in starred[name[cls].illegal_object_name_chars] begin[:]
if compare[name[char] <ast.NotIn object at 0x7da2590d7190> name[self].contact_name] begin[:]
continue
variable[msg] assign[=] binary_operation[constant[[contact::%s] %s character not allowed in contact_name] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18bc732b0>, <ast.Name object at 0x7da18bc71690>]]]
call[name[self].add_error, parameter[name[msg]]]
variable[state] assign[=] constant[False]
return[<ast.BoolOp object at 0x7da18bc70190>] | keyword[def] identifier[is_correct] ( identifier[self] ):
literal[string]
identifier[state] = keyword[True]
identifier[cls] = identifier[self] . identifier[__class__]
keyword[if] keyword[not] identifier[self] . identifier[notificationways] :
keyword[for] identifier[prop] keyword[in] identifier[self] . identifier[special_properties] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , identifier[prop] ):
identifier[msg] = literal[string] %( identifier[self] . identifier[get_name] (), identifier[prop] )
identifier[self] . identifier[add_error] ( identifier[msg] )
identifier[state] = keyword[False]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[contact_name] = identifier[self] . identifier[alias]
keyword[for] identifier[char] keyword[in] identifier[cls] . identifier[illegal_object_name_chars] :
keyword[if] identifier[char] keyword[not] keyword[in] identifier[self] . identifier[contact_name] :
keyword[continue]
identifier[msg] = literal[string] %( identifier[self] . identifier[get_name] (), identifier[char] )
identifier[self] . identifier[add_error] ( identifier[msg] )
identifier[state] = keyword[False]
keyword[return] identifier[super] ( identifier[Contact] , identifier[self] ). identifier[is_correct] () keyword[and] identifier[state] | def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
cls = self.__class__
# Internal checks before executing inherited function...
# There is a case where there is no nw (notification way): when no
# special_properties are defined at all!
if not self.notificationways:
for prop in self.special_properties:
if not hasattr(self, prop):
msg = '[contact::%s] %s property is missing' % (self.get_name(), prop)
self.add_error(msg)
state = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prop']] # depends on [control=['if'], data=[]]
if not hasattr(self, 'contact_name'):
if hasattr(self, 'alias'):
# Use the alias if we miss the contact_name
self.contact_name = self.alias # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
for char in cls.illegal_object_name_chars:
if char not in self.contact_name:
continue # depends on [control=['if'], data=[]]
msg = '[contact::%s] %s character not allowed in contact_name' % (self.get_name(), char)
self.add_error(msg)
state = False # depends on [control=['for'], data=['char']]
return super(Contact, self).is_correct() and state |
def infer(cls, sub_ija, T_ia, root_state, pc=0.01,
gap_limit=0.01, Nit=30, dp=1e-5, **kwargs):
"""
Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = \pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`\pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. The modified equation is
:math:`n_{ij} + pc = \pi_i W_{ij} (T_j + pc + root\_state)`
Parameters
----------
sub_ija : n x n x L array
The number of times a change in character state is observed
between state j and i at position a
T_ia : n x L array
The time spent in each character state at position a
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Keyword arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
"""
from scipy import linalg as LA
gtr = cls(**kwargs)
gtr.logger("GTR: model inference ",1)
q = len(gtr.alphabet)
L = sub_ija.shape[-1]
n_iter = 0
n_ija = np.copy(sub_ija)
n_ija[range(q),range(q),:] = 0
n_ij = n_ija.sum(axis=-1)
m_ia = np.sum(n_ija,axis=1) + root_state + pc
n_a = n_ija.sum(axis=1).sum(axis=0) + pc
Lambda = np.sum(root_state,axis=0) + q*pc
p_ia_old=np.zeros((q,L))
p_ia = np.ones((q,L))/q
mu_a = np.ones(L)
W_ij = np.ones((q,q)) - np.eye(q)
while (LA.norm(p_ia_old-p_ia)>dp) and n_iter<Nit:
n_iter += 1
p_ia_old = np.copy(p_ia)
S_ij = np.einsum('a,ia,ja',mu_a, p_ia, T_ia)
W_ij = (n_ij + n_ij.T + pc)/(S_ij + S_ij.T + pc)
avg_pi = p_ia.mean(axis=-1)
average_rate = W_ij.dot(avg_pi).dot(avg_pi)
W_ij = W_ij/average_rate
mu_a *=average_rate
p_ia = m_ia/(mu_a*np.dot(W_ij,T_ia)+Lambda)
p_ia = p_ia/p_ia.sum(axis=0)
mu_a = n_a/(pc+np.einsum('ia,ij,ja->a', p_ia, W_ij, T_ia))
if n_iter >= Nit:
gtr.logger('WARNING: maximum number of iterations has been reached in GTR inference',3, warn=True)
if LA.norm(p_ia_old-p_ia) > dp:
gtr.logger('the iterative scheme has not converged',3,warn=True)
if gtr.gap_index is not None:
for p in range(p_ia.shape[-1]):
if p_ia[gtr.gap_index,p]<gap_limit:
gtr.logger('The model allows for gaps which are estimated to occur at a low fraction of %1.3e'%p_ia[gtr.gap_index, p]+
'\n\t\tthis can potentially result in artifacts.'+
'\n\t\tgap fraction will be set to %1.4f'%gap_limit,2,warn=True)
p_ia[gtr.gap_index,p] = gap_limit
p_ia[:,p] /= p_ia[:,p].sum()
gtr.assign_rates(mu=mu_a, W=W_ij, pi=p_ia)
return gtr | def function[infer, parameter[cls, sub_ija, T_ia, root_state, pc, gap_limit, Nit, dp]]:
constant[
Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = \pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`\pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. The modified equation is
:math:`n_{ij} + pc = \pi_i W_{ij} (T_j + pc + root\_state)`
Parameters
----------
sub_ija : n x n x L array
The number of times a change in character state is observed
between state j and i at position a
T_ia : n x L array
The time spent in each character state at position a
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Keyword arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
]
from relative_module[scipy] import module[linalg]
variable[gtr] assign[=] call[name[cls], parameter[]]
call[name[gtr].logger, parameter[constant[GTR: model inference ], constant[1]]]
variable[q] assign[=] call[name[len], parameter[name[gtr].alphabet]]
variable[L] assign[=] call[name[sub_ija].shape][<ast.UnaryOp object at 0x7da1b01864a0>]
variable[n_iter] assign[=] constant[0]
variable[n_ija] assign[=] call[name[np].copy, parameter[name[sub_ija]]]
call[name[n_ija]][tuple[[<ast.Call object at 0x7da1b01861d0>, <ast.Call object at 0x7da1b0186140>, <ast.Slice object at 0x7da1b01860b0>]]] assign[=] constant[0]
variable[n_ij] assign[=] call[name[n_ija].sum, parameter[]]
variable[m_ia] assign[=] binary_operation[binary_operation[call[name[np].sum, parameter[name[n_ija]]] + name[root_state]] + name[pc]]
variable[n_a] assign[=] binary_operation[call[call[name[n_ija].sum, parameter[]].sum, parameter[]] + name[pc]]
variable[Lambda] assign[=] binary_operation[call[name[np].sum, parameter[name[root_state]]] + binary_operation[name[q] * name[pc]]]
variable[p_ia_old] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b01856c0>, <ast.Name object at 0x7da1b0185690>]]]]
variable[p_ia] assign[=] binary_operation[call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da1b02f5450>, <ast.Name object at 0x7da1b02f58a0>]]]] / name[q]]
variable[mu_a] assign[=] call[name[np].ones, parameter[name[L]]]
variable[W_ij] assign[=] binary_operation[call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da1b02f7eb0>, <ast.Name object at 0x7da1b02f73a0>]]]] - call[name[np].eye, parameter[name[q]]]]
while <ast.BoolOp object at 0x7da1b02f7ca0> begin[:]
<ast.AugAssign object at 0x7da1b02f7550>
variable[p_ia_old] assign[=] call[name[np].copy, parameter[name[p_ia]]]
variable[S_ij] assign[=] call[name[np].einsum, parameter[constant[a,ia,ja], name[mu_a], name[p_ia], name[T_ia]]]
variable[W_ij] assign[=] binary_operation[binary_operation[binary_operation[name[n_ij] + name[n_ij].T] + name[pc]] / binary_operation[binary_operation[name[S_ij] + name[S_ij].T] + name[pc]]]
variable[avg_pi] assign[=] call[name[p_ia].mean, parameter[]]
variable[average_rate] assign[=] call[call[name[W_ij].dot, parameter[name[avg_pi]]].dot, parameter[name[avg_pi]]]
variable[W_ij] assign[=] binary_operation[name[W_ij] / name[average_rate]]
<ast.AugAssign object at 0x7da1b02f74c0>
variable[p_ia] assign[=] binary_operation[name[m_ia] / binary_operation[binary_operation[name[mu_a] * call[name[np].dot, parameter[name[W_ij], name[T_ia]]]] + name[Lambda]]]
variable[p_ia] assign[=] binary_operation[name[p_ia] / call[name[p_ia].sum, parameter[]]]
variable[mu_a] assign[=] binary_operation[name[n_a] / binary_operation[name[pc] + call[name[np].einsum, parameter[constant[ia,ij,ja->a], name[p_ia], name[W_ij], name[T_ia]]]]]
if compare[name[n_iter] greater_or_equal[>=] name[Nit]] begin[:]
call[name[gtr].logger, parameter[constant[WARNING: maximum number of iterations has been reached in GTR inference], constant[3]]]
if compare[call[name[LA].norm, parameter[binary_operation[name[p_ia_old] - name[p_ia]]]] greater[>] name[dp]] begin[:]
call[name[gtr].logger, parameter[constant[the iterative scheme has not converged], constant[3]]]
if compare[name[gtr].gap_index is_not constant[None]] begin[:]
for taget[name[p]] in starred[call[name[range], parameter[call[name[p_ia].shape][<ast.UnaryOp object at 0x7da1b02f40d0>]]]] begin[:]
if compare[call[name[p_ia]][tuple[[<ast.Attribute object at 0x7da1b02f5f90>, <ast.Name object at 0x7da1b02f5300>]]] less[<] name[gap_limit]] begin[:]
call[name[gtr].logger, parameter[binary_operation[binary_operation[binary_operation[constant[The model allows for gaps which are estimated to occur at a low fraction of %1.3e] <ast.Mod object at 0x7da2590d6920> call[name[p_ia]][name[gtr].gap_index]] + constant[
this can potentially result in artifacts.]] + binary_operation[constant[
gap fraction will be set to %1.4f] <ast.Mod object at 0x7da2590d6920> name[gap_limit]]], constant[2]]]
call[name[p_ia]][tuple[[<ast.Attribute object at 0x7da1b02f4fa0>, <ast.Name object at 0x7da1b02f5270>]]] assign[=] name[gap_limit]
<ast.AugAssign object at 0x7da1b02f55a0>
call[name[gtr].assign_rates, parameter[]]
return[name[gtr]] | keyword[def] identifier[infer] ( identifier[cls] , identifier[sub_ija] , identifier[T_ia] , identifier[root_state] , identifier[pc] = literal[int] ,
identifier[gap_limit] = literal[int] , identifier[Nit] = literal[int] , identifier[dp] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[scipy] keyword[import] identifier[linalg] keyword[as] identifier[LA]
identifier[gtr] = identifier[cls] (** identifier[kwargs] )
identifier[gtr] . identifier[logger] ( literal[string] , literal[int] )
identifier[q] = identifier[len] ( identifier[gtr] . identifier[alphabet] )
identifier[L] = identifier[sub_ija] . identifier[shape] [- literal[int] ]
identifier[n_iter] = literal[int]
identifier[n_ija] = identifier[np] . identifier[copy] ( identifier[sub_ija] )
identifier[n_ija] [ identifier[range] ( identifier[q] ), identifier[range] ( identifier[q] ),:]= literal[int]
identifier[n_ij] = identifier[n_ija] . identifier[sum] ( identifier[axis] =- literal[int] )
identifier[m_ia] = identifier[np] . identifier[sum] ( identifier[n_ija] , identifier[axis] = literal[int] )+ identifier[root_state] + identifier[pc]
identifier[n_a] = identifier[n_ija] . identifier[sum] ( identifier[axis] = literal[int] ). identifier[sum] ( identifier[axis] = literal[int] )+ identifier[pc]
identifier[Lambda] = identifier[np] . identifier[sum] ( identifier[root_state] , identifier[axis] = literal[int] )+ identifier[q] * identifier[pc]
identifier[p_ia_old] = identifier[np] . identifier[zeros] (( identifier[q] , identifier[L] ))
identifier[p_ia] = identifier[np] . identifier[ones] (( identifier[q] , identifier[L] ))/ identifier[q]
identifier[mu_a] = identifier[np] . identifier[ones] ( identifier[L] )
identifier[W_ij] = identifier[np] . identifier[ones] (( identifier[q] , identifier[q] ))- identifier[np] . identifier[eye] ( identifier[q] )
keyword[while] ( identifier[LA] . identifier[norm] ( identifier[p_ia_old] - identifier[p_ia] )> identifier[dp] ) keyword[and] identifier[n_iter] < identifier[Nit] :
identifier[n_iter] += literal[int]
identifier[p_ia_old] = identifier[np] . identifier[copy] ( identifier[p_ia] )
identifier[S_ij] = identifier[np] . identifier[einsum] ( literal[string] , identifier[mu_a] , identifier[p_ia] , identifier[T_ia] )
identifier[W_ij] =( identifier[n_ij] + identifier[n_ij] . identifier[T] + identifier[pc] )/( identifier[S_ij] + identifier[S_ij] . identifier[T] + identifier[pc] )
identifier[avg_pi] = identifier[p_ia] . identifier[mean] ( identifier[axis] =- literal[int] )
identifier[average_rate] = identifier[W_ij] . identifier[dot] ( identifier[avg_pi] ). identifier[dot] ( identifier[avg_pi] )
identifier[W_ij] = identifier[W_ij] / identifier[average_rate]
identifier[mu_a] *= identifier[average_rate]
identifier[p_ia] = identifier[m_ia] /( identifier[mu_a] * identifier[np] . identifier[dot] ( identifier[W_ij] , identifier[T_ia] )+ identifier[Lambda] )
identifier[p_ia] = identifier[p_ia] / identifier[p_ia] . identifier[sum] ( identifier[axis] = literal[int] )
identifier[mu_a] = identifier[n_a] /( identifier[pc] + identifier[np] . identifier[einsum] ( literal[string] , identifier[p_ia] , identifier[W_ij] , identifier[T_ia] ))
keyword[if] identifier[n_iter] >= identifier[Nit] :
identifier[gtr] . identifier[logger] ( literal[string] , literal[int] , identifier[warn] = keyword[True] )
keyword[if] identifier[LA] . identifier[norm] ( identifier[p_ia_old] - identifier[p_ia] )> identifier[dp] :
identifier[gtr] . identifier[logger] ( literal[string] , literal[int] , identifier[warn] = keyword[True] )
keyword[if] identifier[gtr] . identifier[gap_index] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[p] keyword[in] identifier[range] ( identifier[p_ia] . identifier[shape] [- literal[int] ]):
keyword[if] identifier[p_ia] [ identifier[gtr] . identifier[gap_index] , identifier[p] ]< identifier[gap_limit] :
identifier[gtr] . identifier[logger] ( literal[string] % identifier[p_ia] [ identifier[gtr] . identifier[gap_index] ]+
literal[string] +
literal[string] % identifier[gap_limit] , literal[int] , identifier[warn] = keyword[True] )
identifier[p_ia] [ identifier[gtr] . identifier[gap_index] , identifier[p] ]= identifier[gap_limit]
identifier[p_ia] [:, identifier[p] ]/= identifier[p_ia] [:, identifier[p] ]. identifier[sum] ()
identifier[gtr] . identifier[assign_rates] ( identifier[mu] = identifier[mu_a] , identifier[W] = identifier[W_ij] , identifier[pi] = identifier[p_ia] )
keyword[return] identifier[gtr] | def infer(cls, sub_ija, T_ia, root_state, pc=0.01, gap_limit=0.01, Nit=30, dp=1e-05, **kwargs):
"""
Infer a GTR model by specifying the number of transitions and time spent in each
character. The basic equation that is being solved is
:math:`n_{ij} = \\pi_i W_{ij} T_j`
where :math:`n_{ij}` are the transitions, :math:`\\pi_i` are the equilibrium
state frequencies, :math:`W_{ij}` is the "substitution attempt matrix",
while :math:`T_i` is the time on the tree spent in character state
:math:`i`. To regularize the process, we add pseudocounts and also need
to account for the fact that the root of the tree is in a particular
state. The modified equation is
:math:`n_{ij} + pc = \\pi_i W_{ij} (T_j + pc + root\\_state)`
Parameters
----------
sub_ija : n x n x L array
The number of times a change in character state is observed
between state j and i at position a
T_ia : n x L array
The time spent in each character state at position a
root_state : n vector
The number of characters in state i in the sequence
of the root node.
pc : float
Pseudocounts, this determines the lower cutoff on the rate when
no substitutions are observed
**kwargs:
Key word arguments to be passed
Keyword Args
------------
alphabet : str
Specify alphabet when applicable. If the alphabet specification
is required, but no alphabet is specified, the nucleotide alphabet will be used as default.
"""
from scipy import linalg as LA
gtr = cls(**kwargs)
gtr.logger('GTR: model inference ', 1)
q = len(gtr.alphabet)
L = sub_ija.shape[-1]
n_iter = 0
n_ija = np.copy(sub_ija)
n_ija[range(q), range(q), :] = 0
n_ij = n_ija.sum(axis=-1)
m_ia = np.sum(n_ija, axis=1) + root_state + pc
n_a = n_ija.sum(axis=1).sum(axis=0) + pc
Lambda = np.sum(root_state, axis=0) + q * pc
p_ia_old = np.zeros((q, L))
p_ia = np.ones((q, L)) / q
mu_a = np.ones(L)
W_ij = np.ones((q, q)) - np.eye(q)
while LA.norm(p_ia_old - p_ia) > dp and n_iter < Nit:
n_iter += 1
p_ia_old = np.copy(p_ia)
S_ij = np.einsum('a,ia,ja', mu_a, p_ia, T_ia)
W_ij = (n_ij + n_ij.T + pc) / (S_ij + S_ij.T + pc)
avg_pi = p_ia.mean(axis=-1)
average_rate = W_ij.dot(avg_pi).dot(avg_pi)
W_ij = W_ij / average_rate
mu_a *= average_rate
p_ia = m_ia / (mu_a * np.dot(W_ij, T_ia) + Lambda)
p_ia = p_ia / p_ia.sum(axis=0)
mu_a = n_a / (pc + np.einsum('ia,ij,ja->a', p_ia, W_ij, T_ia)) # depends on [control=['while'], data=[]]
if n_iter >= Nit:
gtr.logger('WARNING: maximum number of iterations has been reached in GTR inference', 3, warn=True)
if LA.norm(p_ia_old - p_ia) > dp:
gtr.logger('the iterative scheme has not converged', 3, warn=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if gtr.gap_index is not None:
for p in range(p_ia.shape[-1]):
if p_ia[gtr.gap_index, p] < gap_limit:
gtr.logger('The model allows for gaps which are estimated to occur at a low fraction of %1.3e' % p_ia[gtr.gap_index, p] + '\n\t\tthis can potentially result in artifacts.' + '\n\t\tgap fraction will be set to %1.4f' % gap_limit, 2, warn=True) # depends on [control=['if'], data=['gap_limit']]
p_ia[gtr.gap_index, p] = gap_limit
p_ia[:, p] /= p_ia[:, p].sum() # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
gtr.assign_rates(mu=mu_a, W=W_ij, pi=p_ia)
return gtr |
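A minimal numeric check of the relation the inference above solves, n_ij = pi_i * W_ij * T_j, for q = 2 states at a single position; all numbers here are invented for illustration, and the real routine iterates this relation backwards to recover pi and W.

import numpy as np

pi = np.array([0.7, 0.3])            # equilibrium frequencies
W = np.array([[0.0, 2.0],
              [2.0, 0.0]])           # symmetric substitution attempt matrix
T = np.array([5.0, 3.0])             # time spent in each state

# expected substitution counts n_ij = pi_i * W_ij * T_j
n = pi[:, None] * W * T[None, :]
print(n)  # off-diagonal entries are the expected i<->j transition counts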
def list_sources(embedding_name=None):
"""Get valid token embedding names and their pre-trained file names.
To load token embedding vectors from an externally hosted pre-trained token embedding file,
such as those of GloVe and FastText, one should use
`gluonnlp.embedding.create(embedding_name, source)`. This method returns all the
valid names of `source` for the specified `embedding_name`. If `embedding_name` is set to
None, this method returns all the valid names of `embedding_name` with their associated
`source`.
Parameters
----------
embedding_name : str or None, default None
The pre-trained token embedding name.
Returns
-------
dict or list:
A list of all the valid pre-trained token embedding file names (`source`) for the
specified token embedding name (`embedding_name`). If the text embedding name is set to
None, returns a dict mapping each valid token embedding name to a list of valid pre-trained
files (`source`). They can be plugged into
`gluonnlp.embedding.create(embedding_name, source)`.
"""
text_embedding_reg = registry.get_registry(TokenEmbedding)
if embedding_name is not None:
embedding_name = embedding_name.lower()
if embedding_name not in text_embedding_reg:
raise KeyError('Cannot find `embedding_name` {}. Use '
'`list_sources(embedding_name=None).keys()` to get all the valid '
'embedding names.'.format(embedding_name))
return list(text_embedding_reg[embedding_name].source_file_hash.keys())
else:
return {embedding_name: list(embedding_cls.source_file_hash.keys())
for embedding_name, embedding_cls in registry.get_registry(TokenEmbedding).items()} | def function[list_sources, parameter[embedding_name]]:
constant[Get valid token embedding names and their pre-trained file names.
To load token embedding vectors from an externally hosted pre-trained token embedding file,
such as those of GloVe and FastText, one should use
`gluonnlp.embedding.create(embedding_name, source)`. This method returns all the
valid names of `source` for the specified `embedding_name`. If `embedding_name` is set to
None, this method returns all the valid names of `embedding_name` with their associated
`source`.
Parameters
----------
embedding_name : str or None, default None
The pre-trained token embedding name.
Returns
-------
dict or list:
A list of all the valid pre-trained token embedding file names (`source`) for the
specified token embedding name (`embedding_name`). If the text embedding name is set to
None, returns a dict mapping each valid token embedding name to a list of valid pre-trained
files (`source`). They can be plugged into
`gluonnlp.embedding.create(embedding_name, source)`.
]
variable[text_embedding_reg] assign[=] call[name[registry].get_registry, parameter[name[TokenEmbedding]]]
if compare[name[embedding_name] is_not constant[None]] begin[:]
variable[embedding_name] assign[=] call[name[embedding_name].lower, parameter[]]
if compare[name[embedding_name] <ast.NotIn object at 0x7da2590d7190> name[text_embedding_reg]] begin[:]
<ast.Raise object at 0x7da2041d8e50>
return[call[name[list], parameter[call[call[name[text_embedding_reg]][name[embedding_name]].source_file_hash.keys, parameter[]]]]] | keyword[def] identifier[list_sources] ( identifier[embedding_name] = keyword[None] ):
literal[string]
identifier[text_embedding_reg] = identifier[registry] . identifier[get_registry] ( identifier[TokenEmbedding] )
keyword[if] identifier[embedding_name] keyword[is] keyword[not] keyword[None] :
identifier[embedding_name] = identifier[embedding_name] . identifier[lower] ()
keyword[if] identifier[embedding_name] keyword[not] keyword[in] identifier[text_embedding_reg] :
keyword[raise] identifier[KeyError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[embedding_name] ))
keyword[return] identifier[list] ( identifier[text_embedding_reg] [ identifier[embedding_name] ]. identifier[source_file_hash] . identifier[keys] ())
keyword[else] :
keyword[return] { identifier[embedding_name] : identifier[list] ( identifier[embedding_cls] . identifier[source_file_hash] . identifier[keys] ())
keyword[for] identifier[embedding_name] , identifier[embedding_cls] keyword[in] identifier[registry] . identifier[get_registry] ( identifier[TokenEmbedding] ). identifier[items] ()} | def list_sources(embedding_name=None):
"""Get valid token embedding names and their pre-trained file names.
To load token embedding vectors from an externally hosted pre-trained token embedding file,
such as those of GloVe and FastText, one should use
`gluonnlp.embedding.create(embedding_name, source)`. This method returns all the
valid names of `source` for the specified `embedding_name`. If `embedding_name` is set to
None, this method returns all the valid names of `embedding_name` with their associated
`source`.
Parameters
----------
embedding_name : str or None, default None
The pre-trained token embedding name.
Returns
-------
dict or list:
A list of all the valid pre-trained token embedding file names (`source`) for the
specified token embedding name (`embedding_name`). If the text embedding name is set to
None, returns a dict mapping each valid token embedding name to a list of valid pre-trained
files (`source`). They can be plugged into
`gluonnlp.embedding.create(embedding_name, source)`.
"""
text_embedding_reg = registry.get_registry(TokenEmbedding)
if embedding_name is not None:
embedding_name = embedding_name.lower()
if embedding_name not in text_embedding_reg:
raise KeyError('Cannot find `embedding_name` {}. Use `list_sources(embedding_name=None).keys()` to get all the valid embedding names.'.format(embedding_name)) # depends on [control=['if'], data=['embedding_name']]
return list(text_embedding_reg[embedding_name].source_file_hash.keys()) # depends on [control=['if'], data=['embedding_name']]
else:
return {embedding_name: list(embedding_cls.source_file_hash.keys()) for (embedding_name, embedding_cls) in registry.get_registry(TokenEmbedding).items()} |
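A hedged usage sketch of the API documented above; it requires gluonnlp to be installed, the printed names vary by version, and the final create() call downloads the named vectors.

import gluonnlp

# All valid (embedding_name -> sources) pairs:
all_sources = gluonnlp.embedding.list_sources()
print(sorted(all_sources.keys()))  # e.g. ['fasttext', 'glove', 'word2vec']

# Sources for one embedding, usable with gluonnlp.embedding.create:
print(gluonnlp.embedding.list_sources('glove')[:3])
emb = gluonnlp.embedding.create('glove', source='glove.6B.50d')  # triggers a download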
def Exception(obj, eng, callbacks, exc_info):
"""Handle general exceptions in workflow, saving states."""
exception_repr = ''.join(traceback.format_exception(*exc_info))
msg = "Error:\n%s" % (exception_repr)
eng.log.error(msg)
if obj:
# Sets an error message as a tuple (title, details)
obj.extra_data['_error_msg'] = exception_repr
obj.save(
status=obj.known_statuses.ERROR,
callback_pos=eng.state.callback_pos,
id_workflow=eng.uuid
)
eng.save(WorkflowStatus.ERROR)
db.session.commit()
# Call super which will reraise
super(InvenioTransitionAction, InvenioTransitionAction).Exception(
obj, eng, callbacks, exc_info
) | def function[Exception, parameter[obj, eng, callbacks, exc_info]]:
constant[Handle general exceptions in workflow, saving states.]
variable[exception_repr] assign[=] call[constant[].join, parameter[call[name[traceback].format_exception, parameter[<ast.Starred object at 0x7da20c7cab60>]]]]
variable[msg] assign[=] binary_operation[constant[Error:
%s] <ast.Mod object at 0x7da2590d6920> name[exception_repr]]
call[name[eng].log.error, parameter[name[msg]]]
if name[obj] begin[:]
call[name[obj].extra_data][constant[_error_msg]] assign[=] name[exception_repr]
call[name[obj].save, parameter[]]
call[name[eng].save, parameter[name[WorkflowStatus].ERROR]]
call[name[db].session.commit, parameter[]]
call[call[name[super], parameter[name[InvenioTransitionAction], name[InvenioTransitionAction]]].Exception, parameter[name[obj], name[eng], name[callbacks], name[exc_info]]] | keyword[def] identifier[Exception] ( identifier[obj] , identifier[eng] , identifier[callbacks] , identifier[exc_info] ):
literal[string]
identifier[exception_repr] = literal[string] . identifier[join] ( identifier[traceback] . identifier[format_exception] (* identifier[exc_info] ))
identifier[msg] = literal[string] %( identifier[exception_repr] )
identifier[eng] . identifier[log] . identifier[error] ( identifier[msg] )
keyword[if] identifier[obj] :
identifier[obj] . identifier[extra_data] [ literal[string] ]= identifier[exception_repr]
identifier[obj] . identifier[save] (
identifier[status] = identifier[obj] . identifier[known_statuses] . identifier[ERROR] ,
identifier[callback_pos] = identifier[eng] . identifier[state] . identifier[callback_pos] ,
identifier[id_workflow] = identifier[eng] . identifier[uuid]
)
identifier[eng] . identifier[save] ( identifier[WorkflowStatus] . identifier[ERROR] )
identifier[db] . identifier[session] . identifier[commit] ()
identifier[super] ( identifier[InvenioTransitionAction] , identifier[InvenioTransitionAction] ). identifier[Exception] (
identifier[obj] , identifier[eng] , identifier[callbacks] , identifier[exc_info]
) | def Exception(obj, eng, callbacks, exc_info):
"""Handle general exceptions in workflow, saving states."""
exception_repr = ''.join(traceback.format_exception(*exc_info))
msg = 'Error:\n%s' % exception_repr
eng.log.error(msg)
if obj:
# Sets an error message as a tuple (title, details)
obj.extra_data['_error_msg'] = exception_repr
obj.save(status=obj.known_statuses.ERROR, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid) # depends on [control=['if'], data=[]]
eng.save(WorkflowStatus.ERROR)
db.session.commit()
# Call super which will reraise
super(InvenioTransitionAction, InvenioTransitionAction).Exception(obj, eng, callbacks, exc_info) |
def get_stories(label_type):
""" Returns a list of the stories in the Na corpus. """
prefixes = get_story_prefixes(label_type)
texts = list(set([prefix.split(".")[0].split("/")[1] for prefix in prefixes]))
return texts | def function[get_stories, parameter[label_type]]:
constant[ Returns a list of the stories in the Na corpus. ]
variable[prefixes] assign[=] call[name[get_story_prefixes], parameter[name[label_type]]]
variable[texts] assign[=] call[name[list], parameter[call[name[set], parameter[<ast.ListComp object at 0x7da1b11a55d0>]]]]
return[name[texts]] | keyword[def] identifier[get_stories] ( identifier[label_type] ):
literal[string]
identifier[prefixes] = identifier[get_story_prefixes] ( identifier[label_type] )
identifier[texts] = identifier[list] ( identifier[set] ([ identifier[prefix] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[prefix] keyword[in] identifier[prefixes] ]))
keyword[return] identifier[texts] | def get_stories(label_type):
""" Returns a list of the stories in the Na corpus. """
prefixes = get_story_prefixes(label_type)
texts = list(set([prefix.split('.')[0].split('/')[1] for prefix in prefixes]))
return texts |
def extract_commands(imported_vars):
"""
Extract command (``click.core.Command``) objects from the passed-in list of variables.
:param dict_items imported_vars: key-value item list of a dict
:return: a dict of the objects identified as terminal commands
:rtype: dict(str, object)
"""
commands = dict()
for tup in imported_vars:
name, obj = tup
if is_command_object(obj):
commands.setdefault(name, obj)
return commands | def function[extract_commands, parameter[imported_vars]]:
constant[
Extract command (``click.core.Command``) objects from the passed-in list of variables.
:param dict_items imported_vars: key-value item list of a dict
:return: a dict of the objects identified as terminal commands
:rtype: dict(str, object)
]
variable[commands] assign[=] call[name[dict], parameter[]]
for taget[name[tup]] in starred[name[imported_vars]] begin[:]
<ast.Tuple object at 0x7da1b26afd60> assign[=] name[tup]
if call[name[is_command_object], parameter[name[obj]]] begin[:]
call[name[commands].setdefault, parameter[name[name], name[obj]]]
return[name[commands]] | keyword[def] identifier[extract_commands] ( identifier[imported_vars] ):
literal[string]
identifier[commands] = identifier[dict] ()
keyword[for] identifier[tup] keyword[in] identifier[imported_vars] :
identifier[name] , identifier[obj] = identifier[tup]
keyword[if] identifier[is_command_object] ( identifier[obj] ):
identifier[commands] . identifier[setdefault] ( identifier[name] , identifier[obj] )
keyword[return] identifier[commands] | def extract_commands(imported_vars):
"""
从传入的变量列表中提取命令( ``click.core.Command`` )对象
:param dict_items imported_vars: 字典的键值条目列表
:return: 判定为终端命令的对象字典
:rtype: dict(str, object)
"""
commands = dict()
for tup in imported_vars:
(name, obj) = tup
if is_command_object(obj):
commands.setdefault(name, obj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tup']]
return commands |
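A self-contained sketch of how such extraction is typically used with click; the is_command_object helper is assumed to be a plain isinstance check, which the source does not show.

import click

def is_command_object(obj):
    # assumption: the helper simply tests for click Command instances
    return isinstance(obj, click.core.Command)

def extract_commands(imported_vars):
    commands = dict()
    for name, obj in imported_vars:
        if is_command_object(obj):
            commands.setdefault(name, obj)
    return commands

@click.command()
def hello():
    click.echo('hello')

print(extract_commands(globals().items()))  # {'hello': <Command hello>}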
def _softmax(x, dim):
"""Computes softmax along a specified dim. Keras currently lacks this feature.
"""
if K.backend() == 'tensorflow':
import tensorflow as tf
return tf.nn.softmax(x, dim)
elif K.backend() == 'cntk':
import cntk
return cntk.softmax(x, dim)
elif K.backend() == 'theano':
# Theano cannot softmax along an arbitrary dim.
# So, we will shuffle `dim` to -1 and un-shuffle after softmax.
perm = np.arange(K.ndim(x))
perm[dim], perm[-1] = perm[-1], perm[dim]
x_perm = K.permute_dimensions(x, perm)
output = K.softmax(x_perm)
# Permute back
perm[dim], perm[-1] = perm[-1], perm[dim]
output = K.permute_dimensions(output, perm)
return output
else:
raise ValueError("Backend '{}' not supported".format(K.backend())) | def function[_softmax, parameter[x, dim]]:
constant[Computes softmax along a specified dim. Keras currently lacks this feature.
]
if compare[call[name[K].backend, parameter[]] equal[==] constant[tensorflow]] begin[:]
import module[tensorflow] as alias[tf]
return[call[name[tf].nn.softmax, parameter[name[x], name[dim]]]] | keyword[def] identifier[_softmax] ( identifier[x] , identifier[dim] ):
literal[string]
keyword[if] identifier[K] . identifier[backend] ()== literal[string] :
keyword[import] identifier[tensorflow] keyword[as] identifier[tf]
keyword[return] identifier[tf] . identifier[nn] . identifier[softmax] ( identifier[x] , identifier[dim] )
keyword[elif] identifier[K] . identifier[backend] ()== literal[string] :
keyword[import] identifier[cntk]
keyword[return] identifier[cntk] . identifier[softmax] ( identifier[x] , identifier[dim] )
keyword[elif] identifier[K] . identifier[backend] ()== literal[string] :
identifier[perm] = identifier[np] . identifier[arange] ( identifier[K] . identifier[ndim] ( identifier[x] ))
identifier[perm] [ identifier[dim] ], identifier[perm] [- literal[int] ]= identifier[perm] [- literal[int] ], identifier[perm] [ identifier[dim] ]
identifier[x_perm] = identifier[K] . identifier[permute_dimensions] ( identifier[x] , identifier[perm] )
identifier[output] = identifier[K] . identifier[softmax] ( identifier[x_perm] )
identifier[perm] [ identifier[dim] ], identifier[perm] [- literal[int] ]= identifier[perm] [- literal[int] ], identifier[perm] [ identifier[dim] ]
identifier[output] = identifier[K] . identifier[permute_dimensions] ( identifier[output] , identifier[perm] )
keyword[return] identifier[output]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[K] . identifier[backend] ())) | def _softmax(x, dim):
"""Computes softmax along a specified dim. Keras currently lacks this feature.
"""
if K.backend() == 'tensorflow':
import tensorflow as tf
return tf.nn.softmax(x, dim) # depends on [control=['if'], data=[]]
elif K.backend() == 'cntk':
import cntk
return cntk.softmax(x, dim) # depends on [control=['if'], data=[]]
elif K.backend() == 'theano':
# Theano cannot softmax along an arbitrary dim.
# So, we will shuffle `dim` to -1 and un-shuffle after softmax.
perm = np.arange(K.ndim(x))
(perm[dim], perm[-1]) = (perm[-1], perm[dim])
x_perm = K.permute_dimensions(x, perm)
output = K.softmax(x_perm)
# Permute back
(perm[dim], perm[-1]) = (perm[-1], perm[dim])
output = K.permute_dimensions(output, perm)
return output # depends on [control=['if'], data=[]]
else:
raise ValueError("Backend '{}' not supported".format(K.backend())) |
def getexptimeimg(self,chip):
"""
Notes
=====
Return an array representing the exposure time per pixel for the detector.
This method will be overloaded for IR detectors which have their own
EXP arrays, namely, WFC3/IR and NICMOS images.
:units:
None
Returns
=======
exptimeimg : numpy array
The method will return an array of the same shape as the image.
"""
sci_chip = self._image[self.scienceExt,chip]
if sci_chip._wtscl_par == 'expsq':
wtscl = sci_chip._exptime*sci_chip._exptime
else:
wtscl = sci_chip._exptime
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*wtscl | def function[getexptimeimg, parameter[self, chip]]:
constant[
Notes
=====
Return an array representing the exposure time per pixel for the detector.
This method will be overloaded for IR detectors which have their own
EXP arrays, namely, WFC3/IR and NICMOS images.
:units:
None
Returns
=======
exptimeimg : numpy array
The method will return an array of the same shape as the image.
]
variable[sci_chip] assign[=] call[name[self]._image][tuple[[<ast.Attribute object at 0x7da1b1be4310>, <ast.Name object at 0x7da1b1be43a0>]]]
if compare[name[sci_chip]._wtscl_par equal[==] constant[expsq]] begin[:]
variable[wtscl] assign[=] binary_operation[name[sci_chip]._exptime * name[sci_chip]._exptime]
return[binary_operation[call[name[np].ones, parameter[name[sci_chip].image_shape]] * name[wtscl]]] | keyword[def] identifier[getexptimeimg] ( identifier[self] , identifier[chip] ):
literal[string]
identifier[sci_chip] = identifier[self] . identifier[_image] [ identifier[self] . identifier[scienceExt] , identifier[chip] ]
keyword[if] identifier[sci_chip] . identifier[_wtscl_par] == literal[string] :
identifier[wtscl] = identifier[sci_chip] . identifier[_exptime] * identifier[sci_chip] . identifier[_exptime]
keyword[else] :
identifier[wtscl] = identifier[sci_chip] . identifier[_exptime]
keyword[return] identifier[np] . identifier[ones] ( identifier[sci_chip] . identifier[image_shape] , identifier[dtype] = identifier[sci_chip] . identifier[image_dtype] )* identifier[wtscl] | def getexptimeimg(self, chip):
"""
Notes
=====
Return an array representing the exposure time per pixel for the detector.
This method will be overloaded for IR detectors which have their own
EXP arrays, namely, WFC3/IR and NICMOS images.
:units:
None
Returns
=======
exptimeimg : numpy array
The method will return an array of the same shape as the image.
"""
sci_chip = self._image[self.scienceExt, chip]
if sci_chip._wtscl_par == 'expsq':
wtscl = sci_chip._exptime * sci_chip._exptime # depends on [control=['if'], data=[]]
else:
wtscl = sci_chip._exptime
return np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype) * wtscl |
def unwrap(self, value, session=None):
''' Validates ``value`` and then returns a dictionary with each key in
``value`` mapped to its value unwrapped using ``DictField.value_type``
'''
self.validate_unwrap(value)
ret = {}
for k, v in value.items():
ret[k] = self.value_type.unwrap(v, session=session)
return ret | def function[unwrap, parameter[self, value, session]]:
constant[ Validates ``value`` and then returns a dictionary with each key in
``value`` mapped to its value unwrapped using ``DictField.value_type``
]
call[name[self].validate_unwrap, parameter[name[value]]]
variable[ret] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b2347dc0>, <ast.Name object at 0x7da1b23457b0>]]] in starred[call[name[value].items, parameter[]]] begin[:]
call[name[ret]][name[k]] assign[=] call[name[self].value_type.unwrap, parameter[name[v]]]
return[name[ret]] | keyword[def] identifier[unwrap] ( identifier[self] , identifier[value] , identifier[session] = keyword[None] ):
literal[string]
identifier[self] . identifier[validate_unwrap] ( identifier[value] )
identifier[ret] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[value] . identifier[items] ():
identifier[ret] [ identifier[k] ]= identifier[self] . identifier[value_type] . identifier[unwrap] ( identifier[v] , identifier[session] = identifier[session] )
keyword[return] identifier[ret] | def unwrap(self, value, session=None):
""" Validates ``value`` and then returns a dictionary with each key in
``value`` mapped to its value unwrapped using ``DictField.value_type``
"""
self.validate_unwrap(value)
ret = {}
for (k, v) in value.items():
ret[k] = self.value_type.unwrap(v, session=session) # depends on [control=['for'], data=[]]
return ret |
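A runnable sketch of the same key-wise unwrap contract, with a hypothetical IntField playing the role of value_type:

class IntField:
    # Hypothetical value_type with the same unwrap() signature as above.
    def unwrap(self, value, session=None):
        return int(value)

class DictField:
    def __init__(self, value_type):
        self.value_type = value_type
    def validate_unwrap(self, value):
        if not isinstance(value, dict):
            raise TypeError('expected a dict')
    def unwrap(self, value, session=None):
        self.validate_unwrap(value)
        return {k: self.value_type.unwrap(v, session=session)
                for k, v in value.items()}

print(DictField(IntField()).unwrap({'a': '1', 'b': '2'}))  # {'a': 1, 'b': 2}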
def OpenChildren(self,
children=None,
mode="r",
limit=None,
chunk_limit=100000,
age=NEWEST_TIME):
"""Yields AFF4 Objects of all our direct children.
This method efficiently returns all attributes for our children directly, in
a few data store round trips. We use the directory indexes to query the data
store.
Args:
children: A list of children RDFURNs to open. If None open all our
children.
mode: The mode the files should be opened with.
limit: Total number of items we will attempt to retrieve.
chunk_limit: Maximum number of items to retrieve at a time.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
Instances for each direct child.
"""
if children is None:
# No age passed here to avoid ignoring indexes that were updated
# to a timestamp greater than the object's age.
subjects = list(self.ListChildren())
else:
subjects = list(children)
subjects.sort()
result_count = 0
    # Read children in chunks of at most chunk_limit at a time; limit caps the total yielded.
while subjects:
to_read = subjects[:chunk_limit]
subjects = subjects[chunk_limit:]
for child in FACTORY.MultiOpen(
to_read, mode=mode, token=self.token, age=age):
yield child
result_count += 1
if limit and result_count >= limit:
return | def function[OpenChildren, parameter[self, children, mode, limit, chunk_limit, age]]:
constant[Yields AFF4 Objects of all our direct children.
This method efficiently returns all attributes for our children directly, in
a few data store round trips. We use the directory indexes to query the data
store.
Args:
children: A list of children RDFURNs to open. If None open all our
children.
mode: The mode the files should be opened with.
limit: Total number of items we will attempt to retrieve.
chunk_limit: Maximum number of items to retrieve at a time.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
Instances for each direct child.
]
if compare[name[children] is constant[None]] begin[:]
variable[subjects] assign[=] call[name[list], parameter[call[name[self].ListChildren, parameter[]]]]
call[name[subjects].sort, parameter[]]
variable[result_count] assign[=] constant[0]
while name[subjects] begin[:]
variable[to_read] assign[=] call[name[subjects]][<ast.Slice object at 0x7da1b23470a0>]
variable[subjects] assign[=] call[name[subjects]][<ast.Slice object at 0x7da1b2346d10>]
for taget[name[child]] in starred[call[name[FACTORY].MultiOpen, parameter[name[to_read]]]] begin[:]
<ast.Yield object at 0x7da1b1b84e80>
<ast.AugAssign object at 0x7da1b1b858d0>
if <ast.BoolOp object at 0x7da1b1b843a0> begin[:]
return[None] | keyword[def] identifier[OpenChildren] ( identifier[self] ,
identifier[children] = keyword[None] ,
identifier[mode] = literal[string] ,
identifier[limit] = keyword[None] ,
identifier[chunk_limit] = literal[int] ,
identifier[age] = identifier[NEWEST_TIME] ):
literal[string]
keyword[if] identifier[children] keyword[is] keyword[None] :
identifier[subjects] = identifier[list] ( identifier[self] . identifier[ListChildren] ())
keyword[else] :
identifier[subjects] = identifier[list] ( identifier[children] )
identifier[subjects] . identifier[sort] ()
identifier[result_count] = literal[int]
keyword[while] identifier[subjects] :
identifier[to_read] = identifier[subjects] [: identifier[chunk_limit] ]
identifier[subjects] = identifier[subjects] [ identifier[chunk_limit] :]
keyword[for] identifier[child] keyword[in] identifier[FACTORY] . identifier[MultiOpen] (
identifier[to_read] , identifier[mode] = identifier[mode] , identifier[token] = identifier[self] . identifier[token] , identifier[age] = identifier[age] ):
keyword[yield] identifier[child]
identifier[result_count] += literal[int]
keyword[if] identifier[limit] keyword[and] identifier[result_count] >= identifier[limit] :
keyword[return] | def OpenChildren(self, children=None, mode='r', limit=None, chunk_limit=100000, age=NEWEST_TIME):
"""Yields AFF4 Objects of all our direct children.
This method efficiently returns all attributes for our children directly, in
a few data store round trips. We use the directory indexes to query the data
store.
Args:
children: A list of children RDFURNs to open. If None open all our
children.
mode: The mode the files should be opened with.
limit: Total number of items we will attempt to retrieve.
chunk_limit: Maximum number of items to retrieve at a time.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
Instances for each direct child.
"""
if children is None:
# No age passed here to avoid ignoring indexes that were updated
# to a timestamp greater than the object's age.
subjects = list(self.ListChildren()) # depends on [control=['if'], data=[]]
else:
subjects = list(children)
subjects.sort()
result_count = 0
    # Read children in chunks of at most chunk_limit at a time; limit caps the total yielded.
while subjects:
to_read = subjects[:chunk_limit]
subjects = subjects[chunk_limit:]
for child in FACTORY.MultiOpen(to_read, mode=mode, token=self.token, age=age):
yield child
result_count += 1
if limit and result_count >= limit:
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['while'], data=[]] |
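Stripped of the AFF4 factory, the chunked read above is this generic slicing pattern (a sketch):

def in_chunks(subjects, chunk_limit):
    # Same pattern as OpenChildren: sort, then consume the list chunk by chunk.
    subjects = sorted(subjects)
    while subjects:
        yield subjects[:chunk_limit]
        subjects = subjects[chunk_limit:]

for chunk in in_chunks(range(10), 4):
    print(chunk)  # [0, 1, 2, 3] then [4, 5, 6, 7] then [8, 9]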
def plaintext_request(self, registration_id, data=None, collapse_key=None,
delay_while_idle=False, time_to_live=None, retries=5, dry_run=False):
"""
Makes a plaintext request to GCM servers
:param registration_id: string of the registration id
:param data: dict mapping of key-value pairs of messages
:return dict of response body from Google including multicast_id, success, failure, canonical_ids, etc
:raises GCMMissingRegistrationException: if registration_id is not provided
"""
if not registration_id:
raise GCMMissingRegistrationException("Missing registration_id")
payload = self.construct_payload(
registration_id, data, collapse_key,
delay_while_idle, time_to_live, False, dry_run
)
attempt = 0
backoff = self.BACKOFF_INITIAL_DELAY
for attempt in range(retries):
try:
response = self.make_request(payload, is_json=False)
return self.handle_plaintext_response(response)
except GCMUnavailableException:
sleep_time = backoff / 2 + random.randrange(backoff)
time.sleep(float(sleep_time) / 1000)
if 2 * backoff < self.MAX_BACKOFF_DELAY:
backoff *= 2
raise IOError("Could not make request after %d attempts" % attempt) | def function[plaintext_request, parameter[self, registration_id, data, collapse_key, delay_while_idle, time_to_live, retries, dry_run]]:
constant[
Makes a plaintext request to GCM servers
:param registration_id: string of the registration id
:param data: dict mapping of key-value pairs of messages
:return dict of response body from Google including multicast_id, success, failure, canonical_ids, etc
:raises GCMMissingRegistrationException: if registration_id is not provided
]
if <ast.UnaryOp object at 0x7da1b09b85e0> begin[:]
<ast.Raise object at 0x7da1b09b89d0>
variable[payload] assign[=] call[name[self].construct_payload, parameter[name[registration_id], name[data], name[collapse_key], name[delay_while_idle], name[time_to_live], constant[False], name[dry_run]]]
variable[attempt] assign[=] constant[0]
variable[backoff] assign[=] name[self].BACKOFF_INITIAL_DELAY
for taget[name[attempt]] in starred[call[name[range], parameter[name[retries]]]] begin[:]
<ast.Try object at 0x7da1b09ba5c0>
<ast.Raise object at 0x7da1b09bb0d0> | keyword[def] identifier[plaintext_request] ( identifier[self] , identifier[registration_id] , identifier[data] = keyword[None] , identifier[collapse_key] = keyword[None] ,
identifier[delay_while_idle] = keyword[False] , identifier[time_to_live] = keyword[None] , identifier[retries] = literal[int] , identifier[dry_run] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[registration_id] :
keyword[raise] identifier[GCMMissingRegistrationException] ( literal[string] )
identifier[payload] = identifier[self] . identifier[construct_payload] (
identifier[registration_id] , identifier[data] , identifier[collapse_key] ,
identifier[delay_while_idle] , identifier[time_to_live] , keyword[False] , identifier[dry_run]
)
identifier[attempt] = literal[int]
identifier[backoff] = identifier[self] . identifier[BACKOFF_INITIAL_DELAY]
keyword[for] identifier[attempt] keyword[in] identifier[range] ( identifier[retries] ):
keyword[try] :
identifier[response] = identifier[self] . identifier[make_request] ( identifier[payload] , identifier[is_json] = keyword[False] )
keyword[return] identifier[self] . identifier[handle_plaintext_response] ( identifier[response] )
keyword[except] identifier[GCMUnavailableException] :
identifier[sleep_time] = identifier[backoff] / literal[int] + identifier[random] . identifier[randrange] ( identifier[backoff] )
identifier[time] . identifier[sleep] ( identifier[float] ( identifier[sleep_time] )/ literal[int] )
keyword[if] literal[int] * identifier[backoff] < identifier[self] . identifier[MAX_BACKOFF_DELAY] :
identifier[backoff] *= literal[int]
keyword[raise] identifier[IOError] ( literal[string] % identifier[attempt] ) | def plaintext_request(self, registration_id, data=None, collapse_key=None, delay_while_idle=False, time_to_live=None, retries=5, dry_run=False):
"""
Makes a plaintext request to GCM servers
:param registration_id: string of the registration id
:param data: dict mapping of key-value pairs of messages
:return dict of response body from Google including multicast_id, success, failure, canonical_ids, etc
:raises GCMMissingRegistrationException: if registration_id is not provided
"""
if not registration_id:
raise GCMMissingRegistrationException('Missing registration_id') # depends on [control=['if'], data=[]]
payload = self.construct_payload(registration_id, data, collapse_key, delay_while_idle, time_to_live, False, dry_run)
attempt = 0
backoff = self.BACKOFF_INITIAL_DELAY
for attempt in range(retries):
try:
response = self.make_request(payload, is_json=False)
return self.handle_plaintext_response(response) # depends on [control=['try'], data=[]]
except GCMUnavailableException:
sleep_time = backoff / 2 + random.randrange(backoff)
time.sleep(float(sleep_time) / 1000)
if 2 * backoff < self.MAX_BACKOFF_DELAY:
backoff *= 2 # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
raise IOError('Could not make request after %d attempts' % attempt) |
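The retry loop is a standard exponential-backoff-with-jitter pattern; a generic sketch (the delay constants are assumptions, not GCM's documented values):

import random
import time

def with_backoff(make_request, retries=5,
                 initial_delay=1000, max_delay=32000):  # milliseconds, assumed
    backoff = initial_delay
    for _ in range(retries):
        try:
            return make_request()
        except IOError:
            # Sleep for backoff/2 plus random jitter, then double the delay.
            time.sleep((backoff / 2 + random.randrange(backoff)) / 1000.0)
            if 2 * backoff < max_delay:
                backoff *= 2
    raise IOError('Could not make request after %d attempts' % retries)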
def asyncPipeStrregex(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
first = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs(first=first)
parsed = yield asyncDispatch(splits, *asyncFuncs)
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT)) | def function[asyncPipeStrregex, parameter[context, _INPUT, conf]]:
constant[A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
]
variable[splits] assign[=] <ast.Yield object at 0x7da1b0446770>
variable[first] assign[=] call[name[partial], parameter[name[maybeDeferred], name[convert_func]]]
variable[asyncFuncs] assign[=] call[name[get_async_dispatch_funcs], parameter[]]
variable[parsed] assign[=] <ast.Yield object at 0x7da1b0445060>
variable[_OUTPUT] assign[=] <ast.Yield object at 0x7da1b0445750>
call[name[returnValue], parameter[call[name[iter], parameter[name[_OUTPUT]]]]] | keyword[def] identifier[asyncPipeStrregex] ( identifier[context] = keyword[None] , identifier[_INPUT] = keyword[None] , identifier[conf] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[splits] = keyword[yield] identifier[asyncGetSplits] ( identifier[_INPUT] , identifier[conf] [ literal[string] ],** identifier[cdicts] ( identifier[opts] , identifier[kwargs] ))
identifier[first] = identifier[partial] ( identifier[maybeDeferred] , identifier[convert_func] )
identifier[asyncFuncs] = identifier[get_async_dispatch_funcs] ( identifier[first] = identifier[first] )
identifier[parsed] = keyword[yield] identifier[asyncDispatch] ( identifier[splits] ,* identifier[asyncFuncs] )
identifier[_OUTPUT] = keyword[yield] identifier[asyncStarMap] ( identifier[asyncParseResult] , identifier[parsed] )
identifier[returnValue] ( identifier[iter] ( identifier[_OUTPUT] )) | def asyncPipeStrregex(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously replaces text using regexes. Each
has the general format: "In [field] replace [regex pattern] with [text]".
Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'RULE': [
{'match': {'value': <regex1>}, 'replace': {'value': <'text1'>}},
{'match': {'value': <regex2>}, 'replace': {'value': <'text2'>}},
{'match': {'value': <regex3>}, 'replace': {'value': <'text3'>}},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
"""
splits = (yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs)))
first = partial(maybeDeferred, convert_func)
asyncFuncs = get_async_dispatch_funcs(first=first)
parsed = (yield asyncDispatch(splits, *asyncFuncs))
_OUTPUT = (yield asyncStarMap(asyncParseResult, parsed))
returnValue(iter(_OUTPUT)) |
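Stripped of the Twisted plumbing, each RULE is a regex substitution; a synchronous sketch with hypothetical rules:

import re

rules = [{'match': r'cat', 'replace': 'dog'},
         {'match': r'\s+', 'replace': ' '}]

def apply_rules(text, rules):
    # Apply each "replace [regex pattern] with [text]" rule in order.
    for rule in rules:
        text = re.sub(rule['match'], rule['replace'], text)
    return text

print(apply_rules('cat   and  cat', rules))  # 'dog and dog'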
def assignrepr(self, prefix: str = '') -> str:
"""Return a |repr| string with a prefixed assignment."""
lines = ['%sNode("%s", variable="%s",'
% (prefix, self.name, self.variable)]
if self.keywords:
subprefix = '%skeywords=' % (' '*(len(prefix)+5))
with objecttools.repr_.preserve_strings(True):
with objecttools.assignrepr_tuple.always_bracketed(False):
line = objecttools.assignrepr_list(
sorted(self.keywords), subprefix, width=70)
lines.append(line + ',')
lines[-1] = lines[-1][:-1]+')'
return '\n'.join(lines) | def function[assignrepr, parameter[self, prefix]]:
constant[Return a |repr| string with a prefixed assignment.]
variable[lines] assign[=] list[[<ast.BinOp object at 0x7da18f09d480>]]
if name[self].keywords begin[:]
variable[subprefix] assign[=] binary_operation[constant[%skeywords=] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[ ] * binary_operation[call[name[len], parameter[name[prefix]]] + constant[5]]]]
with call[name[objecttools].repr_.preserve_strings, parameter[constant[True]]] begin[:]
with call[name[objecttools].assignrepr_tuple.always_bracketed, parameter[constant[False]]] begin[:]
variable[line] assign[=] call[name[objecttools].assignrepr_list, parameter[call[name[sorted], parameter[name[self].keywords]], name[subprefix]]]
call[name[lines].append, parameter[binary_operation[name[line] + constant[,]]]]
call[name[lines]][<ast.UnaryOp object at 0x7da18f09f9d0>] assign[=] binary_operation[call[call[name[lines]][<ast.UnaryOp object at 0x7da18f09e1a0>]][<ast.Slice object at 0x7da18f09d300>] + constant[)]]
return[call[constant[
].join, parameter[name[lines]]]] | keyword[def] identifier[assignrepr] ( identifier[self] , identifier[prefix] : identifier[str] = literal[string] )-> identifier[str] :
literal[string]
identifier[lines] =[ literal[string]
%( identifier[prefix] , identifier[self] . identifier[name] , identifier[self] . identifier[variable] )]
keyword[if] identifier[self] . identifier[keywords] :
identifier[subprefix] = literal[string] %( literal[string] *( identifier[len] ( identifier[prefix] )+ literal[int] ))
keyword[with] identifier[objecttools] . identifier[repr_] . identifier[preserve_strings] ( keyword[True] ):
keyword[with] identifier[objecttools] . identifier[assignrepr_tuple] . identifier[always_bracketed] ( keyword[False] ):
identifier[line] = identifier[objecttools] . identifier[assignrepr_list] (
identifier[sorted] ( identifier[self] . identifier[keywords] ), identifier[subprefix] , identifier[width] = literal[int] )
identifier[lines] . identifier[append] ( identifier[line] + literal[string] )
identifier[lines] [- literal[int] ]= identifier[lines] [- literal[int] ][:- literal[int] ]+ literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[lines] ) | def assignrepr(self, prefix: str='') -> str:
"""Return a |repr| string with a prefixed assignment."""
lines = ['%sNode("%s", variable="%s",' % (prefix, self.name, self.variable)]
if self.keywords:
subprefix = '%skeywords=' % (' ' * (len(prefix) + 5))
with objecttools.repr_.preserve_strings(True):
with objecttools.assignrepr_tuple.always_bracketed(False):
line = objecttools.assignrepr_list(sorted(self.keywords), subprefix, width=70) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
lines.append(line + ',') # depends on [control=['if'], data=[]]
lines[-1] = lines[-1][:-1] + ')'
return '\n'.join(lines) |
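A simplified sketch of the same prefixed-assignment repr, without hydpy's objecttools helpers (the keywords line is formatted naively):

def node_assignrepr(name, variable, keywords, prefix=''):
    lines = ['%sNode("%s", variable="%s",' % (prefix, name, variable)]
    if keywords:
        subprefix = ' ' * (len(prefix) + 5)
        lines.append('%skeywords=%s,' % (subprefix, sorted(keywords)))
    lines[-1] = lines[-1][:-1] + ')'   # swap the trailing comma for ')'
    return '\n'.join(lines)

print(node_assignrepr('gauge1', 'Q', {'measurement'}))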
def nexec(statement, globals=None, locals=None, **kwargs):
"""Execute *statement* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *statement* is transformed using
:class:`.NapiTransformer`."""
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ast import parse
from napi.transformers import NapiTransformer
from ast import fix_missing_locations as fml
try:
node = parse(statement, '<string>', 'exec')
except ImportError:#KeyError:
exec(statement)
else:
if globals is None:
globals = builtins.globals()
if locals is None:
locals = {}
trans = NapiTransformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'exec')
return builtins.eval(code, globals, locals) | def function[nexec, parameter[statement, globals, locals]]:
constant[Execute *statement* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *statement* is transformed using
:class:`.NapiTransformer`.]
<ast.Try object at 0x7da1b2886920>
from relative_module[ast] import module[parse]
from relative_module[napi.transformers] import module[NapiTransformer]
from relative_module[ast] import module[fix_missing_locations]
<ast.Try object at 0x7da1b27157e0> | keyword[def] identifier[nexec] ( identifier[statement] , identifier[globals] = keyword[None] , identifier[locals] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[import] identifier[__builtin__] keyword[as] identifier[builtins]
keyword[except] identifier[ImportError] :
keyword[import] identifier[builtins]
keyword[from] identifier[ast] keyword[import] identifier[parse]
keyword[from] identifier[napi] . identifier[transformers] keyword[import] identifier[NapiTransformer]
keyword[from] identifier[ast] keyword[import] identifier[fix_missing_locations] keyword[as] identifier[fml]
keyword[try] :
identifier[node] = identifier[parse] ( identifier[statement] , literal[string] , literal[string] )
keyword[except] identifier[ImportError] :
identifier[exec] ( identifier[statement] )
keyword[else] :
keyword[if] identifier[globals] keyword[is] keyword[None] :
identifier[globals] = identifier[builtins] . identifier[globals] ()
keyword[if] identifier[locals] keyword[is] keyword[None] :
identifier[locals] ={}
identifier[trans] = identifier[NapiTransformer] ( identifier[globals] = identifier[globals] , identifier[locals] = identifier[locals] ,** identifier[kwargs] )
identifier[trans] . identifier[visit] ( identifier[node] )
identifier[code] = identifier[compile] ( identifier[fml] ( identifier[node] ), literal[string] , literal[string] )
keyword[return] identifier[builtins] . identifier[eval] ( identifier[code] , identifier[globals] , identifier[locals] ) | def nexec(statement, globals=None, locals=None, **kwargs):
"""Execute *statement* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *statement* is transformed using
:class:`.NapiTransformer`."""
try:
import __builtin__ as builtins # depends on [control=['try'], data=[]]
except ImportError:
import builtins # depends on [control=['except'], data=[]]
from ast import parse
from napi.transformers import NapiTransformer
from ast import fix_missing_locations as fml
try:
node = parse(statement, '<string>', 'exec') # depends on [control=['try'], data=[]]
except ImportError: #KeyError:
exec(statement) # depends on [control=['except'], data=[]]
else:
if globals is None:
globals = builtins.globals() # depends on [control=['if'], data=['globals']]
if locals is None:
locals = {} # depends on [control=['if'], data=['locals']]
trans = NapiTransformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'exec')
return builtins.eval(code, globals, locals) |
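Without the NapiTransformer rewrite step, the core of nexec is just parse -> fix locations -> compile -> eval; a minimal sketch:

import ast
import builtins

def simple_nexec(statement, globals=None, locals=None):
    node = ast.parse(statement, '<string>', 'exec')
    ast.fix_missing_locations(node)           # required after AST edits
    code = compile(node, '<string>', 'exec')
    # eval() executes code objects compiled in 'exec' mode as well.
    return builtins.eval(code, globals if globals is not None else {}, locals)

simple_nexec('x = 1 + 2\nprint(x)')  # prints 3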
def chisqprob(x, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
x : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
    if x <= 0:
        return 1.0
if df <= 0:
raise ValueError("Domain error.")
if x < 1.0 or x < df:
return 1.0 - _igam(0.5*df, 0.5*x)
return _igamc(0.5*df, 0.5*x) | def function[chisqprob, parameter[x, df]]:
constant[
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
x : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
]
    if compare[name[x] less_or_equal[<=] constant[0]] begin[:]
        return[constant[1.0]]
if compare[name[df] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b236c220>
if <ast.BoolOp object at 0x7da1b244ae30> begin[:]
return[binary_operation[constant[1.0] - call[name[_igam], parameter[binary_operation[constant[0.5] * name[df]], binary_operation[constant[0.5] * name[x]]]]]]
return[call[name[_igamc], parameter[binary_operation[constant[0.5] * name[df]], binary_operation[constant[0.5] * name[x]]]]] | keyword[def] identifier[chisqprob] ( identifier[x] , identifier[df] ):
literal[string]
    keyword[if] identifier[x] <= literal[int] :
        keyword[return] literal[int]
keyword[if] identifier[df] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[x] < literal[int] keyword[or] identifier[x] < identifier[df] :
keyword[return] literal[int] - identifier[_igam] ( literal[int] * identifier[df] , literal[int] * identifier[x] )
keyword[return] identifier[_igamc] ( literal[int] * identifier[df] , literal[int] * identifier[x] ) | def chisqprob(x, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
x : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
    if x <= 0:
        return 1.0 # depends on [control=['if'], data=[]]
if df <= 0:
raise ValueError('Domain error.') # depends on [control=['if'], data=[]]
if x < 1.0 or x < df:
return 1.0 - _igam(0.5 * df, 0.5 * x) # depends on [control=['if'], data=[]]
return _igamc(0.5 * df, 0.5 * x) |
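Since chisqprob is the chi-square survival function, it can be cross-checked against scipy (assumed installed):

from scipy import stats

# stats.chi2.sf computes the same 1-tail upper probability.
print(stats.chi2.sf(3.84, df=1))  # ~0.05, the familiar 5% critical value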
def insertBulkBlock(self):
"""
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
"""
try:
body = request.body.read()
indata = cjson.decode(body)
if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])):
dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time",
self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.")
indata = validateJSONInputNoCopy("blockBulk", indata)
self.dbsBlockInsert.putBlock(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
#illegal variable name/number
if str(ex).find("ORA-01036") != -1:
dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex))
else:
sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | def function[insertBulkBlock, parameter[self]]:
constant[
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
]
<ast.Try object at 0x7da20c796b30> | keyword[def] identifier[insertBulkBlock] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[body] = identifier[request] . identifier[body] . identifier[read] ()
identifier[indata] = identifier[cjson] . identifier[decode] ( identifier[body] )
keyword[if] ( identifier[indata] . identifier[get] ( literal[string] ,[]) keyword[and] identifier[indata] . identifier[get] ( literal[string] ,[])):
identifier[dbsExceptionHandler] ( literal[string] , literal[string] ,
identifier[self] . identifier[logger] . identifier[exception] , literal[string] )
identifier[indata] = identifier[validateJSONInputNoCopy] ( literal[string] , identifier[indata] )
identifier[self] . identifier[dbsBlockInsert] . identifier[putBlock] ( identifier[indata] )
keyword[except] identifier[cjson] . identifier[DecodeError] keyword[as] identifier[dc] :
identifier[dbsExceptionHandler] ( literal[string] , literal[string] , identifier[self] . identifier[logger] . identifier[exception] , identifier[str] ( identifier[dc] ))
keyword[except] identifier[dbsException] keyword[as] identifier[de] :
identifier[dbsExceptionHandler] ( identifier[de] . identifier[eCode] , identifier[de] . identifier[message] , identifier[self] . identifier[logger] . identifier[exception] , identifier[de] . identifier[message] )
keyword[except] identifier[HTTPError] keyword[as] identifier[he] :
keyword[raise] identifier[he]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] identifier[str] ( identifier[ex] ). identifier[find] ( literal[string] )!=- literal[int] :
identifier[dbsExceptionHandler] ( literal[string] , literal[string] , identifier[self] . identifier[logger] . identifier[exception] , identifier[str] ( identifier[ex] ))
keyword[else] :
identifier[sError] = literal[string] %( identifier[ex] , identifier[traceback] . identifier[format_exc] ())
identifier[dbsExceptionHandler] ( literal[string] , identifier[dbsExceptionCode] [ literal[string] ], identifier[self] . identifier[logger] . identifier[exception] , identifier[sError] ) | def insertBulkBlock(self):
"""
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
"""
try:
body = request.body.read()
indata = cjson.decode(body)
if indata.get('file_parent_list', []) and indata.get('dataset_parent_list', []):
                dbsExceptionHandler('dbsException-invalid-input2', 'insertBulkBlock: dataset and file parentages cannot be in the input at the same time', self.logger.exception, 'insertBulkBlock: dataset and file parentages cannot be in the input at the same time.') # depends on [control=['if'], data=[]]
indata = validateJSONInputNoCopy('blockBulk', indata)
self.dbsBlockInsert.putBlock(indata) # depends on [control=['try'], data=[]]
except cjson.DecodeError as dc:
dbsExceptionHandler('dbsException-invalid-input2', 'Wrong format/data from insert BulkBlock input', self.logger.exception, str(dc)) # depends on [control=['except'], data=['dc']]
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) # depends on [control=['except'], data=['de']]
except HTTPError as he:
raise he # depends on [control=['except'], data=['he']]
except Exception as ex:
#illegal variable name/number
if str(ex).find('ORA-01036') != -1:
dbsExceptionHandler('dbsException-invalid-input2', 'illegal variable name/number from input', self.logger.exception, str(ex)) # depends on [control=['if'], data=[]]
else:
sError = 'DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s' % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) # depends on [control=['except'], data=['ex']] |
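The input guard above reduces to a mutual-exclusion check on the decoded payload; an illustrative sketch (the helper name is hypothetical):

import json

def validate_bulk_block(body):
    # A block dump may carry file parentage or dataset parentage, not both.
    indata = json.loads(body)
    if indata.get('file_parent_list', []) and indata.get('dataset_parent_list', []):
        raise ValueError('dataset and file parentages cannot both be present')
    return indata

try:
    validate_bulk_block('{"file_parent_list": [1], "dataset_parent_list": [2]}')
except ValueError as err:
    print(err)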
def setLocation(self, location):
""" Change the upper left-hand corner to a new ``Location``
Doesn't change width or height
"""
if not location or not isinstance(location, Location):
raise ValueError("setLocation expected a Location object")
self.x = location.x
self.y = location.y
return self | def function[setLocation, parameter[self, location]]:
constant[ Change the upper left-hand corner to a new ``Location``
Doesn't change width or height
]
if <ast.BoolOp object at 0x7da2041da4d0> begin[:]
<ast.Raise object at 0x7da2041d90f0>
name[self].x assign[=] name[location].x
name[self].y assign[=] name[location].y
return[name[self]] | keyword[def] identifier[setLocation] ( identifier[self] , identifier[location] ):
literal[string]
keyword[if] keyword[not] identifier[location] keyword[or] keyword[not] identifier[isinstance] ( identifier[location] , identifier[Location] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[x] = identifier[location] . identifier[x]
identifier[self] . identifier[y] = identifier[location] . identifier[y]
keyword[return] identifier[self] | def setLocation(self, location):
""" Change the upper left-hand corner to a new ``Location``
Doesn't change width or height
"""
if not location or not isinstance(location, Location):
raise ValueError('setLocation expected a Location object') # depends on [control=['if'], data=[]]
self.x = location.x
self.y = location.y
return self |
def load(cls, path, name):
"""Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
        limited to the protein database without in silico digestion
information.
"""
filepath = aux.joinpath(path, name + '.proteindb')
with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
proteinsString = io.TextIOWrapper(containerZip.open('proteins'),
encoding='utf-8'
).read()
peptidesString = io.TextIOWrapper(containerZip.open('peptides'),
encoding='utf-8'
).read()
infoString = io.TextIOWrapper(containerZip.open('info'),
encoding='utf-8'
).read()
newInstance = cls()
newInstance.proteins = json.loads(proteinsString,
object_hook=ProteinSequence.jsonHook)
newInstance.peptides = json.loads(peptidesString,
object_hook=PeptideSequence.jsonHook)
newInstance.info.update(json.loads(infoString))
return newInstance | def function[load, parameter[cls, path, name]]:
constant[Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
        limited to the protein database without in silico digestion
information.
]
variable[filepath] assign[=] call[name[aux].joinpath, parameter[name[path], binary_operation[name[name] + constant[.proteindb]]]]
with call[name[zipfile].ZipFile, parameter[name[filepath], constant[r]]] begin[:]
variable[proteinsString] assign[=] call[call[name[io].TextIOWrapper, parameter[call[name[containerZip].open, parameter[constant[proteins]]]]].read, parameter[]]
variable[peptidesString] assign[=] call[call[name[io].TextIOWrapper, parameter[call[name[containerZip].open, parameter[constant[peptides]]]]].read, parameter[]]
variable[infoString] assign[=] call[call[name[io].TextIOWrapper, parameter[call[name[containerZip].open, parameter[constant[info]]]]].read, parameter[]]
variable[newInstance] assign[=] call[name[cls], parameter[]]
name[newInstance].proteins assign[=] call[name[json].loads, parameter[name[proteinsString]]]
name[newInstance].peptides assign[=] call[name[json].loads, parameter[name[peptidesString]]]
call[name[newInstance].info.update, parameter[call[name[json].loads, parameter[name[infoString]]]]]
return[name[newInstance]] | keyword[def] identifier[load] ( identifier[cls] , identifier[path] , identifier[name] ):
literal[string]
identifier[filepath] = identifier[aux] . identifier[joinpath] ( identifier[path] , identifier[name] + literal[string] )
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[filepath] , literal[string] , identifier[allowZip64] = keyword[True] ) keyword[as] identifier[containerZip] :
identifier[proteinsString] = identifier[io] . identifier[TextIOWrapper] ( identifier[containerZip] . identifier[open] ( literal[string] ),
identifier[encoding] = literal[string]
). identifier[read] ()
identifier[peptidesString] = identifier[io] . identifier[TextIOWrapper] ( identifier[containerZip] . identifier[open] ( literal[string] ),
identifier[encoding] = literal[string]
). identifier[read] ()
identifier[infoString] = identifier[io] . identifier[TextIOWrapper] ( identifier[containerZip] . identifier[open] ( literal[string] ),
identifier[encoding] = literal[string]
). identifier[read] ()
identifier[newInstance] = identifier[cls] ()
identifier[newInstance] . identifier[proteins] = identifier[json] . identifier[loads] ( identifier[proteinsString] ,
identifier[object_hook] = identifier[ProteinSequence] . identifier[jsonHook] )
identifier[newInstance] . identifier[peptides] = identifier[json] . identifier[loads] ( identifier[peptidesString] ,
identifier[object_hook] = identifier[PeptideSequence] . identifier[jsonHook] )
identifier[newInstance] . identifier[info] . identifier[update] ( identifier[json] . identifier[loads] ( identifier[infoString] ))
keyword[return] identifier[newInstance] | def load(cls, path, name):
"""Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
        limited to the protein database without in silico digestion
information.
"""
filepath = aux.joinpath(path, name + '.proteindb')
with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
proteinsString = io.TextIOWrapper(containerZip.open('proteins'), encoding='utf-8').read()
peptidesString = io.TextIOWrapper(containerZip.open('peptides'), encoding='utf-8').read()
infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8').read() # depends on [control=['with'], data=['containerZip']]
newInstance = cls()
newInstance.proteins = json.loads(proteinsString, object_hook=ProteinSequence.jsonHook)
newInstance.peptides = json.loads(peptidesString, object_hook=PeptideSequence.jsonHook)
newInstance.info.update(json.loads(infoString))
return newInstance |
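A round-trip sketch of the zip-of-JSON container format read above (member name and payload are illustrative, not the real schema):

import io
import json
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w', allowZip64=True) as container:
    container.writestr('info', json.dumps({'version': 1}))
with zipfile.ZipFile(buf, 'r') as container:
    info = json.loads(io.TextIOWrapper(container.open('info'),
                                       encoding='utf-8').read())
print(info)  # {'version': 1}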
def get_all_environment_option_pool(self, id_environment=None, option_id=None, option_type=None):
"""Get all Option VIP by Environment .
:return: Dictionary with the following structure:
::
        {[{'id': < id >,
option: {
'id': <id>
'type':<type>
'name':<name> }
environment: {
'id':<id>
.... all environment info }
etc to option pools ...] }
:raise EnvironmentVipNotFoundError: Environment Pool not registered.
:raise DataBaseError: Can't connect to networkapi database.
:raise XMLError: Failed to generate the XML response.
"""
url='api/pools/environment_options/'
if id_environment:
if option_id:
if option_type:
url = url + "?environment_id=" + str(id_environment)+ "&option_id=" + str(option_id) + "&option_type=" + option_type
else:
url = url + "?environment_id=" + str(id_environment)+ "&option_id=" + str(option_id)
else:
if option_type:
url = url + "?environment_id=" + str(id_environment) + "&option_type=" + option_type
else:
url = url + "?environment_id=" + str(id_environment)
elif option_id:
if option_type:
url = url + "?option_id=" + str(option_id) + "&option_type=" + option_type
else:
url = url + "?option_id=" + str(option_id)
elif option_type:
url = url + "?option_type=" + option_type
return self.get(url) | def function[get_all_environment_option_pool, parameter[self, id_environment, option_id, option_type]]:
constant[Get all Option VIP by Environment .
:return: Dictionary with the following structure:
::
        {[{'id': < id >,
option: {
'id': <id>
'type':<type>
'name':<name> }
environment: {
'id':<id>
.... all environment info }
etc to option pools ...] }
:raise EnvironmentVipNotFoundError: Environment Pool not registered.
:raise DataBaseError: Can't connect to networkapi database.
:raise XMLError: Failed to generate the XML response.
]
variable[url] assign[=] constant[api/pools/environment_options/]
if name[id_environment] begin[:]
if name[option_id] begin[:]
if name[option_type] begin[:]
variable[url] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[url] + constant[?environment_id=]] + call[name[str], parameter[name[id_environment]]]] + constant[&option_id=]] + call[name[str], parameter[name[option_id]]]] + constant[&option_type=]] + name[option_type]]
return[call[name[self].get, parameter[name[url]]]] | keyword[def] identifier[get_all_environment_option_pool] ( identifier[self] , identifier[id_environment] = keyword[None] , identifier[option_id] = keyword[None] , identifier[option_type] = keyword[None] ):
literal[string]
identifier[url] = literal[string]
keyword[if] identifier[id_environment] :
keyword[if] identifier[option_id] :
keyword[if] identifier[option_type] :
identifier[url] = identifier[url] + literal[string] + identifier[str] ( identifier[id_environment] )+ literal[string] + identifier[str] ( identifier[option_id] )+ literal[string] + identifier[option_type]
keyword[else] :
identifier[url] = identifier[url] + literal[string] + identifier[str] ( identifier[id_environment] )+ literal[string] + identifier[str] ( identifier[option_id] )
keyword[else] :
keyword[if] identifier[option_type] :
identifier[url] = identifier[url] + literal[string] + identifier[str] ( identifier[id_environment] )+ literal[string] + identifier[option_type]
keyword[else] :
identifier[url] = identifier[url] + literal[string] + identifier[str] ( identifier[id_environment] )
keyword[elif] identifier[option_id] :
keyword[if] identifier[option_type] :
identifier[url] = identifier[url] + literal[string] + identifier[str] ( identifier[option_id] )+ literal[string] + identifier[option_type]
keyword[else] :
identifier[url] = identifier[url] + literal[string] + identifier[str] ( identifier[option_id] )
keyword[elif] identifier[option_type] :
identifier[url] = identifier[url] + literal[string] + identifier[option_type]
keyword[return] identifier[self] . identifier[get] ( identifier[url] ) | def get_all_environment_option_pool(self, id_environment=None, option_id=None, option_type=None):
"""Get all Option VIP by Environment .
:return: Dictionary with the following structure:
::
        {[{'id': < id >,
option: {
'id': <id>
'type':<type>
'name':<name> }
environment: {
'id':<id>
.... all environment info }
etc to option pools ...] }
:raise EnvironmentVipNotFoundError: Environment Pool not registered.
:raise DataBaseError: Can't connect to networkapi database.
:raise XMLError: Failed to generate the XML response.
"""
url = 'api/pools/environment_options/'
if id_environment:
if option_id:
if option_type:
url = url + '?environment_id=' + str(id_environment) + '&option_id=' + str(option_id) + '&option_type=' + option_type # depends on [control=['if'], data=[]]
else:
url = url + '?environment_id=' + str(id_environment) + '&option_id=' + str(option_id) # depends on [control=['if'], data=[]]
elif option_type:
url = url + '?environment_id=' + str(id_environment) + '&option_type=' + option_type # depends on [control=['if'], data=[]]
else:
url = url + '?environment_id=' + str(id_environment) # depends on [control=['if'], data=[]]
elif option_id:
if option_type:
url = url + '?option_id=' + str(option_id) + '&option_type=' + option_type # depends on [control=['if'], data=[]]
else:
url = url + '?option_id=' + str(option_id) # depends on [control=['if'], data=[]]
elif option_type:
url = url + '?option_type=' + option_type # depends on [control=['if'], data=[]]
return self.get(url) |
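The if/else ladder above amounts to dropping None parameters and urlencoding the rest; an equivalent sketch (the helper name is hypothetical):

from urllib.parse import urlencode

def environment_options_url(environment_id=None, option_id=None, option_type=None):
    params = {'environment_id': environment_id,
              'option_id': option_id,
              'option_type': option_type}
    params = {k: v for k, v in params.items() if v is not None}
    url = 'api/pools/environment_options/'
    return url + ('?' + urlencode(params) if params else '')

print(environment_options_url(environment_id=3, option_type='ServiceDownAction'))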
def check_query(state, query, error_msg=None, expand_msg=None):
"""Run arbitrary queries against to the DB connection to verify the database state.
For queries that do not return any output (INSERTs, UPDATEs, ...),
you cannot use functions like ``check_col()`` and ``is_equal()`` to verify the query result.
``check_query()`` will rerun the solution query in the transaction prepared by sqlbackend,
and immediately afterwards run the query specified in ``query``.
Next, it will also run this query after rerunning the student query in a transaction.
Finally, it produces a child state with these results, that you can then chain off of
with functions like ``check_column()`` and ``has_equal_value()``.
Args:
query: A SQL query as a string that is executed after the student query is re-executed.
error_msg: if specified, this overrides the automatically generated feedback
message in case the query generated an error.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are checking whether an INSERT happened correctly: ::
INSERT INTO company VALUES (2, 'filip', 28, 'sql-lane', 42)
We can write the following SCT: ::
Ex().check_query('SELECT COUNT(*) AS c FROM company').has_equal_value()
"""
if error_msg is None:
error_msg = "Running `{{query}}` after your submission generated an error."
if expand_msg is None:
expand_msg = "The autograder verified the result of running `{{query}}` against the database. "
msg_kwargs = {"query": query}
# before redoing the query,
# make sure that it didn't generate any errors
has_no_error(state)
_msg = state.build_message(error_msg, fmt_kwargs=msg_kwargs)
# sqlbackend makes sure all queries are run in transactions.
# Rerun the solution code first, after which we run the provided query
with dbconn(state.solution_conn) as conn:
_ = runQuery(conn, state.solution_code)
sol_res = runQuery(conn, query)
if sol_res is None:
raise ValueError("Solution failed: " + _msg)
# sqlbackend makes sure all queries are run in transactions.
    # Rerun the student code first, after which we run the provided query
with dbconn(state.student_conn) as conn:
_ = runQuery(conn, state.student_code)
stu_res = runQuery(conn, query)
if stu_res is None:
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": msg_kwargs},
student_result=stu_res,
solution_result=sol_res,
) | def function[check_query, parameter[state, query, error_msg, expand_msg]]:
    constant[Run arbitrary queries against the DB connection to verify the database state.
For queries that do not return any output (INSERTs, UPDATEs, ...),
you cannot use functions like ``check_col()`` and ``is_equal()`` to verify the query result.
``check_query()`` will rerun the solution query in the transaction prepared by sqlbackend,
and immediately afterwards run the query specified in ``query``.
Next, it will also run this query after rerunning the student query in a transaction.
Finally, it produces a child state with these results, that you can then chain off of
with functions like ``check_column()`` and ``has_equal_value()``.
Args:
query: A SQL query as a string that is executed after the student query is re-executed.
error_msg: if specified, this overrides the automatically generated feedback
message in case the query generated an error.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are checking whether an INSERT happened correctly: ::
INSERT INTO company VALUES (2, 'filip', 28, 'sql-lane', 42)
We can write the following SCT: ::
Ex().check_query('SELECT COUNT(*) AS c FROM company').has_equal_value()
]
if compare[name[error_msg] is constant[None]] begin[:]
variable[error_msg] assign[=] constant[Running `{{query}}` after your submission generated an error.]
if compare[name[expand_msg] is constant[None]] begin[:]
variable[expand_msg] assign[=] constant[The autograder verified the result of running `{{query}}` against the database. ]
variable[msg_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b03da560>], [<ast.Name object at 0x7da1b03db850>]]
call[name[has_no_error], parameter[name[state]]]
variable[_msg] assign[=] call[name[state].build_message, parameter[name[error_msg]]]
with call[name[dbconn], parameter[name[state].solution_conn]] begin[:]
variable[_] assign[=] call[name[runQuery], parameter[name[conn], name[state].solution_code]]
variable[sol_res] assign[=] call[name[runQuery], parameter[name[conn], name[query]]]
if compare[name[sol_res] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b02bc4c0>
with call[name[dbconn], parameter[name[state].student_conn]] begin[:]
variable[_] assign[=] call[name[runQuery], parameter[name[conn], name[state].student_code]]
variable[stu_res] assign[=] call[name[runQuery], parameter[name[conn], name[query]]]
if compare[name[stu_res] is constant[None]] begin[:]
call[name[state].do_test, parameter[name[_msg]]]
return[call[name[state].to_child, parameter[]]] | keyword[def] identifier[check_query] ( identifier[state] , identifier[query] , identifier[error_msg] = keyword[None] , identifier[expand_msg] = keyword[None] ):
literal[string]
keyword[if] identifier[error_msg] keyword[is] keyword[None] :
identifier[error_msg] = literal[string]
keyword[if] identifier[expand_msg] keyword[is] keyword[None] :
identifier[expand_msg] = literal[string]
identifier[msg_kwargs] ={ literal[string] : identifier[query] }
identifier[has_no_error] ( identifier[state] )
identifier[_msg] = identifier[state] . identifier[build_message] ( identifier[error_msg] , identifier[fmt_kwargs] = identifier[msg_kwargs] )
keyword[with] identifier[dbconn] ( identifier[state] . identifier[solution_conn] ) keyword[as] identifier[conn] :
identifier[_] = identifier[runQuery] ( identifier[conn] , identifier[state] . identifier[solution_code] )
identifier[sol_res] = identifier[runQuery] ( identifier[conn] , identifier[query] )
keyword[if] identifier[sol_res] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[_msg] )
keyword[with] identifier[dbconn] ( identifier[state] . identifier[student_conn] ) keyword[as] identifier[conn] :
identifier[_] = identifier[runQuery] ( identifier[conn] , identifier[state] . identifier[student_code] )
identifier[stu_res] = identifier[runQuery] ( identifier[conn] , identifier[query] )
keyword[if] identifier[stu_res] keyword[is] keyword[None] :
identifier[state] . identifier[do_test] ( identifier[_msg] )
keyword[return] identifier[state] . identifier[to_child] (
identifier[append_message] ={ literal[string] : identifier[expand_msg] , literal[string] : identifier[msg_kwargs] },
identifier[student_result] = identifier[stu_res] ,
identifier[solution_result] = identifier[sol_res] ,
) | def check_query(state, query, error_msg=None, expand_msg=None):
"""Run arbitrary queries against to the DB connection to verify the database state.
For queries that do not return any output (INSERTs, UPDATEs, ...),
you cannot use functions like ``check_col()`` and ``is_equal()`` to verify the query result.
``check_query()`` will rerun the solution query in the transaction prepared by sqlbackend,
and immediately afterwards run the query specified in ``query``.
Next, it will also run this query after rerunning the student query in a transaction.
Finally, it produces a child state with these results, that you can then chain off of
with functions like ``check_column()`` and ``has_equal_value()``.
Args:
query: A SQL query as a string that is executed after the student query is re-executed.
error_msg: if specified, this overrides the automatically generated feedback
message in case the query generated an error.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are checking whether an INSERT happened correctly: ::
INSERT INTO company VALUES (2, 'filip', 28, 'sql-lane', 42)
We can write the following SCT: ::
Ex().check_query('SELECT COUNT(*) AS c FROM company').has_equal_value()
"""
if error_msg is None:
error_msg = 'Running `{{query}}` after your submission generated an error.' # depends on [control=['if'], data=['error_msg']]
if expand_msg is None:
expand_msg = 'The autograder verified the result of running `{{query}}` against the database. ' # depends on [control=['if'], data=['expand_msg']]
msg_kwargs = {'query': query}
# before redoing the query,
# make sure that it didn't generate any errors
has_no_error(state)
_msg = state.build_message(error_msg, fmt_kwargs=msg_kwargs)
# sqlbackend makes sure all queries are run in transactions.
# Rerun the solution code first, after which we run the provided query
with dbconn(state.solution_conn) as conn:
_ = runQuery(conn, state.solution_code)
sol_res = runQuery(conn, query) # depends on [control=['with'], data=['conn']]
if sol_res is None:
raise ValueError('Solution failed: ' + _msg) # depends on [control=['if'], data=[]]
# sqlbackend makes sure all queries are run in transactions.
    # Rerun the student code first, after which we run the provided query
with dbconn(state.student_conn) as conn:
_ = runQuery(conn, state.student_code)
stu_res = runQuery(conn, query) # depends on [control=['with'], data=['conn']]
if stu_res is None:
state.do_test(_msg) # depends on [control=['if'], data=[]]
return state.to_child(append_message={'msg': expand_msg, 'kwargs': msg_kwargs}, student_result=stu_res, solution_result=sol_res) |
def pandas_mesh(df):
"""Create numpy 2-D "meshgrid" from 3+ columns in a Pandas DataFrame
Arguments:
df (DataFrame): Must have 3 or 4 columns of numerical data
Returns:
OrderedDict: column labels from the data frame are the keys, values are 2-D matrices
All matrices have shape NxM, where N = len(set(df.iloc[:,0])) and M = len(set(df.iloc[:,1]))
>>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6),
... columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE
[array([[ 0, 6, 12],
[ 0, 6, 12],
[ 0, 6, 12]]),
array([[ 1, 1, 1],
[ 7, 7, 7],
[13, 13, 13]]),
array([[ 2., nan, nan],
[ nan, 8., nan],
[ nan, nan, 14.]]),
array([[ 3., nan, nan],
[ nan, 9., nan],
[ nan, nan, 15.]]),
array([[ 4., nan, nan],
[ nan, 10., nan],
[ nan, nan, 16.]]),
array([[ 5., nan, nan],
[ nan, 11., nan],
[ nan, nan, 17.]])]
"""
xyz = [df[c].values for c in df.columns]
index = pd.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y'])
# print(index)
series = [pd.Series(values, index=index) for values in xyz[2:]]
# print(series)
X, Y = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1]))))
N, M = X.shape
Zs = []
# print(Zs)
for k, s in enumerate(series):
Z = np.empty(X.shape)
Z[:] = np.nan
for i, j in itertools.product(range(N), range(M)):
Z[i, j] = s.get((X[i, j], Y[i, j]), np.NAN)
Zs += [Z]
return OrderedDict((df.columns[i], m) for i, m in enumerate([X, Y] + Zs)) | def function[pandas_mesh, parameter[df]]:
constant[Create numpy 2-D "meshgrid" from 3+ columns in a Pandas DataFrame
Arguments:
df (DataFrame): Must have 3 or 4 columns of numerical data
Returns:
OrderedDict: column labels from the data frame are the keys, values are 2-D matrices
All matrices have shape NxM, where N = len(set(df.iloc[:,0])) and M = len(set(df.iloc[:,1]))
>>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6),
... columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE
[array([[ 0, 6, 12],
[ 0, 6, 12],
[ 0, 6, 12]]),
array([[ 1, 1, 1],
[ 7, 7, 7],
[13, 13, 13]]),
array([[ 2., nan, nan],
[ nan, 8., nan],
[ nan, nan, 14.]]),
array([[ 3., nan, nan],
[ nan, 9., nan],
[ nan, nan, 15.]]),
array([[ 4., nan, nan],
[ nan, 10., nan],
[ nan, nan, 16.]]),
array([[ 5., nan, nan],
[ nan, 11., nan],
[ nan, nan, 17.]])]
]
variable[xyz] assign[=] <ast.ListComp object at 0x7da1b14a8a30>
variable[index] assign[=] call[name[pd].MultiIndex.from_tuples, parameter[call[name[zip], parameter[call[name[xyz]][constant[0]], call[name[xyz]][constant[1]]]]]]
variable[series] assign[=] <ast.ListComp object at 0x7da1b14a8df0>
<ast.Tuple object at 0x7da1b14aba30> assign[=] call[name[np].meshgrid, parameter[call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[call[name[xyz]][constant[0]]]]]]]], call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[call[name[xyz]][constant[1]]]]]]]]]]
<ast.Tuple object at 0x7da1b16e22f0> assign[=] name[X].shape
variable[Zs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b16e2470>, <ast.Name object at 0x7da1b16e3b80>]]] in starred[call[name[enumerate], parameter[name[series]]]] begin[:]
variable[Z] assign[=] call[name[np].empty, parameter[name[X].shape]]
call[name[Z]][<ast.Slice object at 0x7da1b168f790>] assign[=] name[np].nan
for taget[tuple[[<ast.Name object at 0x7da1b168e020>, <ast.Name object at 0x7da1b168eb60>]]] in starred[call[name[itertools].product, parameter[call[name[range], parameter[name[N]]], call[name[range], parameter[name[M]]]]]] begin[:]
call[name[Z]][tuple[[<ast.Name object at 0x7da1b168d960>, <ast.Name object at 0x7da1b168e6b0>]]] assign[=] call[name[s].get, parameter[tuple[[<ast.Subscript object at 0x7da1b168e8f0>, <ast.Subscript object at 0x7da1b168e740>]], name[np].NAN]]
<ast.AugAssign object at 0x7da1b168e830>
return[call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b168fac0>]]] | keyword[def] identifier[pandas_mesh] ( identifier[df] ):
literal[string]
identifier[xyz] =[ identifier[df] [ identifier[c] ]. identifier[values] keyword[for] identifier[c] keyword[in] identifier[df] . identifier[columns] ]
identifier[index] = identifier[pd] . identifier[MultiIndex] . identifier[from_tuples] ( identifier[zip] ( identifier[xyz] [ literal[int] ], identifier[xyz] [ literal[int] ]), identifier[names] =[ literal[string] , literal[string] ])
identifier[series] =[ identifier[pd] . identifier[Series] ( identifier[values] , identifier[index] = identifier[index] ) keyword[for] identifier[values] keyword[in] identifier[xyz] [ literal[int] :]]
identifier[X] , identifier[Y] = identifier[np] . identifier[meshgrid] ( identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[xyz] [ literal[int] ]))), identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[xyz] [ literal[int] ]))))
identifier[N] , identifier[M] = identifier[X] . identifier[shape]
identifier[Zs] =[]
keyword[for] identifier[k] , identifier[s] keyword[in] identifier[enumerate] ( identifier[series] ):
identifier[Z] = identifier[np] . identifier[empty] ( identifier[X] . identifier[shape] )
identifier[Z] [:]= identifier[np] . identifier[nan]
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[itertools] . identifier[product] ( identifier[range] ( identifier[N] ), identifier[range] ( identifier[M] )):
identifier[Z] [ identifier[i] , identifier[j] ]= identifier[s] . identifier[get] (( identifier[X] [ identifier[i] , identifier[j] ], identifier[Y] [ identifier[i] , identifier[j] ]), identifier[np] . identifier[NAN] )
identifier[Zs] +=[ identifier[Z] ]
keyword[return] identifier[OrderedDict] (( identifier[df] . identifier[columns] [ identifier[i] ], identifier[m] ) keyword[for] identifier[i] , identifier[m] keyword[in] identifier[enumerate] ([ identifier[X] , identifier[Y] ]+ identifier[Zs] )) | def pandas_mesh(df):
"""Create numpy 2-D "meshgrid" from 3+ columns in a Pandas DataFrame
Arguments:
df (DataFrame): Must have 3 or 4 columns of numerical data
Returns:
OrderedDict: column labels from the data frame are the keys, values are 2-D matrices
    All matrices have shape NxM, where N = len(set(df.iloc[:,1])) and M = len(set(df.iloc[:,0]))
>>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6),
... columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE
[array([[ 0, 6, 12],
[ 0, 6, 12],
[ 0, 6, 12]]),
array([[ 1, 1, 1],
[ 7, 7, 7],
[13, 13, 13]]),
array([[ 2., nan, nan],
[ nan, 8., nan],
[ nan, nan, 14.]]),
array([[ 3., nan, nan],
[ nan, 9., nan],
[ nan, nan, 15.]]),
array([[ 4., nan, nan],
[ nan, 10., nan],
[ nan, nan, 16.]]),
array([[ 5., nan, nan],
[ nan, 11., nan],
[ nan, nan, 17.]])]
"""
xyz = [df[c].values for c in df.columns]
index = pd.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y'])
# print(index)
series = [pd.Series(values, index=index) for values in xyz[2:]]
# print(series)
(X, Y) = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1]))))
(N, M) = X.shape
Zs = []
# print(Zs)
for (k, s) in enumerate(series):
Z = np.empty(X.shape)
Z[:] = np.nan
for (i, j) in itertools.product(range(N), range(M)):
Z[i, j] = s.get((X[i, j], Y[i, j]), np.NAN) # depends on [control=['for'], data=[]]
Zs += [Z] # depends on [control=['for'], data=[]]
return OrderedDict(((df.columns[i], m) for (i, m) in enumerate([X, Y] + Zs))) |
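# Hedged usage sketch for pandas_mesh above: a 4-row DataFrame whose first two
# columns form a complete 2x2 grid. Assumes pandas_mesh is in scope together
# with its module-level imports (pd, np, itertools, OrderedDict).
import pandas as pd

df_demo = pd.DataFrame({'x': [0, 0, 1, 1],
                        'y': [0, 1, 0, 1],
                        'z': [10.0, 11.0, 12.0, 13.0]})
mesh = pandas_mesh(df_demo)               # OrderedDict: 'x' -> X, 'y' -> Y, 'z' -> Z
X, Y, Z = mesh['x'], mesh['y'], mesh['z']
assert X.shape == Y.shape == Z.shape == (2, 2)
# Z[i, j] holds the z value observed at (X[i, j], Y[i, j]); grid points with
# no matching row would be filled with NaN.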
def peers(self):
"list of the (host, port) pairs of all connected peer Hubs"
return [addr for (addr, peer) in self._dispatcher.peers.items()
if peer.up] | def function[peers, parameter[self]]:
constant[list of the (host, port) pairs of all connected peer Hubs]
return[<ast.ListComp object at 0x7da2043462f0>] | keyword[def] identifier[peers] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[addr] keyword[for] ( identifier[addr] , identifier[peer] ) keyword[in] identifier[self] . identifier[_dispatcher] . identifier[peers] . identifier[items] ()
keyword[if] identifier[peer] . identifier[up] ] | def peers(self):
"""list of the (host, port) pairs of all connected peer Hubs"""
return [addr for (addr, peer) in self._dispatcher.peers.items() if peer.up] |
def removeChild(self, child):
"""
Remove a child from this element. The child element is
        returned, and its parentNode element is reset.
"""
super(Table, self).removeChild(child)
if child.tagName == ligolw.Column.tagName:
self._update_column_info()
return child | def function[removeChild, parameter[self, child]]:
constant[
Remove a child from this element. The child element is
returned, and it's parentNode element is reset.
]
call[call[name[super], parameter[name[Table], name[self]]].removeChild, parameter[name[child]]]
if compare[name[child].tagName equal[==] name[ligolw].Column.tagName] begin[:]
call[name[self]._update_column_info, parameter[]]
return[name[child]] | keyword[def] identifier[removeChild] ( identifier[self] , identifier[child] ):
literal[string]
identifier[super] ( identifier[Table] , identifier[self] ). identifier[removeChild] ( identifier[child] )
keyword[if] identifier[child] . identifier[tagName] == identifier[ligolw] . identifier[Column] . identifier[tagName] :
identifier[self] . identifier[_update_column_info] ()
keyword[return] identifier[child] | def removeChild(self, child):
"""
Remove a child from this element. The child element is
        returned, and its parentNode element is reset.
"""
super(Table, self).removeChild(child)
if child.tagName == ligolw.Column.tagName:
self._update_column_info() # depends on [control=['if'], data=[]]
return child |
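# Hedged usage sketch for removeChild above, assuming the glue.ligolw-style
# Table/Column types this override appears to target (names are illustrative):
#
#     col = tbl.getColumnByName('snr')   # look up a Column child
#     tbl.removeChild(col)               # detach it; column info is refreshed
#     assert col.parentNode is None      # the child's parentNode is reset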
def get_strain_label(entry, viral=False):
"""Try to extract a strain from an assemly summary entry.
First this checks 'infraspecific_name', then 'isolate', then
it tries to get it from 'organism_name'. If all fails, it
    falls back to just returning the assembly accession number.
"""
def get_strain(entry):
strain = entry['infraspecific_name']
if strain != '':
strain = strain.split('=')[-1]
return strain
strain = entry['isolate']
if strain != '':
return strain
if len(entry['organism_name'].split(' ')) > 2 and not viral:
strain = ' '.join(entry['organism_name'].split(' ')[2:])
return strain
return entry['assembly_accession']
def cleanup(strain):
strain = strain.strip()
strain = strain.replace(' ', '_')
strain = strain.replace(';', '_')
strain = strain.replace('/', '_')
strain = strain.replace('\\', '_')
return strain
return cleanup(get_strain(entry)) | def function[get_strain_label, parameter[entry, viral]]:
    constant[Try to extract a strain from an assembly summary entry.
First this checks 'infraspecific_name', then 'isolate', then
it tries to get it from 'organism_name'. If all fails, it
    falls back to just returning the assembly accession number.
]
def function[get_strain, parameter[entry]]:
variable[strain] assign[=] call[name[entry]][constant[infraspecific_name]]
if compare[name[strain] not_equal[!=] constant[]] begin[:]
variable[strain] assign[=] call[call[name[strain].split, parameter[constant[=]]]][<ast.UnaryOp object at 0x7da2044c3a00>]
return[name[strain]]
variable[strain] assign[=] call[name[entry]][constant[isolate]]
if compare[name[strain] not_equal[!=] constant[]] begin[:]
return[name[strain]]
if <ast.BoolOp object at 0x7da2044c2170> begin[:]
variable[strain] assign[=] call[constant[ ].join, parameter[call[call[call[name[entry]][constant[organism_name]].split, parameter[constant[ ]]]][<ast.Slice object at 0x7da204344f70>]]]
return[name[strain]]
return[call[name[entry]][constant[assembly_accession]]]
def function[cleanup, parameter[strain]]:
variable[strain] assign[=] call[name[strain].strip, parameter[]]
variable[strain] assign[=] call[name[strain].replace, parameter[constant[ ], constant[_]]]
variable[strain] assign[=] call[name[strain].replace, parameter[constant[;], constant[_]]]
variable[strain] assign[=] call[name[strain].replace, parameter[constant[/], constant[_]]]
variable[strain] assign[=] call[name[strain].replace, parameter[constant[\], constant[_]]]
return[name[strain]]
return[call[name[cleanup], parameter[call[name[get_strain], parameter[name[entry]]]]]] | keyword[def] identifier[get_strain_label] ( identifier[entry] , identifier[viral] = keyword[False] ):
literal[string]
keyword[def] identifier[get_strain] ( identifier[entry] ):
identifier[strain] = identifier[entry] [ literal[string] ]
keyword[if] identifier[strain] != literal[string] :
identifier[strain] = identifier[strain] . identifier[split] ( literal[string] )[- literal[int] ]
keyword[return] identifier[strain]
identifier[strain] = identifier[entry] [ literal[string] ]
keyword[if] identifier[strain] != literal[string] :
keyword[return] identifier[strain]
keyword[if] identifier[len] ( identifier[entry] [ literal[string] ]. identifier[split] ( literal[string] ))> literal[int] keyword[and] keyword[not] identifier[viral] :
identifier[strain] = literal[string] . identifier[join] ( identifier[entry] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] :])
keyword[return] identifier[strain]
keyword[return] identifier[entry] [ literal[string] ]
keyword[def] identifier[cleanup] ( identifier[strain] ):
identifier[strain] = identifier[strain] . identifier[strip] ()
identifier[strain] = identifier[strain] . identifier[replace] ( literal[string] , literal[string] )
identifier[strain] = identifier[strain] . identifier[replace] ( literal[string] , literal[string] )
identifier[strain] = identifier[strain] . identifier[replace] ( literal[string] , literal[string] )
identifier[strain] = identifier[strain] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[strain]
keyword[return] identifier[cleanup] ( identifier[get_strain] ( identifier[entry] )) | def get_strain_label(entry, viral=False):
"""Try to extract a strain from an assemly summary entry.
First this checks 'infraspecific_name', then 'isolate', then
it tries to get it from 'organism_name'. If all fails, it
    falls back to just returning the assembly accession number.
"""
def get_strain(entry):
strain = entry['infraspecific_name']
if strain != '':
strain = strain.split('=')[-1]
return strain # depends on [control=['if'], data=['strain']]
strain = entry['isolate']
if strain != '':
return strain # depends on [control=['if'], data=['strain']]
if len(entry['organism_name'].split(' ')) > 2 and (not viral):
strain = ' '.join(entry['organism_name'].split(' ')[2:])
return strain # depends on [control=['if'], data=[]]
return entry['assembly_accession']
def cleanup(strain):
strain = strain.strip()
strain = strain.replace(' ', '_')
strain = strain.replace(';', '_')
strain = strain.replace('/', '_')
strain = strain.replace('\\', '_')
return strain
return cleanup(get_strain(entry)) |
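# Hedged usage sketch for get_strain_label above; the entry dict mimics a few
# fields of an NCBI assembly_summary row (values are made up) and walks the
# fallback chain: infraspecific_name -> isolate -> organism_name -> accession.
entry = {'infraspecific_name': 'strain=K-12',
         'isolate': '',
         'organism_name': 'Escherichia coli K-12',
         'assembly_accession': 'GCF_000005845.2'}
print(get_strain_label(entry))   # 'K-12', parsed from infraspecific_name

entry['infraspecific_name'] = ''
print(get_strain_label(entry))   # 'K-12', taken from organism_name

entry['organism_name'] = 'Escherichia coli'
print(get_strain_label(entry))   # 'GCF_000005845.2', the accession fallback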
def isclosed(self):
"""This function determines if a connected path is closed."""
assert len(self) != 0
assert self.iscontinuous()
return self.start == self.end | def function[isclosed, parameter[self]]:
constant[This function determines if a connected path is closed.]
assert[compare[call[name[len], parameter[name[self]]] not_equal[!=] constant[0]]]
assert[call[name[self].iscontinuous, parameter[]]]
return[compare[name[self].start equal[==] name[self].end]] | keyword[def] identifier[isclosed] ( identifier[self] ):
literal[string]
keyword[assert] identifier[len] ( identifier[self] )!= literal[int]
keyword[assert] identifier[self] . identifier[iscontinuous] ()
keyword[return] identifier[self] . identifier[start] == identifier[self] . identifier[end] | def isclosed(self):
"""This function determines if a connected path is closed."""
assert len(self) != 0
assert self.iscontinuous()
return self.start == self.end |
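# Hedged usage sketch for isclosed above, assuming it belongs to an
# svgpathtools-style Path built from complex-plane segments:
from svgpathtools import Path, Line  # assumption about the host library

triangle = Path(Line(0 + 0j, 1 + 0j),
                Line(1 + 0j, 0.5 + 1j),
                Line(0.5 + 1j, 0 + 0j))   # end point equals start point
assert triangle.iscontinuous()
assert triangle.isclosed()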
def encrypt(self, data, *args, **kwargs):
'''
Sign/Encrypt
:param data: Data to encrypt
        :param recipients: Single key ID or list of multiple IDs. Will be ignored if symmetric
:param sign_key: Key for signing data before encryption. No sign will be made when not given
:param passphrase: Password for key or symmetric cipher
:param always_trust: Skip key validation and assume that used keys are always fully trusted
:param output_filename: Encrypted data will be written to this file when not None
:param binary: If false, create ASCII armored output
:param symmetric: Encrypt with symmetric cipher only
:rtype: EncryptResult
'''
return self.encrypt_file(self.create_stream(data), *args, **kwargs) | def function[encrypt, parameter[self, data]]:
constant[
Sign/Encrypt
:param data: Data to encrypt
        :param recipients: Single key ID or list of multiple IDs. Will be ignored if symmetric
:param sign_key: Key for signing data before encryption. No sign will be made when not given
:param passphrase: Password for key or symmetric cipher
:param always_trust: Skip key validation and assume that used keys are always fully trusted
:param output_filename: Encrypted data will be written to this file when not None
:param binary: If false, create ASCII armored output
:param symmetric: Encrypt with symmetric cipher only
:rtype: EncryptResult
]
return[call[name[self].encrypt_file, parameter[call[name[self].create_stream, parameter[name[data]]], <ast.Starred object at 0x7da1b09269e0>]]] | keyword[def] identifier[encrypt] ( identifier[self] , identifier[data] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[encrypt_file] ( identifier[self] . identifier[create_stream] ( identifier[data] ),* identifier[args] ,** identifier[kwargs] ) | def encrypt(self, data, *args, **kwargs):
"""
Sign/Encrypt
:param data: Data to encrypt
:param recipients: Single key ID or list of mutiple IDs. Will be ignored if symmetric
:param sign_key: Key for signing data before encryption. No sign will be made when not given
:param passphrase: Password for key or symmetric cipher
:param always_trust: Skip key validation and assume that used keys are always fully trusted
:param output_filename: Encrypted data will be written to this file when not None
:param binary: If false, create ASCII armored output
:param symmetric: Encrypt with symmetric cipher only
:rtype: EncryptResult
"""
return self.encrypt_file(self.create_stream(data), *args, **kwargs) |
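# Hedged usage sketch for encrypt above; `gpg` stands for an already-constructed
# instance of the wrapper this method belongs to, and the key IDs/passphrase are
# placeholders. Keyword names follow the docstring, not a verified signature.
result = gpg.encrypt(b'attack at dawn',
                     recipients='0xDEADBEEF',   # single key ID or a list of IDs
                     sign_key='0xCAFEBABE',     # omit to skip signing
                     passphrase='hunter2',
                     always_trust=True,
                     binary=False)              # ASCII-armored output
# `result` is an EncryptResult; in python-gnupg style wrappers the encrypted
# payload is typically available via str(result).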
def _add_halfwave(data, events, s_freq, opts):
"""Find the next zero crossing and the intervening peak and add
    them to events. If no zero crossing is found before max_dur, the event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
and end samples
"""
max_dur = opts.duration[1]
if max_dur is None:
max_dur = MAXIMUM_DURATION
window = int(s_freq * max_dur)
peak_and_end = zeros((events.shape[0], 2), dtype='int')
events = concatenate((events, peak_and_end), axis=1)
selected = []
for ev in events:
zero_crossings = where(diff(sign(data[ev[2]:ev[0] + window])))[0]
if zero_crossings.any():
ev[4] = ev[2] + zero_crossings[0] + 1
#lg.info('0cross is at ' + str(ev[4]))
else:
selected.append(False)
#lg.info('no 0cross, rejected')
continue
ev[3] = ev[2] + argmin(data[ev[2]:ev[4]])
if abs(data[ev[1]] - data[ev[3]]) < opts.min_ptp:
selected.append(False)
#lg.info('ptp too low, rejected: ' + str(abs(data[ev[1]] - data[ev[3]])))
continue
selected.append(True)
#lg.info('SW checks out, accepted! ptp is ' + str(abs(data[ev[1]] - data[ev[3]])))
return events[selected, :] | def function[_add_halfwave, parameter[data, events, s_freq, opts]]:
constant[Find the next zero crossing and the intervening peak and add
    them to events. If no zero crossing is found before max_dur, the event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
and end samples
]
variable[max_dur] assign[=] call[name[opts].duration][constant[1]]
if compare[name[max_dur] is constant[None]] begin[:]
variable[max_dur] assign[=] name[MAXIMUM_DURATION]
variable[window] assign[=] call[name[int], parameter[binary_operation[name[s_freq] * name[max_dur]]]]
variable[peak_and_end] assign[=] call[name[zeros], parameter[tuple[[<ast.Subscript object at 0x7da1b0e75d20>, <ast.Constant object at 0x7da1b0e74d90>]]]]
variable[events] assign[=] call[name[concatenate], parameter[tuple[[<ast.Name object at 0x7da1b0e741f0>, <ast.Name object at 0x7da1b0e74580>]]]]
variable[selected] assign[=] list[[]]
for taget[name[ev]] in starred[name[events]] begin[:]
variable[zero_crossings] assign[=] call[call[name[where], parameter[call[name[diff], parameter[call[name[sign], parameter[call[name[data]][<ast.Slice object at 0x7da1b0d55c30>]]]]]]]][constant[0]]
if call[name[zero_crossings].any, parameter[]] begin[:]
call[name[ev]][constant[4]] assign[=] binary_operation[binary_operation[call[name[ev]][constant[2]] + call[name[zero_crossings]][constant[0]]] + constant[1]]
call[name[ev]][constant[3]] assign[=] binary_operation[call[name[ev]][constant[2]] + call[name[argmin], parameter[call[name[data]][<ast.Slice object at 0x7da1b0d54100>]]]]
if compare[call[name[abs], parameter[binary_operation[call[name[data]][call[name[ev]][constant[1]]] - call[name[data]][call[name[ev]][constant[3]]]]]] less[<] name[opts].min_ptp] begin[:]
call[name[selected].append, parameter[constant[False]]]
continue
call[name[selected].append, parameter[constant[True]]]
return[call[name[events]][tuple[[<ast.Name object at 0x7da1b0da0310>, <ast.Slice object at 0x7da1b0da1360>]]]] | keyword[def] identifier[_add_halfwave] ( identifier[data] , identifier[events] , identifier[s_freq] , identifier[opts] ):
literal[string]
identifier[max_dur] = identifier[opts] . identifier[duration] [ literal[int] ]
keyword[if] identifier[max_dur] keyword[is] keyword[None] :
identifier[max_dur] = identifier[MAXIMUM_DURATION]
identifier[window] = identifier[int] ( identifier[s_freq] * identifier[max_dur] )
identifier[peak_and_end] = identifier[zeros] (( identifier[events] . identifier[shape] [ literal[int] ], literal[int] ), identifier[dtype] = literal[string] )
identifier[events] = identifier[concatenate] (( identifier[events] , identifier[peak_and_end] ), identifier[axis] = literal[int] )
identifier[selected] =[]
keyword[for] identifier[ev] keyword[in] identifier[events] :
identifier[zero_crossings] = identifier[where] ( identifier[diff] ( identifier[sign] ( identifier[data] [ identifier[ev] [ literal[int] ]: identifier[ev] [ literal[int] ]+ identifier[window] ])))[ literal[int] ]
keyword[if] identifier[zero_crossings] . identifier[any] ():
identifier[ev] [ literal[int] ]= identifier[ev] [ literal[int] ]+ identifier[zero_crossings] [ literal[int] ]+ literal[int]
keyword[else] :
identifier[selected] . identifier[append] ( keyword[False] )
keyword[continue]
identifier[ev] [ literal[int] ]= identifier[ev] [ literal[int] ]+ identifier[argmin] ( identifier[data] [ identifier[ev] [ literal[int] ]: identifier[ev] [ literal[int] ]])
keyword[if] identifier[abs] ( identifier[data] [ identifier[ev] [ literal[int] ]]- identifier[data] [ identifier[ev] [ literal[int] ]])< identifier[opts] . identifier[min_ptp] :
identifier[selected] . identifier[append] ( keyword[False] )
keyword[continue]
identifier[selected] . identifier[append] ( keyword[True] )
keyword[return] identifier[events] [ identifier[selected] ,:] | def _add_halfwave(data, events, s_freq, opts):
"""Find the next zero crossing and the intervening peak and add
    them to events. If no zero crossing is found before max_dur, the event is discarded. If
peak-to-peak is smaller than min_ptp, the event is discarded.
Parameters
----------
data : ndarray (dtype='float')
vector with the data
events : ndarray (dtype='int')
N x 3 matrix with start, trough, end samples
s_freq : float
sampling frequency
opts : instance of 'DetectSlowWave'
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
Returns
-------
ndarray (dtype='int')
N x 5 matrix with start, trough, - to + zero crossing, peak,
and end samples
"""
max_dur = opts.duration[1]
if max_dur is None:
max_dur = MAXIMUM_DURATION # depends on [control=['if'], data=['max_dur']]
window = int(s_freq * max_dur)
peak_and_end = zeros((events.shape[0], 2), dtype='int')
events = concatenate((events, peak_and_end), axis=1)
selected = []
for ev in events:
zero_crossings = where(diff(sign(data[ev[2]:ev[0] + window])))[0]
if zero_crossings.any():
ev[4] = ev[2] + zero_crossings[0] + 1 # depends on [control=['if'], data=[]]
else:
#lg.info('0cross is at ' + str(ev[4]))
selected.append(False)
#lg.info('no 0cross, rejected')
continue
ev[3] = ev[2] + argmin(data[ev[2]:ev[4]])
if abs(data[ev[1]] - data[ev[3]]) < opts.min_ptp:
selected.append(False)
#lg.info('ptp too low, rejected: ' + str(abs(data[ev[1]] - data[ev[3]])))
continue # depends on [control=['if'], data=[]]
selected.append(True) # depends on [control=['for'], data=['ev']]
#lg.info('SW checks out, accepted! ptp is ' + str(abs(data[ev[1]] - data[ev[3]])))
return events[selected, :] |
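# Hedged usage sketch for _add_halfwave above: one cycle of a 1 Hz sine stands
# in for a slow wave, and SimpleNamespace mimics the DetectSlowWave options
# object (only .duration and .min_ptp are read). Assumes _add_halfwave and its
# bare numpy imports (zeros, concatenate, where, diff, sign, argmin) are in scope.
import numpy as np
from types import SimpleNamespace

s_freq = 100.0
t = np.arange(0, 2, 1 / s_freq)
data = np.sin(2 * np.pi * t)         # positive half-wave, then negative half-wave
events = np.array([[0, 25, 55]])     # start, extremum, end (sample indices)
opts = SimpleNamespace(duration=(0.5, 1.5), min_ptp=0.5)

out = _add_halfwave(data, events, s_freq, opts)
print(out)   # [[  0  25  55  75 101]] -- adds the negative peak and the -to+ crossing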
def mquery(self, lpAddress):
"""
Query memory information from the address space of the process.
Returns a L{win32.MemoryBasicInformation} object.
@see: U{http://msdn.microsoft.com/en-us/library/aa366907(VS.85).aspx}
@type lpAddress: int
@param lpAddress: Address of memory to query.
@rtype: L{win32.MemoryBasicInformation}
@return: Memory region information.
@raise WindowsError: On error an exception is raised.
"""
hProcess = self.get_handle(win32.PROCESS_QUERY_INFORMATION)
return win32.VirtualQueryEx(hProcess, lpAddress) | def function[mquery, parameter[self, lpAddress]]:
constant[
Query memory information from the address space of the process.
Returns a L{win32.MemoryBasicInformation} object.
@see: U{http://msdn.microsoft.com/en-us/library/aa366907(VS.85).aspx}
@type lpAddress: int
@param lpAddress: Address of memory to query.
@rtype: L{win32.MemoryBasicInformation}
@return: Memory region information.
@raise WindowsError: On error an exception is raised.
]
variable[hProcess] assign[=] call[name[self].get_handle, parameter[name[win32].PROCESS_QUERY_INFORMATION]]
return[call[name[win32].VirtualQueryEx, parameter[name[hProcess], name[lpAddress]]]] | keyword[def] identifier[mquery] ( identifier[self] , identifier[lpAddress] ):
literal[string]
identifier[hProcess] = identifier[self] . identifier[get_handle] ( identifier[win32] . identifier[PROCESS_QUERY_INFORMATION] )
keyword[return] identifier[win32] . identifier[VirtualQueryEx] ( identifier[hProcess] , identifier[lpAddress] ) | def mquery(self, lpAddress):
"""
Query memory information from the address space of the process.
Returns a L{win32.MemoryBasicInformation} object.
@see: U{http://msdn.microsoft.com/en-us/library/aa366907(VS.85).aspx}
@type lpAddress: int
@param lpAddress: Address of memory to query.
@rtype: L{win32.MemoryBasicInformation}
@return: Memory region information.
@raise WindowsError: On error an exception is raised.
"""
hProcess = self.get_handle(win32.PROCESS_QUERY_INFORMATION)
return win32.VirtualQueryEx(hProcess, lpAddress) |
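# Hedged usage sketch for mquery above, assuming the winappdbg-style Process
# class this method appears to come from; Windows only, and the PID and address
# are placeholders.
from winappdbg import Process   # assumption about the host library

proc = Process(1234)            # hypothetical PID of a debuggee
mbi = proc.mquery(0x00400000)   # query the region containing this address
print(hex(mbi.BaseAddress), mbi.RegionSize, mbi.State)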
def p_pvar_inst_def(self, p):
'''pvar_inst_def : IDENT LPAREN lconst_list RPAREN SEMI
| IDENT SEMI
| NOT IDENT LPAREN lconst_list RPAREN SEMI
| NOT IDENT SEMI
| IDENT LPAREN lconst_list RPAREN ASSIGN_EQUAL range_const SEMI
| IDENT ASSIGN_EQUAL range_const SEMI'''
if len(p) == 6:
p[0] = ((p[1], p[3]), True)
elif len(p) == 3:
p[0] = ((p[1], None), True)
elif len(p) == 7:
p[0] = ((p[2], p[4]), False)
elif len(p) == 4:
p[0] = ((p[2], None), False)
elif len(p) == 8:
p[0] = ((p[1], p[3]), p[6])
elif len(p) == 5:
p[0] = ((p[1], None), p[3]) | def function[p_pvar_inst_def, parameter[self, p]]:
constant[pvar_inst_def : IDENT LPAREN lconst_list RPAREN SEMI
| IDENT SEMI
| NOT IDENT LPAREN lconst_list RPAREN SEMI
| NOT IDENT SEMI
| IDENT LPAREN lconst_list RPAREN ASSIGN_EQUAL range_const SEMI
| IDENT ASSIGN_EQUAL range_const SEMI]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[6]] begin[:]
call[name[p]][constant[0]] assign[=] tuple[[<ast.Tuple object at 0x7da1b09152d0>, <ast.Constant object at 0x7da1b0917070>]] | keyword[def] identifier[p_pvar_inst_def] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=(( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), keyword[True] )
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=(( identifier[p] [ literal[int] ], keyword[None] ), keyword[True] )
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=(( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), keyword[False] )
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=(( identifier[p] [ literal[int] ], keyword[None] ), keyword[False] )
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=(( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), identifier[p] [ literal[int] ])
keyword[elif] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]=(( identifier[p] [ literal[int] ], keyword[None] ), identifier[p] [ literal[int] ]) | def p_pvar_inst_def(self, p):
"""pvar_inst_def : IDENT LPAREN lconst_list RPAREN SEMI
| IDENT SEMI
| NOT IDENT LPAREN lconst_list RPAREN SEMI
| NOT IDENT SEMI
| IDENT LPAREN lconst_list RPAREN ASSIGN_EQUAL range_const SEMI
| IDENT ASSIGN_EQUAL range_const SEMI"""
if len(p) == 6:
p[0] = ((p[1], p[3]), True) # depends on [control=['if'], data=[]]
elif len(p) == 3:
p[0] = ((p[1], None), True) # depends on [control=['if'], data=[]]
elif len(p) == 7:
p[0] = ((p[2], p[4]), False) # depends on [control=['if'], data=[]]
elif len(p) == 4:
p[0] = ((p[2], None), False) # depends on [control=['if'], data=[]]
elif len(p) == 8:
p[0] = ((p[1], p[3]), p[6]) # depends on [control=['if'], data=[]]
elif len(p) == 5:
p[0] = ((p[1], None), p[3]) # depends on [control=['if'], data=[]] |
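# Hedged illustration for p_pvar_inst_def above: how each production maps to
# the ((name, args), value) pair stored in p[0]. The RDDL-style snippets on the
# left are illustrative, not taken from a verified grammar test suite.
#
#     running(r1);            ->  (('running', ['r1']), True)    # len(p) == 6
#     alive;                  ->  (('alive', None), True)        # len(p) == 3
#     ~running(r1);           ->  (('running', ['r1']), False)   # len(p) == 7
#     ~alive;                 ->  (('alive', None), False)       # len(p) == 4
#     cost(a1, a2) = 1.5;     ->  (('cost', ['a1', 'a2']), 1.5)  # len(p) == 8
#     temperature = 20.5;     ->  (('temperature', None), 20.5)  # len(p) == 5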
def generateUnabridgedAPI(self):
'''
Generates the unabridged (full) API listing into ``self.unabridged_api_file``.
This is necessary as some items may not show up in either hierarchy view,
depending on:
1. The item. For example, if a namespace has only one member which is a
variable, then neither the namespace nor the variable will be declared in the
class view hierarchy. It will be present in the file page it was declared in
but not on the main library page.
2. The configurations of Doxygen. For example, see the warning in
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. Items whose parents cannot
        be rediscovered without the programlisting will still be documented, their
link appearing in the unabridged API listing.
Currently, the API is generated in the following (somewhat arbitrary) order:
- Namespaces
- Classes and Structs
- Enums
- Unions
- Functions
- Variables
- Defines
- Typedefs
- Directories
- Files
If you want to change the ordering, just change the order of the calls to
:func:`~exhale.graph.ExhaleRoot.enumerateAll` in this method.
'''
####flake8fail
# TODO: I've reverted my decision, the full API should include everything,
# including nested types. the code below invalidates certain portions of the
# docs and probably gets rid of a need for the recursive find methods
all_namespaces = []
all_class_like = []
all_enums = []
all_unions = []
all_functions = []
all_variables = []
all_defines = []
all_typedefs = []
all_dirs = []
all_files = []
for node in self.all_nodes:
if node.kind == "namespace":
all_namespaces.append(node)
elif node.kind == "class" or node.kind == "struct":
all_class_like.append(node)
elif node.kind == "enum":
all_enums.append(node)
elif node.kind == "union":
all_unions.append(node)
elif node.kind == "function":
all_functions.append(node)
elif node.kind == "variable":
all_variables.append(node)
elif node.kind == "define":
all_defines.append(node)
elif node.kind == "typedef":
all_typedefs.append(node)
elif node.kind == "dir":
all_dirs.append(node)
elif node.kind == "file":
all_files.append(node)
try:
with codecs.open(self.unabridged_api_file, "w", "utf-8") as full_api_file:
# write the header
full_api_file.write(textwrap.dedent('''
{heading}
{heading_mark}
'''.format(
heading=configs.fullApiSubSectionTitle,
heading_mark=utils.heading_mark(
configs.fullApiSubSectionTitle,
configs.SUB_SECTION_HEADING_CHAR
)
)))
# write everything to file: reorder these lines for different outcomes
self.enumerateAll( "Namespaces", all_namespaces, full_api_file)
self.enumerateAll("Classes and Structs", all_class_like, full_api_file)
self.enumerateAll( "Enums", all_enums, full_api_file)
self.enumerateAll( "Unions", all_unions, full_api_file)
self.enumerateAll( "Functions", all_functions, full_api_file)
self.enumerateAll( "Variables", all_variables, full_api_file)
self.enumerateAll( "Defines", all_defines, full_api_file)
self.enumerateAll( "Typedefs", all_typedefs, full_api_file)
self.enumerateAll( "Directories", all_dirs, full_api_file)
self.enumerateAll( "Files", all_files, full_api_file)
except:
utils.fancyError("Error writing the unabridged API.") | def function[generateUnabridgedAPI, parameter[self]]:
constant[
Generates the unabridged (full) API listing into ``self.unabridged_api_file``.
This is necessary as some items may not show up in either hierarchy view,
depending on:
1. The item. For example, if a namespace has only one member which is a
variable, then neither the namespace nor the variable will be declared in the
class view hierarchy. It will be present in the file page it was declared in
but not on the main library page.
2. The configurations of Doxygen. For example, see the warning in
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. Items whose parents cannot
        be rediscovered without the programlisting will still be documented, their
link appearing in the unabridged API listing.
Currently, the API is generated in the following (somewhat arbitrary) order:
- Namespaces
- Classes and Structs
- Enums
- Unions
- Functions
- Variables
- Defines
- Typedefs
- Directories
- Files
If you want to change the ordering, just change the order of the calls to
:func:`~exhale.graph.ExhaleRoot.enumerateAll` in this method.
]
variable[all_namespaces] assign[=] list[[]]
variable[all_class_like] assign[=] list[[]]
variable[all_enums] assign[=] list[[]]
variable[all_unions] assign[=] list[[]]
variable[all_functions] assign[=] list[[]]
variable[all_variables] assign[=] list[[]]
variable[all_defines] assign[=] list[[]]
variable[all_typedefs] assign[=] list[[]]
variable[all_dirs] assign[=] list[[]]
variable[all_files] assign[=] list[[]]
for taget[name[node]] in starred[name[self].all_nodes] begin[:]
if compare[name[node].kind equal[==] constant[namespace]] begin[:]
call[name[all_namespaces].append, parameter[name[node]]]
<ast.Try object at 0x7da1b068ecb0> | keyword[def] identifier[generateUnabridgedAPI] ( identifier[self] ):
literal[string]
identifier[all_namespaces] =[]
identifier[all_class_like] =[]
identifier[all_enums] =[]
identifier[all_unions] =[]
identifier[all_functions] =[]
identifier[all_variables] =[]
identifier[all_defines] =[]
identifier[all_typedefs] =[]
identifier[all_dirs] =[]
identifier[all_files] =[]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[all_nodes] :
keyword[if] identifier[node] . identifier[kind] == literal[string] :
identifier[all_namespaces] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] keyword[or] identifier[node] . identifier[kind] == literal[string] :
identifier[all_class_like] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_enums] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_unions] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_functions] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_variables] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_defines] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_typedefs] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_dirs] . identifier[append] ( identifier[node] )
keyword[elif] identifier[node] . identifier[kind] == literal[string] :
identifier[all_files] . identifier[append] ( identifier[node] )
keyword[try] :
keyword[with] identifier[codecs] . identifier[open] ( identifier[self] . identifier[unabridged_api_file] , literal[string] , literal[string] ) keyword[as] identifier[full_api_file] :
identifier[full_api_file] . identifier[write] ( identifier[textwrap] . identifier[dedent] ( literal[string] . identifier[format] (
identifier[heading] = identifier[configs] . identifier[fullApiSubSectionTitle] ,
identifier[heading_mark] = identifier[utils] . identifier[heading_mark] (
identifier[configs] . identifier[fullApiSubSectionTitle] ,
identifier[configs] . identifier[SUB_SECTION_HEADING_CHAR]
)
)))
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_namespaces] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_class_like] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_enums] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_unions] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_functions] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_variables] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_defines] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_typedefs] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_dirs] , identifier[full_api_file] )
identifier[self] . identifier[enumerateAll] ( literal[string] , identifier[all_files] , identifier[full_api_file] )
keyword[except] :
identifier[utils] . identifier[fancyError] ( literal[string] ) | def generateUnabridgedAPI(self):
"""
Generates the unabridged (full) API listing into ``self.unabridged_api_file``.
This is necessary as some items may not show up in either hierarchy view,
depending on:
1. The item. For example, if a namespace has only one member which is a
variable, then neither the namespace nor the variable will be declared in the
class view hierarchy. It will be present in the file page it was declared in
but not on the main library page.
2. The configurations of Doxygen. For example, see the warning in
:func:`~exhale.graph.ExhaleRoot.fileRefDiscovery`. Items whose parents cannot
be rediscovered withouth the programlisting will still be documented, their
link appearing in the unabridged API listing.
Currently, the API is generated in the following (somewhat arbitrary) order:
- Namespaces
- Classes and Structs
- Enums
- Unions
- Functions
- Variables
- Defines
- Typedefs
- Directories
- Files
If you want to change the ordering, just change the order of the calls to
:func:`~exhale.graph.ExhaleRoot.enumerateAll` in this method.
"""
####flake8fail
# TODO: I've reverted my decision, the full API should include everything,
# including nested types. the code below invalidates certain portions of the
# docs and probably gets rid of a need for the recursive find methods
all_namespaces = []
all_class_like = []
all_enums = []
all_unions = []
all_functions = []
all_variables = []
all_defines = []
all_typedefs = []
all_dirs = []
all_files = []
for node in self.all_nodes:
if node.kind == 'namespace':
all_namespaces.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'class' or node.kind == 'struct':
all_class_like.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'enum':
all_enums.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'union':
all_unions.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'function':
all_functions.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'variable':
all_variables.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'define':
all_defines.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'typedef':
all_typedefs.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'dir':
all_dirs.append(node) # depends on [control=['if'], data=[]]
elif node.kind == 'file':
all_files.append(node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
try:
with codecs.open(self.unabridged_api_file, 'w', 'utf-8') as full_api_file:
# write the header
full_api_file.write(textwrap.dedent('\n {heading}\n {heading_mark}\n\n '.format(heading=configs.fullApiSubSectionTitle, heading_mark=utils.heading_mark(configs.fullApiSubSectionTitle, configs.SUB_SECTION_HEADING_CHAR))))
# write everything to file: reorder these lines for different outcomes
self.enumerateAll('Namespaces', all_namespaces, full_api_file)
self.enumerateAll('Classes and Structs', all_class_like, full_api_file)
self.enumerateAll('Enums', all_enums, full_api_file)
self.enumerateAll('Unions', all_unions, full_api_file)
self.enumerateAll('Functions', all_functions, full_api_file)
self.enumerateAll('Variables', all_variables, full_api_file)
self.enumerateAll('Defines', all_defines, full_api_file)
self.enumerateAll('Typedefs', all_typedefs, full_api_file)
self.enumerateAll('Directories', all_dirs, full_api_file)
self.enumerateAll('Files', all_files, full_api_file) # depends on [control=['with'], data=['full_api_file']] # depends on [control=['try'], data=[]]
except:
utils.fancyError('Error writing the unabridged API.') # depends on [control=['except'], data=[]] |
def _other_wrapper(self, name, writing):
"""Wrap a stream attribute in an other_wrapper.
Args:
name: the name of the stream attribute to wrap.
Returns:
other_wrapper which is described below.
"""
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
"""Wrap all other calls to the stream Object.
We do this to track changes to the write pointer. Anything that
moves the write pointer in a file open for appending should move
the read pointer as well.
Args:
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
Wrapped stream object method.
"""
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0
if not writing or not IS_PY2:
return ret_value
return other_wrapper | def function[_other_wrapper, parameter[self, name, writing]]:
constant[Wrap a stream attribute in an other_wrapper.
Args:
name: the name of the stream attribute to wrap.
Returns:
other_wrapper which is described below.
]
variable[io_attr] assign[=] call[name[getattr], parameter[name[self]._io, name[name]]]
def function[other_wrapper, parameter[]]:
constant[Wrap all other calls to the stream Object.
We do this to track changes to the write pointer. Anything that
moves the write pointer in a file open for appending should move
the read pointer as well.
Args:
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
Wrapped stream object method.
]
variable[write_seek] assign[=] call[name[self]._io.tell, parameter[]]
variable[ret_value] assign[=] call[name[io_attr], parameter[<ast.Starred object at 0x7da18ede7190>]]
if compare[name[write_seek] not_equal[!=] call[name[self]._io.tell, parameter[]]] begin[:]
name[self]._read_seek assign[=] call[name[self]._io.tell, parameter[]]
name[self]._read_whence assign[=] constant[0]
if <ast.BoolOp object at 0x7da18ede6590> begin[:]
return[name[ret_value]]
return[name[other_wrapper]] | keyword[def] identifier[_other_wrapper] ( identifier[self] , identifier[name] , identifier[writing] ):
literal[string]
identifier[io_attr] = identifier[getattr] ( identifier[self] . identifier[_io] , identifier[name] )
keyword[def] identifier[other_wrapper] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[write_seek] = identifier[self] . identifier[_io] . identifier[tell] ()
identifier[ret_value] = identifier[io_attr] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[write_seek] != identifier[self] . identifier[_io] . identifier[tell] ():
identifier[self] . identifier[_read_seek] = identifier[self] . identifier[_io] . identifier[tell] ()
identifier[self] . identifier[_read_whence] = literal[int]
keyword[if] keyword[not] identifier[writing] keyword[or] keyword[not] identifier[IS_PY2] :
keyword[return] identifier[ret_value]
keyword[return] identifier[other_wrapper] | def _other_wrapper(self, name, writing):
"""Wrap a stream attribute in an other_wrapper.
Args:
name: the name of the stream attribute to wrap.
Returns:
other_wrapper which is described below.
"""
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
"""Wrap all other calls to the stream Object.
We do this to track changes to the write pointer. Anything that
moves the write pointer in a file open for appending should move
the read pointer as well.
Args:
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
Wrapped stream object method.
"""
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0 # depends on [control=['if'], data=[]]
if not writing or not IS_PY2:
return ret_value # depends on [control=['if'], data=[]]
return other_wrapper |
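# Hedged illustration of the behavior _other_wrapper preserves: in append mode,
# anything that moves the write pointer moves the read pointer too, so a read()
# right after a write() starts at end-of-file. Plain CPython file objects behave
# this way; the wrapper mirrors it for the fake filesystem.
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), 'other_wrapper_demo.txt')
with open(path, 'w') as f:
    f.write('hello\n')
with open(path, 'a+') as f:
    f.write('world\n')      # write pointer (and thus read pointer) is at EOF
    print(repr(f.read()))   # '' -- nothing left to read
    f.seek(0)
    print(repr(f.read()))   # 'hello\nworld\n'
os.remove(path)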
def hash_blocks(text, hashes):
"""Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
preserved as-is, without any Markdown processing. See block_tags
for a list of block tags.
"""
def sub(match):
block = match.group(1)
hashed = hash_text(block, 'block')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_block.sub(sub, text) | def function[hash_blocks, parameter[text, hashes]]:
constant[Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
preserved as-is, without any Markdown processing. See block_tags
for a list of block tags.
]
def function[sub, parameter[match]]:
variable[block] assign[=] call[name[match].group, parameter[constant[1]]]
variable[hashed] assign[=] call[name[hash_text], parameter[name[block], constant[block]]]
call[name[hashes]][name[hashed]] assign[=] name[block]
return[binary_operation[binary_operation[constant[
] + name[hashed]] + constant[
]]]
return[call[name[re_block].sub, parameter[name[sub], name[text]]]] | keyword[def] identifier[hash_blocks] ( identifier[text] , identifier[hashes] ):
literal[string]
keyword[def] identifier[sub] ( identifier[match] ):
identifier[block] = identifier[match] . identifier[group] ( literal[int] )
identifier[hashed] = identifier[hash_text] ( identifier[block] , literal[string] )
identifier[hashes] [ identifier[hashed] ]= identifier[block]
keyword[return] literal[string] + identifier[hashed] + literal[string]
keyword[return] identifier[re_block] . identifier[sub] ( identifier[sub] , identifier[text] ) | def hash_blocks(text, hashes):
"""Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
preserved as-is, without any Markdown processing. See block_tags
for a list of block tags.
"""
def sub(match):
block = match.group(1)
hashed = hash_text(block, 'block')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_block.sub(sub, text) |
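# Hedged usage sketch for hash_blocks above; assumes hash_blocks, hash_text and
# re_block are in scope from the same module, and that re_block matches a
# blank-line-delimited <div> block (the exact pattern is not shown here).
hashes = {}
text = 'before\n\n<div>\n*not emphasized*\n</div>\n\nafter'
hashed = hash_blocks(text, hashes)
# `hashed` now carries an opaque placeholder where the <div> block stood, and
# `hashes` maps that placeholder back to the verbatim '<div>...</div>' markup
# so it can be restored untouched after Markdown processing.
for placeholder, block in hashes.items():
    print(placeholder in hashed, block.splitlines()[0])   # True '<div>'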
def get_fields(schema, exclude_dump_only=False):
"""Return fields from schema
:param Schema schema: A marshmallow Schema instance or a class object
:param bool exclude_dump_only: whether to filter fields in Meta.dump_only
    :rtype: dict of (field name, field object) pairs
"""
if hasattr(schema, "fields"):
fields = schema.fields
elif hasattr(schema, "_declared_fields"):
fields = copy.deepcopy(schema._declared_fields)
else:
raise ValueError(
"{!r} doesn't have either `fields` or `_declared_fields`.".format(schema)
)
Meta = getattr(schema, "Meta", None)
warn_if_fields_defined_in_meta(fields, Meta)
return filter_excluded_fields(fields, Meta, exclude_dump_only) | def function[get_fields, parameter[schema, exclude_dump_only]]:
constant[Return fields from schema
:param Schema schema: A marshmallow Schema instance or a class object
:param bool exclude_dump_only: whether to filter fields in Meta.dump_only
    :rtype: dict of (field name, field object) pairs
]
if call[name[hasattr], parameter[name[schema], constant[fields]]] begin[:]
variable[fields] assign[=] name[schema].fields
variable[Meta] assign[=] call[name[getattr], parameter[name[schema], constant[Meta], constant[None]]]
call[name[warn_if_fields_defined_in_meta], parameter[name[fields], name[Meta]]]
return[call[name[filter_excluded_fields], parameter[name[fields], name[Meta], name[exclude_dump_only]]]] | keyword[def] identifier[get_fields] ( identifier[schema] , identifier[exclude_dump_only] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[schema] , literal[string] ):
identifier[fields] = identifier[schema] . identifier[fields]
keyword[elif] identifier[hasattr] ( identifier[schema] , literal[string] ):
identifier[fields] = identifier[copy] . identifier[deepcopy] ( identifier[schema] . identifier[_declared_fields] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[schema] )
)
identifier[Meta] = identifier[getattr] ( identifier[schema] , literal[string] , keyword[None] )
identifier[warn_if_fields_defined_in_meta] ( identifier[fields] , identifier[Meta] )
keyword[return] identifier[filter_excluded_fields] ( identifier[fields] , identifier[Meta] , identifier[exclude_dump_only] ) | def get_fields(schema, exclude_dump_only=False):
"""Return fields from schema
:param Schema schema: A marshmallow Schema instance or a class object
:param bool exclude_dump_only: whether to filter fields in Meta.dump_only
    :rtype: dict of (field name, field object) pairs
"""
if hasattr(schema, 'fields'):
fields = schema.fields # depends on [control=['if'], data=[]]
elif hasattr(schema, '_declared_fields'):
fields = copy.deepcopy(schema._declared_fields) # depends on [control=['if'], data=[]]
else:
raise ValueError("{!r} doesn't have either `fields` or `_declared_fields`.".format(schema))
Meta = getattr(schema, 'Meta', None)
warn_if_fields_defined_in_meta(fields, Meta)
return filter_excluded_fields(fields, Meta, exclude_dump_only) |
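# Hedged usage sketch for get_fields above, with a minimal marshmallow schema;
# exclude_dump_only follows the docstring and filters the names listed in
# Meta.dump_only.
from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.Str()
    created_at = fields.DateTime()

    class Meta:
        dump_only = ('created_at',)

print(sorted(get_fields(UserSchema())))                          # ['created_at', 'name']
print(sorted(get_fields(UserSchema(), exclude_dump_only=True)))  # ['name']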
def clip_foreign(network):
"""
Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
    network : :class:`pypsa.Network`
Overall container of PyPSA
Returns
-------
    network : :class:`pypsa.Network`
Overall container of PyPSA
"""
# get foreign buses by country
foreign_buses = network.buses[network.buses.country_code != 'DE']
network.buses = network.buses.drop(
network.buses.loc[foreign_buses.index].index)
# identify transborder lines (one bus foreign, one bus not) and the country
# it is coming from
"""transborder_lines = pd.DataFrame(index=network.lines[
((network.lines['bus0'].isin(network.buses.index) == False) &
(network.lines['bus1'].isin(network.buses.index) == True)) |
((network.lines['bus0'].isin(network.buses.index) == True) &
(network.lines['bus1'].isin(network.buses.index) == False))].index)
transborder_lines['bus0'] = network.lines['bus0']
transborder_lines['bus1'] = network.lines['bus1']
transborder_lines['country'] = ""
for i in range(0, len(transborder_lines)):
if transborder_lines.iloc[i, 0] in foreign_buses.index:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 0])]
else:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 1])]
# identify amount of flows per line and group to get flow per country
transborder_flows = network.lines_t.p0[transborder_lines.index]
for i in transborder_flows.columns:
if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:
transborder_flows.loc[:, str(
i)] = transborder_flows.loc[:, str(i)]*-1
network.foreign_trade = transborder_flows.\
groupby(transborder_lines['country'], axis=1).sum()"""
# drop foreign components
network.lines = network.lines.drop(network.lines[
(network.lines['bus0'].isin(network.buses.index) == False) |
(network.lines['bus1'].isin(network.buses.index) == False)].index)
network.links = network.links.drop(network.links[
(network.links['bus0'].isin(network.buses.index) == False) |
(network.links['bus1'].isin(network.buses.index) == False)].index)
network.transformers = network.transformers.drop(network.transformers[
(network.transformers['bus0'].isin(network.buses.index) == False) |
(network.transformers['bus1'].isin(network.
buses.index) == False)].index)
network.generators = network.generators.drop(network.generators[
(network.generators['bus'].isin(network.buses.index) == False)].index)
network.loads = network.loads.drop(network.loads[
(network.loads['bus'].isin(network.buses.index) == False)].index)
network.storage_units = network.storage_units.drop(network.storage_units[
(network.storage_units['bus'].isin(network.
buses.index) == False)].index)
components = ['loads', 'generators', 'lines', 'buses', 'transformers',
'links']
for g in components: # loads_t
h = g + '_t'
nw = getattr(network, h) # network.loads_t
for i in nw.keys(): # network.loads_t.p
cols = [j for j in getattr(
nw, i).columns if j not in getattr(network, g).index]
for k in cols:
del getattr(nw, i)[k]
return network | def function[clip_foreign, parameter[network]]:
constant[
Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
    network : :class:`pypsa.Network`
Overall container of PyPSA
Returns
-------
    network : :class:`pypsa.Network`
Overall container of PyPSA
]
variable[foreign_buses] assign[=] call[name[network].buses][compare[name[network].buses.country_code not_equal[!=] constant[DE]]]
name[network].buses assign[=] call[name[network].buses.drop, parameter[call[name[network].buses.loc][name[foreign_buses].index].index]]
constant[transborder_lines = pd.DataFrame(index=network.lines[
((network.lines['bus0'].isin(network.buses.index) == False) &
(network.lines['bus1'].isin(network.buses.index) == True)) |
((network.lines['bus0'].isin(network.buses.index) == True) &
(network.lines['bus1'].isin(network.buses.index) == False))].index)
transborder_lines['bus0'] = network.lines['bus0']
transborder_lines['bus1'] = network.lines['bus1']
transborder_lines['country'] = ""
for i in range(0, len(transborder_lines)):
if transborder_lines.iloc[i, 0] in foreign_buses.index:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 0])]
else:
transborder_lines['country'][i] = foreign_buses[str(
transborder_lines.iloc[i, 1])]
# identify amount of flows per line and group to get flow per country
transborder_flows = network.lines_t.p0[transborder_lines.index]
for i in transborder_flows.columns:
if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:
transborder_flows.loc[:, str(
i)] = transborder_flows.loc[:, str(i)]*-1
network.foreign_trade = transborder_flows. groupby(transborder_lines['country'], axis=1).sum()]
name[network].lines assign[=] call[name[network].lines.drop, parameter[call[name[network].lines][binary_operation[compare[call[call[name[network].lines][constant[bus0]].isin, parameter[name[network].buses.index]] equal[==] constant[False]] <ast.BitOr object at 0x7da2590d6aa0> compare[call[call[name[network].lines][constant[bus1]].isin, parameter[name[network].buses.index]] equal[==] constant[False]]]].index]]
name[network].links assign[=] call[name[network].links.drop, parameter[call[name[network].links][binary_operation[compare[call[call[name[network].links][constant[bus0]].isin, parameter[name[network].buses.index]] equal[==] constant[False]] <ast.BitOr object at 0x7da2590d6aa0> compare[call[call[name[network].links][constant[bus1]].isin, parameter[name[network].buses.index]] equal[==] constant[False]]]].index]]
name[network].transformers assign[=] call[name[network].transformers.drop, parameter[call[name[network].transformers][binary_operation[compare[call[call[name[network].transformers][constant[bus0]].isin, parameter[name[network].buses.index]] equal[==] constant[False]] <ast.BitOr object at 0x7da2590d6aa0> compare[call[call[name[network].transformers][constant[bus1]].isin, parameter[name[network].buses.index]] equal[==] constant[False]]]].index]]
name[network].generators assign[=] call[name[network].generators.drop, parameter[call[name[network].generators][compare[call[call[name[network].generators][constant[bus]].isin, parameter[name[network].buses.index]] equal[==] constant[False]]].index]]
name[network].loads assign[=] call[name[network].loads.drop, parameter[call[name[network].loads][compare[call[call[name[network].loads][constant[bus]].isin, parameter[name[network].buses.index]] equal[==] constant[False]]].index]]
name[network].storage_units assign[=] call[name[network].storage_units.drop, parameter[call[name[network].storage_units][compare[call[call[name[network].storage_units][constant[bus]].isin, parameter[name[network].buses.index]] equal[==] constant[False]]].index]]
variable[components] assign[=] list[[<ast.Constant object at 0x7da2041dbdc0>, <ast.Constant object at 0x7da2041da260>, <ast.Constant object at 0x7da2041d9e40>, <ast.Constant object at 0x7da2041d9450>, <ast.Constant object at 0x7da2041daa40>, <ast.Constant object at 0x7da2041da800>]]
for taget[name[g]] in starred[name[components]] begin[:]
variable[h] assign[=] binary_operation[name[g] + constant[_t]]
variable[nw] assign[=] call[name[getattr], parameter[name[network], name[h]]]
for taget[name[i]] in starred[call[name[nw].keys, parameter[]]] begin[:]
variable[cols] assign[=] <ast.ListComp object at 0x7da2041d83d0>
for taget[name[k]] in starred[name[cols]] begin[:]
<ast.Delete object at 0x7da2041dbc10>
return[name[network]] | keyword[def] identifier[clip_foreign] ( identifier[network] ):
literal[string]
identifier[foreign_buses] = identifier[network] . identifier[buses] [ identifier[network] . identifier[buses] . identifier[country_code] != literal[string] ]
identifier[network] . identifier[buses] = identifier[network] . identifier[buses] . identifier[drop] (
identifier[network] . identifier[buses] . identifier[loc] [ identifier[foreign_buses] . identifier[index] ]. identifier[index] )
literal[string]
identifier[network] . identifier[lines] = identifier[network] . identifier[lines] . identifier[drop] ( identifier[network] . identifier[lines] [
( identifier[network] . identifier[lines] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )|
( identifier[network] . identifier[lines] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )]. identifier[index] )
identifier[network] . identifier[links] = identifier[network] . identifier[links] . identifier[drop] ( identifier[network] . identifier[links] [
( identifier[network] . identifier[links] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )|
( identifier[network] . identifier[links] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )]. identifier[index] )
identifier[network] . identifier[transformers] = identifier[network] . identifier[transformers] . identifier[drop] ( identifier[network] . identifier[transformers] [
( identifier[network] . identifier[transformers] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )|
( identifier[network] . identifier[transformers] [ literal[string] ]. identifier[isin] ( identifier[network] .
identifier[buses] . identifier[index] )== keyword[False] )]. identifier[index] )
identifier[network] . identifier[generators] = identifier[network] . identifier[generators] . identifier[drop] ( identifier[network] . identifier[generators] [
( identifier[network] . identifier[generators] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )]. identifier[index] )
identifier[network] . identifier[loads] = identifier[network] . identifier[loads] . identifier[drop] ( identifier[network] . identifier[loads] [
( identifier[network] . identifier[loads] [ literal[string] ]. identifier[isin] ( identifier[network] . identifier[buses] . identifier[index] )== keyword[False] )]. identifier[index] )
identifier[network] . identifier[storage_units] = identifier[network] . identifier[storage_units] . identifier[drop] ( identifier[network] . identifier[storage_units] [
( identifier[network] . identifier[storage_units] [ literal[string] ]. identifier[isin] ( identifier[network] .
identifier[buses] . identifier[index] )== keyword[False] )]. identifier[index] )
identifier[components] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ]
keyword[for] identifier[g] keyword[in] identifier[components] :
identifier[h] = identifier[g] + literal[string]
identifier[nw] = identifier[getattr] ( identifier[network] , identifier[h] )
keyword[for] identifier[i] keyword[in] identifier[nw] . identifier[keys] ():
identifier[cols] =[ identifier[j] keyword[for] identifier[j] keyword[in] identifier[getattr] (
identifier[nw] , identifier[i] ). identifier[columns] keyword[if] identifier[j] keyword[not] keyword[in] identifier[getattr] ( identifier[network] , identifier[g] ). identifier[index] ]
keyword[for] identifier[k] keyword[in] identifier[cols] :
keyword[del] identifier[getattr] ( identifier[nw] , identifier[i] )[ identifier[k] ]
keyword[return] identifier[network] | def clip_foreign(network):
"""
Delete all components and timelines located outside of Germany.
Add transborder flows divided by country of origin as
network.foreign_trade.
Parameters
----------
    network : :class:`pypsa.Network`
Overall container of PyPSA
Returns
-------
    network : :class:`pypsa.Network`
Overall container of PyPSA
"""
# get foreign buses by country
foreign_buses = network.buses[network.buses.country_code != 'DE']
network.buses = network.buses.drop(network.buses.loc[foreign_buses.index].index)
# identify transborder lines (one bus foreign, one bus not) and the country
# it is coming from
    """transborder_lines = pd.DataFrame(index=network.lines[
        ((network.lines['bus0'].isin(network.buses.index) == False) &
         (network.lines['bus1'].isin(network.buses.index) == True)) |
        ((network.lines['bus0'].isin(network.buses.index) == True) &
         (network.lines['bus1'].isin(network.buses.index) == False))].index)
    transborder_lines['bus0'] = network.lines['bus0']
    transborder_lines['bus1'] = network.lines['bus1']
    transborder_lines['country'] = ""
    for i in range(0, len(transborder_lines)):
        if transborder_lines.iloc[i, 0] in foreign_buses.index:
            transborder_lines['country'][i] = foreign_buses[str(
                transborder_lines.iloc[i, 0])]
        else:
            transborder_lines['country'][i] = foreign_buses[str(
                transborder_lines.iloc[i, 1])]

    # identify amount of flows per line and group to get flow per country
    transborder_flows = network.lines_t.p0[transborder_lines.index]
    for i in transborder_flows.columns:
        if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:
            transborder_flows.loc[:, str(
                i)] = transborder_flows.loc[:, str(i)]*-1

    network.foreign_trade = transborder_flows.groupby(
        transborder_lines['country'], axis=1).sum()"""
# drop foreign components
network.lines = network.lines.drop(network.lines[(network.lines['bus0'].isin(network.buses.index) == False) | (network.lines['bus1'].isin(network.buses.index) == False)].index)
network.links = network.links.drop(network.links[(network.links['bus0'].isin(network.buses.index) == False) | (network.links['bus1'].isin(network.buses.index) == False)].index)
network.transformers = network.transformers.drop(network.transformers[(network.transformers['bus0'].isin(network.buses.index) == False) | (network.transformers['bus1'].isin(network.buses.index) == False)].index)
network.generators = network.generators.drop(network.generators[network.generators['bus'].isin(network.buses.index) == False].index)
network.loads = network.loads.drop(network.loads[network.loads['bus'].isin(network.buses.index) == False].index)
network.storage_units = network.storage_units.drop(network.storage_units[network.storage_units['bus'].isin(network.buses.index) == False].index)
components = ['loads', 'generators', 'lines', 'buses', 'transformers', 'links']
for g in components: # loads_t
h = g + '_t'
nw = getattr(network, h) # network.loads_t
for i in nw.keys(): # network.loads_t.p
cols = [j for j in getattr(nw, i).columns if j not in getattr(network, g).index]
for k in cols:
del getattr(nw, i)[k] # depends on [control=['for'], data=['k']] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['g']]
return network |
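A minimal sketch of the pruning pattern clip_foreign relies on, reduced to plain pandas so it runs without a PyPSA network; the frames and index labels here are made up for illustration.

import pandas as pd

buses = pd.DataFrame({'country_code': ['DE', 'DE', 'AT']}, index=['b0', 'b1', 'b2'])
lines = pd.DataFrame({'bus0': ['b0', 'b1'], 'bus1': ['b1', 'b2']}, index=['l0', 'l1'])

# drop non-German buses, then drop every line that touches a removed bus
buses = buses[buses.country_code == 'DE']
lines = lines.drop(lines[(lines['bus0'].isin(buses.index) == False) |
                         (lines['bus1'].isin(buses.index) == False)].index)
print(lines.index.tolist())  # ['l0'] -- 'l1' crossed the border and was removed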
def GenomicRangeFromString(range_string,payload=None,dir=None):
"""Constructor for a GenomicRange object that takes a string"""
  m = re.match(r'^(.+):(\d+)-(\d+)$', range_string)
if not m:
sys.stderr.write("ERROR bad genomic range string\n"+range_string+"\n")
sys.exit()
chr = m.group(1)
start = int(m.group(2))
end = int(m.group(3))
return GenomicRange(chr,start,end,payload,dir) | def function[GenomicRangeFromString, parameter[range_string, payload, dir]]:
constant[Constructor for a GenomicRange object that takes a string]
variable[m] assign[=] call[name[re].match, parameter[constant[^(.+):(\d+)-(\d+)$], name[range_string]]]
if <ast.UnaryOp object at 0x7da18f58f190> begin[:]
call[name[sys].stderr.write, parameter[binary_operation[binary_operation[constant[ERROR bad genomic range string
] + name[range_string]] + constant[
]]]]
call[name[sys].exit, parameter[]]
variable[chr] assign[=] call[name[m].group, parameter[constant[1]]]
variable[start] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[2]]]]]
variable[end] assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[3]]]]]
return[call[name[GenomicRange], parameter[name[chr], name[start], name[end], name[payload], name[dir]]]] | keyword[def] identifier[GenomicRangeFromString] ( identifier[range_string] , identifier[payload] = keyword[None] , identifier[dir] = keyword[None] ):
literal[string]
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[range_string] )
keyword[if] keyword[not] identifier[m] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] + identifier[range_string] + literal[string] )
identifier[sys] . identifier[exit] ()
identifier[chr] = identifier[m] . identifier[group] ( literal[int] )
identifier[start] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
identifier[end] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] ))
keyword[return] identifier[GenomicRange] ( identifier[chr] , identifier[start] , identifier[end] , identifier[payload] , identifier[dir] ) | def GenomicRangeFromString(range_string, payload=None, dir=None):
"""Constructor for a GenomicRange object that takes a string"""
m = re.match('^(.+):(\\d+)-(\\d+)$', range_string)
if not m:
sys.stderr.write('ERROR bad genomic range string\n' + range_string + '\n')
sys.exit() # depends on [control=['if'], data=[]]
chr = m.group(1)
start = int(m.group(2))
end = int(m.group(3))
return GenomicRange(chr, start, end, payload, dir) |
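A quick usage sketch, assuming the function above is in scope; GenomicRange itself is defined elsewhere in the module, so a namedtuple stands in for it here.

from collections import namedtuple

# hypothetical stand-in for the real GenomicRange class
GenomicRange = namedtuple('GenomicRange', ['chr', 'start', 'end', 'payload', 'dir'])

rng = GenomicRangeFromString('chr7:140453136-140453137')
print(rng.chr, rng.start, rng.end)  # chr7 140453136 140453137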
def codepoint_included(self, codepoint):
"""Check if codepoint matches any of the defined codepoints."""
if self.codepoints == None:
return True
for cp in self.codepoints:
mismatch = False
for i in range(len(cp)):
if (cp[i] is not None) and (cp[i] != codepoint[i]):
mismatch = True
break
if not mismatch:
return True
return False | def function[codepoint_included, parameter[self, codepoint]]:
constant[Check if codepoint matches any of the defined codepoints.]
if compare[name[self].codepoints equal[==] constant[None]] begin[:]
return[constant[True]]
for taget[name[cp]] in starred[name[self].codepoints] begin[:]
variable[mismatch] assign[=] constant[False]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[cp]]]]]] begin[:]
if <ast.BoolOp object at 0x7da18f00d960> begin[:]
variable[mismatch] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da18f722260> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[codepoint_included] ( identifier[self] , identifier[codepoint] ):
literal[string]
keyword[if] identifier[self] . identifier[codepoints] == keyword[None] :
keyword[return] keyword[True]
keyword[for] identifier[cp] keyword[in] identifier[self] . identifier[codepoints] :
identifier[mismatch] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[cp] )):
keyword[if] ( identifier[cp] [ identifier[i] ] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[cp] [ identifier[i] ]!= identifier[codepoint] [ identifier[i] ]):
identifier[mismatch] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[mismatch] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def codepoint_included(self, codepoint):
"""Check if codepoint matches any of the defined codepoints."""
if self.codepoints == None:
return True # depends on [control=['if'], data=[]]
for cp in self.codepoints:
mismatch = False
for i in range(len(cp)):
if cp[i] is not None and cp[i] != codepoint[i]:
mismatch = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if not mismatch:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cp']]
return False |
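The matching rule above treats None inside a pattern tuple as a wildcard position. A standalone sketch of the same rule, with illustrative pattern data:

def codepoint_matches(patterns, codepoint):
    # None for the whole pattern list means "match everything"
    if patterns is None:
        return True
    # a pattern matches when every non-None position agrees
    return any(all(p is None or p == c for p, c in zip(pattern, codepoint))
               for pattern in patterns)

print(codepoint_matches([(1, None, 3)], (1, 42, 3)))  # True  (wildcard middle)
print(codepoint_matches([(1, None, 3)], (2, 42, 3)))  # False (first position differs)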
def get_type(self):
"""Get the type of the item.
:return: the type of the item.
:returntype: `unicode`"""
item_type = self.xmlnode.prop("type")
if not item_type:
item_type = "?"
return item_type.decode("utf-8") | def function[get_type, parameter[self]]:
constant[Get the type of the item.
:return: the type of the item.
:returntype: `unicode`]
variable[item_type] assign[=] call[name[self].xmlnode.prop, parameter[constant[type]]]
if <ast.UnaryOp object at 0x7da18eb56ce0> begin[:]
variable[item_type] assign[=] constant[?]
return[call[name[item_type].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[get_type] ( identifier[self] ):
literal[string]
identifier[item_type] = identifier[self] . identifier[xmlnode] . identifier[prop] ( literal[string] )
keyword[if] keyword[not] identifier[item_type] :
identifier[item_type] = literal[string]
keyword[return] identifier[item_type] . identifier[decode] ( literal[string] ) | def get_type(self):
"""Get the type of the item.
:return: the type of the item.
:returntype: `unicode`"""
item_type = self.xmlnode.prop('type')
if not item_type:
item_type = '?' # depends on [control=['if'], data=[]]
return item_type.decode('utf-8') |
def get_docargs(self, args=None, prt=None, **kws):
"""Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
args_user = sys.argv[1:] if args is None else args
arg_kws = self._get_docargs(args_user, prt)
if 'intvals' in kws:
self._set_intvals(arg_kws, kws['intvals'])
return arg_kws | def function[get_docargs, parameter[self, args, prt]]:
constant[Pare down docopt. Return a minimal dictionary and a set containing runtime arg values.]
variable[args_user] assign[=] <ast.IfExp object at 0x7da20c6ab010>
variable[arg_kws] assign[=] call[name[self]._get_docargs, parameter[name[args_user], name[prt]]]
if compare[constant[intvals] in name[kws]] begin[:]
call[name[self]._set_intvals, parameter[name[arg_kws], call[name[kws]][constant[intvals]]]]
return[name[arg_kws]] | keyword[def] identifier[get_docargs] ( identifier[self] , identifier[args] = keyword[None] , identifier[prt] = keyword[None] ,** identifier[kws] ):
literal[string]
identifier[args_user] = identifier[sys] . identifier[argv] [ literal[int] :] keyword[if] identifier[args] keyword[is] keyword[None] keyword[else] identifier[args]
identifier[arg_kws] = identifier[self] . identifier[_get_docargs] ( identifier[args_user] , identifier[prt] )
keyword[if] literal[string] keyword[in] identifier[kws] :
identifier[self] . identifier[_set_intvals] ( identifier[arg_kws] , identifier[kws] [ literal[string] ])
keyword[return] identifier[arg_kws] | def get_docargs(self, args=None, prt=None, **kws):
"""Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
args_user = sys.argv[1:] if args is None else args
arg_kws = self._get_docargs(args_user, prt)
if 'intvals' in kws:
self._set_intvals(arg_kws, kws['intvals']) # depends on [control=['if'], data=['kws']]
return arg_kws |
def set_string(_bytearray, byte_index, value, max_size):
"""
Set string value
    :param value: string data
    :param max_size: max possible string size
"""
if six.PY2:
assert isinstance(value, (str, unicode))
else:
assert isinstance(value, str)
size = len(value)
# FAIL HARD WHEN trying to write too much data into PLC
if size > max_size:
raise ValueError('size %s > max_size %s %s' % (size, max_size, value))
# set len count on first position
_bytearray[byte_index + 1] = len(value)
i = 0
# fill array which chr integers
for i, c in enumerate(value):
_bytearray[byte_index + 2 + i] = ord(c)
# fill the rest with empty space
for r in range(i + 1, _bytearray[byte_index]):
_bytearray[byte_index + 2 + r] = ord(' ') | def function[set_string, parameter[_bytearray, byte_index, value, max_size]]:
constant[
Set string value
    :param value: string data
    :param max_size: max possible string size
]
if name[six].PY2 begin[:]
assert[call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da204567d90>, <ast.Name object at 0x7da204567790>]]]]]
variable[size] assign[=] call[name[len], parameter[name[value]]]
if compare[name[size] greater[>] name[max_size]] begin[:]
<ast.Raise object at 0x7da2044c3130>
call[name[_bytearray]][binary_operation[name[byte_index] + constant[1]]] assign[=] call[name[len], parameter[name[value]]]
variable[i] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b26af940>, <ast.Name object at 0x7da1b26afc10>]]] in starred[call[name[enumerate], parameter[name[value]]]] begin[:]
call[name[_bytearray]][binary_operation[binary_operation[name[byte_index] + constant[2]] + name[i]]] assign[=] call[name[ord], parameter[name[c]]]
for taget[name[r]] in starred[call[name[range], parameter[binary_operation[name[i] + constant[1]], call[name[_bytearray]][name[byte_index]]]]] begin[:]
call[name[_bytearray]][binary_operation[binary_operation[name[byte_index] + constant[2]] + name[r]]] assign[=] call[name[ord], parameter[constant[ ]]] | keyword[def] identifier[set_string] ( identifier[_bytearray] , identifier[byte_index] , identifier[value] , identifier[max_size] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] :
keyword[assert] identifier[isinstance] ( identifier[value] ,( identifier[str] , identifier[unicode] ))
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[value] , identifier[str] )
identifier[size] = identifier[len] ( identifier[value] )
keyword[if] identifier[size] > identifier[max_size] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[size] , identifier[max_size] , identifier[value] ))
identifier[_bytearray] [ identifier[byte_index] + literal[int] ]= identifier[len] ( identifier[value] )
identifier[i] = literal[int]
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[value] ):
identifier[_bytearray] [ identifier[byte_index] + literal[int] + identifier[i] ]= identifier[ord] ( identifier[c] )
keyword[for] identifier[r] keyword[in] identifier[range] ( identifier[i] + literal[int] , identifier[_bytearray] [ identifier[byte_index] ]):
identifier[_bytearray] [ identifier[byte_index] + literal[int] + identifier[r] ]= identifier[ord] ( literal[string] ) | def set_string(_bytearray, byte_index, value, max_size):
"""
Set string value
    :param value: string data
    :param max_size: max possible string size
"""
if six.PY2:
assert isinstance(value, (str, unicode)) # depends on [control=['if'], data=[]]
else:
assert isinstance(value, str)
size = len(value)
# FAIL HARD WHEN trying to write too much data into PLC
if size > max_size:
raise ValueError('size %s > max_size %s %s' % (size, max_size, value)) # depends on [control=['if'], data=['size', 'max_size']]
# set len count on first position
_bytearray[byte_index + 1] = len(value)
i = 0
# fill array which chr integers
for (i, c) in enumerate(value):
_bytearray[byte_index + 2 + i] = ord(c) # depends on [control=['for'], data=[]]
# fill the rest with empty space
for r in range(i + 1, _bytearray[byte_index]):
_bytearray[byte_index + 2 + r] = ord(' ') # depends on [control=['for'], data=['r']] |
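The layout written above follows the S7 STRING convention: the byte at byte_index holds the declared maximum length, byte_index + 1 the actual length, and the characters start at byte_index + 2. A small exercise of the function, assuming set_string and its six dependency are importable:

buf = bytearray(12)
buf[0] = 10                      # declared maximum length (read by the fill loop)
set_string(buf, 0, 'AB', max_size=10)
print(buf[1])                    # 2 -> actual length stored at byte_index + 1
print(bytes(buf[2:4]))           # b'AB' -> characters from byte_index + 2
print(bytes(buf[4:12]))          # b'        ' -> space-padded up to buf[0]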
def encode(self):
"""Encode the DAT packet. This method populates self.buffer, and
returns self for easy method chaining."""
if len(self.data) == 0:
log.debug("Encoding an empty DAT packet")
data = self.data
if not isinstance(self.data, bytes):
data = self.data.encode('ascii')
fmt = b"!HH%ds" % len(data)
self.buffer = struct.pack(fmt,
self.opcode,
self.blocknumber,
data)
return self | def function[encode, parameter[self]]:
constant[Encode the DAT packet. This method populates self.buffer, and
returns self for easy method chaining.]
if compare[call[name[len], parameter[name[self].data]] equal[==] constant[0]] begin[:]
call[name[log].debug, parameter[constant[Encoding an empty DAT packet]]]
variable[data] assign[=] name[self].data
if <ast.UnaryOp object at 0x7da1b1290ee0> begin[:]
variable[data] assign[=] call[name[self].data.encode, parameter[constant[ascii]]]
variable[fmt] assign[=] binary_operation[constant[b'!HH%ds'] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[data]]]]
name[self].buffer assign[=] call[name[struct].pack, parameter[name[fmt], name[self].opcode, name[self].blocknumber, name[data]]]
return[name[self]] | keyword[def] identifier[encode] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[data] )== literal[int] :
identifier[log] . identifier[debug] ( literal[string] )
identifier[data] = identifier[self] . identifier[data]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[data] , identifier[bytes] ):
identifier[data] = identifier[self] . identifier[data] . identifier[encode] ( literal[string] )
identifier[fmt] = literal[string] % identifier[len] ( identifier[data] )
identifier[self] . identifier[buffer] = identifier[struct] . identifier[pack] ( identifier[fmt] ,
identifier[self] . identifier[opcode] ,
identifier[self] . identifier[blocknumber] ,
identifier[data] )
keyword[return] identifier[self] | def encode(self):
"""Encode the DAT packet. This method populates self.buffer, and
returns self for easy method chaining."""
if len(self.data) == 0:
log.debug('Encoding an empty DAT packet') # depends on [control=['if'], data=[]]
data = self.data
if not isinstance(self.data, bytes):
data = self.data.encode('ascii') # depends on [control=['if'], data=[]]
fmt = b'!HH%ds' % len(data)
self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data)
return self |
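The wire format packed above matches a TFTP DATA packet (RFC 1350): two big-endian unsigned shorts (opcode 3, block number) followed by the raw payload. A struct-only sketch, independent of the class:

import struct

opcode, block, payload = 3, 1, b'hello'
packet = struct.pack(b'!HH%ds' % len(payload), opcode, block, payload)
print(packet)  # b'\x00\x03\x00\x01hello'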
def _jit_get_rotation_matrix(axis, angle):
"""Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
    The input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array):
"""
axis = _jit_normalize(axis)
a = m.cos(angle / 2)
b, c, d = axis * m.sin(angle / 2)
rot_matrix = np.empty((3, 3))
rot_matrix[0, 0] = a**2 + b**2 - c**2 - d**2
rot_matrix[0, 1] = 2. * (b * c - a * d)
rot_matrix[0, 2] = 2. * (b * d + a * c)
rot_matrix[1, 0] = 2. * (b * c + a * d)
rot_matrix[1, 1] = a**2 + c**2 - b**2 - d**2
rot_matrix[1, 2] = 2. * (c * d - a * b)
rot_matrix[2, 0] = 2. * (b * d - a * c)
rot_matrix[2, 1] = 2. * (c * d + a * b)
rot_matrix[2, 2] = a**2 + d**2 - b**2 - c**2
return rot_matrix | def function[_jit_get_rotation_matrix, parameter[axis, angle]]:
constant[Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
    The input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array):
]
variable[axis] assign[=] call[name[_jit_normalize], parameter[name[axis]]]
variable[a] assign[=] call[name[m].cos, parameter[binary_operation[name[angle] / constant[2]]]]
<ast.Tuple object at 0x7da18f00faf0> assign[=] binary_operation[name[axis] * call[name[m].sin, parameter[binary_operation[name[angle] / constant[2]]]]]
variable[rot_matrix] assign[=] call[name[np].empty, parameter[tuple[[<ast.Constant object at 0x7da18f00fa30>, <ast.Constant object at 0x7da18f00f5b0>]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da18f00d420>, <ast.Constant object at 0x7da18f00fbb0>]]] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[a] ** constant[2]] + binary_operation[name[b] ** constant[2]]] - binary_operation[name[c] ** constant[2]]] - binary_operation[name[d] ** constant[2]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da18f00fc40>, <ast.Constant object at 0x7da18f00db40>]]] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[b] * name[c]] - binary_operation[name[a] * name[d]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da18f00c2b0>, <ast.Constant object at 0x7da18f00dbd0>]]] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[b] * name[d]] + binary_operation[name[a] * name[c]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da20c6c5420>, <ast.Constant object at 0x7da20c6c6ce0>]]] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[b] * name[c]] + binary_operation[name[a] * name[d]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da20c6c5510>, <ast.Constant object at 0x7da20c6c4520>]]] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[a] ** constant[2]] + binary_operation[name[c] ** constant[2]]] - binary_operation[name[b] ** constant[2]]] - binary_operation[name[d] ** constant[2]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da20c6c5930>, <ast.Constant object at 0x7da20c6c5ea0>]]] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[c] * name[d]] - binary_operation[name[a] * name[b]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da20c6c5300>, <ast.Constant object at 0x7da20c6c4280>]]] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[b] * name[d]] - binary_operation[name[a] * name[c]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da20c6c6bc0>, <ast.Constant object at 0x7da20c6c5270>]]] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[c] * name[d]] + binary_operation[name[a] * name[b]]]]
call[name[rot_matrix]][tuple[[<ast.Constant object at 0x7da20c6c5750>, <ast.Constant object at 0x7da20c6c7e20>]]] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[a] ** constant[2]] + binary_operation[name[d] ** constant[2]]] - binary_operation[name[b] ** constant[2]]] - binary_operation[name[c] ** constant[2]]]
return[name[rot_matrix]] | keyword[def] identifier[_jit_get_rotation_matrix] ( identifier[axis] , identifier[angle] ):
literal[string]
identifier[axis] = identifier[_jit_normalize] ( identifier[axis] )
identifier[a] = identifier[m] . identifier[cos] ( identifier[angle] / literal[int] )
identifier[b] , identifier[c] , identifier[d] = identifier[axis] * identifier[m] . identifier[sin] ( identifier[angle] / literal[int] )
identifier[rot_matrix] = identifier[np] . identifier[empty] (( literal[int] , literal[int] ))
identifier[rot_matrix] [ literal[int] , literal[int] ]= identifier[a] ** literal[int] + identifier[b] ** literal[int] - identifier[c] ** literal[int] - identifier[d] ** literal[int]
identifier[rot_matrix] [ literal[int] , literal[int] ]= literal[int] *( identifier[b] * identifier[c] - identifier[a] * identifier[d] )
identifier[rot_matrix] [ literal[int] , literal[int] ]= literal[int] *( identifier[b] * identifier[d] + identifier[a] * identifier[c] )
identifier[rot_matrix] [ literal[int] , literal[int] ]= literal[int] *( identifier[b] * identifier[c] + identifier[a] * identifier[d] )
identifier[rot_matrix] [ literal[int] , literal[int] ]= identifier[a] ** literal[int] + identifier[c] ** literal[int] - identifier[b] ** literal[int] - identifier[d] ** literal[int]
identifier[rot_matrix] [ literal[int] , literal[int] ]= literal[int] *( identifier[c] * identifier[d] - identifier[a] * identifier[b] )
identifier[rot_matrix] [ literal[int] , literal[int] ]= literal[int] *( identifier[b] * identifier[d] - identifier[a] * identifier[c] )
identifier[rot_matrix] [ literal[int] , literal[int] ]= literal[int] *( identifier[c] * identifier[d] + identifier[a] * identifier[b] )
identifier[rot_matrix] [ literal[int] , literal[int] ]= identifier[a] ** literal[int] + identifier[d] ** literal[int] - identifier[b] ** literal[int] - identifier[c] ** literal[int]
keyword[return] identifier[rot_matrix] | def _jit_get_rotation_matrix(axis, angle):
"""Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
The Input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array):
"""
axis = _jit_normalize(axis)
a = m.cos(angle / 2)
(b, c, d) = axis * m.sin(angle / 2)
rot_matrix = np.empty((3, 3))
rot_matrix[0, 0] = a ** 2 + b ** 2 - c ** 2 - d ** 2
rot_matrix[0, 1] = 2.0 * (b * c - a * d)
rot_matrix[0, 2] = 2.0 * (b * d + a * c)
rot_matrix[1, 0] = 2.0 * (b * c + a * d)
rot_matrix[1, 1] = a ** 2 + c ** 2 - b ** 2 - d ** 2
rot_matrix[1, 2] = 2.0 * (c * d - a * b)
rot_matrix[2, 0] = 2.0 * (b * d - a * c)
rot_matrix[2, 1] = 2.0 * (c * d + a * b)
rot_matrix[2, 2] = a ** 2 + d ** 2 - b ** 2 - c ** 2
return rot_matrix |
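A sanity check on the matrix above: rotating the x unit vector by pi/2 about the z axis should give the y unit vector. _jit_normalize is defined elsewhere in the module, so a plain stand-in with the assumed behaviour is used here.

import math as m
import numpy as np

def _jit_normalize(v):
    # assumed behaviour of the missing helper: scale to unit length
    return np.asarray(v, dtype=float) / np.linalg.norm(v)

R = _jit_get_rotation_matrix(np.array([0.0, 0.0, 1.0]), m.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # [0. 1. 0.]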
def doc_unwrap(raw_doc):
"""
Applies two transformations to raw_doc:
1. N consecutive newlines are converted into N-1 newlines.
2. A lone newline is converted to a space, which basically unwraps text.
Returns a new string, or None if the input was None.
"""
if raw_doc is None:
return None
docstring = ''
consecutive_newlines = 0
# Remove all leading and trailing whitespace in the documentation block
for c in raw_doc.strip():
if c == '\n':
consecutive_newlines += 1
if consecutive_newlines > 1:
docstring += c
else:
if consecutive_newlines == 1:
docstring += ' '
consecutive_newlines = 0
docstring += c
return docstring | def function[doc_unwrap, parameter[raw_doc]]:
constant[
Applies two transformations to raw_doc:
1. N consecutive newlines are converted into N-1 newlines.
2. A lone newline is converted to a space, which basically unwraps text.
Returns a new string, or None if the input was None.
]
if compare[name[raw_doc] is constant[None]] begin[:]
return[constant[None]]
variable[docstring] assign[=] constant[]
variable[consecutive_newlines] assign[=] constant[0]
for taget[name[c]] in starred[call[name[raw_doc].strip, parameter[]]] begin[:]
if compare[name[c] equal[==] constant[
]] begin[:]
<ast.AugAssign object at 0x7da2044c3dc0>
if compare[name[consecutive_newlines] greater[>] constant[1]] begin[:]
<ast.AugAssign object at 0x7da2044c28f0>
return[name[docstring]] | keyword[def] identifier[doc_unwrap] ( identifier[raw_doc] ):
literal[string]
keyword[if] identifier[raw_doc] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[docstring] = literal[string]
identifier[consecutive_newlines] = literal[int]
keyword[for] identifier[c] keyword[in] identifier[raw_doc] . identifier[strip] ():
keyword[if] identifier[c] == literal[string] :
identifier[consecutive_newlines] += literal[int]
keyword[if] identifier[consecutive_newlines] > literal[int] :
identifier[docstring] += identifier[c]
keyword[else] :
keyword[if] identifier[consecutive_newlines] == literal[int] :
identifier[docstring] += literal[string]
identifier[consecutive_newlines] = literal[int]
identifier[docstring] += identifier[c]
keyword[return] identifier[docstring] | def doc_unwrap(raw_doc):
"""
Applies two transformations to raw_doc:
1. N consecutive newlines are converted into N-1 newlines.
2. A lone newline is converted to a space, which basically unwraps text.
Returns a new string, or None if the input was None.
"""
if raw_doc is None:
return None # depends on [control=['if'], data=[]]
docstring = ''
consecutive_newlines = 0
# Remove all leading and trailing whitespace in the documentation block
for c in raw_doc.strip():
if c == '\n':
consecutive_newlines += 1
if consecutive_newlines > 1:
docstring += c # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['c']]
else:
if consecutive_newlines == 1:
docstring += ' ' # depends on [control=['if'], data=[]]
consecutive_newlines = 0
docstring += c # depends on [control=['for'], data=['c']]
return docstring |
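A short demonstration of the two transformations, assuming doc_unwrap above is in scope: the lone newline is unwrapped into a space, while the double newline collapses to a single one.

raw = "First line\nwrapped onto a second line.\n\nNew paragraph."
print(repr(doc_unwrap(raw)))
# 'First line wrapped onto a second line.\nNew paragraph.'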
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers) | def function[iterencode, parameter[self, o]]:
constant[
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
]
if name[self].check_circular begin[:]
variable[markers] assign[=] dictionary[[], []]
return[call[name[self]._iterencode, parameter[name[o], name[markers]]]] | keyword[def] identifier[iterencode] ( identifier[self] , identifier[o] ):
literal[string]
keyword[if] identifier[self] . identifier[check_circular] :
identifier[markers] ={}
keyword[else] :
identifier[markers] = keyword[None]
keyword[return] identifier[self] . identifier[_iterencode] ( identifier[o] , identifier[markers] ) | def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {} # depends on [control=['if'], data=[]]
else:
markers = None
return self._iterencode(o, markers) |
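This mirrors the stdlib json.JSONEncoder.iterencode interface, so the streaming pattern can be shown with the standard library:

import json

parts = list(json.JSONEncoder().iterencode({'a': [1, 2, 3]}))
print(parts)           # e.g. ['{', '"a"', ': ', '[1', ', 2', ', 3', ']', '}']
print(''.join(parts))  # {"a": [1, 2, 3]}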
def search_registered_query_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data | def function[search_registered_query_deleted_for_facet, parameter[self, facet]]:
constant[Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].search_registered_query_deleted_for_facet_with_http_info, parameter[name[facet]]]] | keyword[def] identifier[search_registered_query_deleted_for_facet] ( identifier[self] , identifier[facet] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[search_registered_query_deleted_for_facet_with_http_info] ( identifier[facet] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[search_registered_query_deleted_for_facet_with_http_info] ( identifier[facet] ,** identifier[kwargs] )
keyword[return] identifier[data] | def search_registered_query_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"Lists the values of a specific facet over the customer's deleted derived metric definitions # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.search_registered_query_deleted_for_facet(facet, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str facet: (required)\n :param FacetSearchRequestContainer body:\n :return: ResponseContainerFacetResponse\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.search_registered_query_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data |
def _in_range(self, index):
""" Returns True if index is in range """
if isinstance(index, slice):
in_range = index.start < index.stop and \
index.start >= self.start and \
index.stop <= self.end
else:
in_range = index >= self.start and \
index <= self.end
return in_range | def function[_in_range, parameter[self, index]]:
constant[ Returns True if index is in range ]
if call[name[isinstance], parameter[name[index], name[slice]]] begin[:]
variable[in_range] assign[=] <ast.BoolOp object at 0x7da18fe92c20>
return[name[in_range]] | keyword[def] identifier[_in_range] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[index] , identifier[slice] ):
identifier[in_range] = identifier[index] . identifier[start] < identifier[index] . identifier[stop] keyword[and] identifier[index] . identifier[start] >= identifier[self] . identifier[start] keyword[and] identifier[index] . identifier[stop] <= identifier[self] . identifier[end]
keyword[else] :
identifier[in_range] = identifier[index] >= identifier[self] . identifier[start] keyword[and] identifier[index] <= identifier[self] . identifier[end]
keyword[return] identifier[in_range] | def _in_range(self, index):
""" Returns True if index is in range """
if isinstance(index, slice):
in_range = index.start < index.stop and index.start >= self.start and (index.stop <= self.end) # depends on [control=['if'], data=[]]
else:
in_range = index >= self.start and index <= self.end
return in_range |
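The same containment test as a free function, with start/end passed explicitly instead of read from self; note the slice branch requires start < stop, so empty or reversed slices report False.

def in_range(index, start, end):
    if isinstance(index, slice):
        return index.start < index.stop and index.start >= start and index.stop <= end
    return start <= index <= end

print(in_range(5, 0, 10))            # True
print(in_range(slice(2, 8), 0, 10))  # True
print(in_range(slice(8, 2), 0, 10))  # False (reversed slice)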
def handle_absolute(self, event):
"""Absolute mouse position on the screen."""
(x_val, y_val) = self._get_absolute(event)
x_event, y_event = self.emulate_abs(
int(x_val),
int(y_val),
self.timeval)
self.events.append(x_event)
self.events.append(y_event) | def function[handle_absolute, parameter[self, event]]:
constant[Absolute mouse position on the screen.]
<ast.Tuple object at 0x7da1b08e6ce0> assign[=] call[name[self]._get_absolute, parameter[name[event]]]
<ast.Tuple object at 0x7da1b08e45e0> assign[=] call[name[self].emulate_abs, parameter[call[name[int], parameter[name[x_val]]], call[name[int], parameter[name[y_val]]], name[self].timeval]]
call[name[self].events.append, parameter[name[x_event]]]
call[name[self].events.append, parameter[name[y_event]]] | keyword[def] identifier[handle_absolute] ( identifier[self] , identifier[event] ):
literal[string]
( identifier[x_val] , identifier[y_val] )= identifier[self] . identifier[_get_absolute] ( identifier[event] )
identifier[x_event] , identifier[y_event] = identifier[self] . identifier[emulate_abs] (
identifier[int] ( identifier[x_val] ),
identifier[int] ( identifier[y_val] ),
identifier[self] . identifier[timeval] )
identifier[self] . identifier[events] . identifier[append] ( identifier[x_event] )
identifier[self] . identifier[events] . identifier[append] ( identifier[y_event] ) | def handle_absolute(self, event):
"""Absolute mouse position on the screen."""
(x_val, y_val) = self._get_absolute(event)
(x_event, y_event) = self.emulate_abs(int(x_val), int(y_val), self.timeval)
self.events.append(x_event)
self.events.append(y_event) |
def patch_namespaced_endpoints(self, name, namespace, body, **kwargs):
"""
partially update the specified Endpoints
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Endpoints (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Endpoints
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs)
return data | def function[patch_namespaced_endpoints, parameter[self, name, namespace, body]]:
constant[
partially update the specified Endpoints
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Endpoints (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Endpoints
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].patch_namespaced_endpoints_with_http_info, parameter[name[name], name[namespace], name[body]]]] | keyword[def] identifier[patch_namespaced_endpoints] ( identifier[self] , identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[patch_namespaced_endpoints_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[patch_namespaced_endpoints_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def patch_namespaced_endpoints(self, name, namespace, body, **kwargs):
"""
partially update the specified Endpoints
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Endpoints (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Endpoints
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs)
return data |
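A hedged usage sketch against the official kubernetes Python client, which generates this same method; the cluster config, object name, and label are illustrative.

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() inside a pod
v1 = client.CoreV1Api()

# merge-patch body: add a label to the Endpoints object (names are illustrative)
body = {'metadata': {'labels': {'team': 'platform'}}}
patched = v1.patch_namespaced_endpoints(name='my-service', namespace='default', body=body)
print(patched.metadata.labels)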
def stopped(name=None,
containers=None,
shutdown_timeout=None,
unpause=False,
error_on_absent=True,
**kwargs):
'''
Ensure that a container (or containers) is stopped
name
Name or ID of the container
containers
Run this state on more than one container at a time. The following two
examples accomplish the same thing:
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- names:
- foo
- bar
- baz
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- containers:
- foo
- bar
- baz
However, the second example will be a bit quicker since Salt will stop
all specified containers in a single run, rather than executing the
    state separately on each container (as it would in the first example).
shutdown_timeout
Timeout for graceful shutdown of the container. If this timeout is
exceeded, the container will be killed. If this value is not passed,
then the container's configured ``stop_timeout`` will be observed. If
``stop_timeout`` was also unset on the container, then a timeout of 10
seconds will be used.
unpause : False
Set to ``True`` to unpause any paused containers before stopping. If
unset, then an error will be raised for any container that was paused.
error_on_absent : True
By default, this state will return an error if any of the specified
containers are absent. Set this to ``False`` to suppress that error.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name and not containers:
ret['comment'] = 'One of \'name\' and \'containers\' must be provided'
return ret
if containers is not None:
if not isinstance(containers, list):
ret['comment'] = 'containers must be a list'
return ret
targets = []
for target in containers:
if not isinstance(target, six.string_types):
target = six.text_type(target)
targets.append(target)
elif name:
if not isinstance(name, six.string_types):
targets = [six.text_type(name)]
else:
targets = [name]
containers = {}
for target in targets:
try:
c_state = __salt__['docker.state'](target)
except CommandExecutionError:
containers.setdefault('absent', []).append(target)
else:
containers.setdefault(c_state, []).append(target)
errors = []
if error_on_absent and 'absent' in containers:
errors.append(
'The following container(s) are absent: {0}'.format(
', '.join(containers['absent'])
)
)
if not unpause and 'paused' in containers:
ret['result'] = False
errors.append(
'The following container(s) are paused: {0}'.format(
', '.join(containers['paused'])
)
)
if errors:
ret['result'] = False
ret['comment'] = '. '.join(errors)
return ret
to_stop = containers.get('running', []) + containers.get('paused', [])
if not to_stop:
ret['result'] = True
if len(targets) == 1:
ret['comment'] = 'Container \'{0}\' is '.format(targets[0])
else:
ret['comment'] = 'All specified containers are '
if 'absent' in containers:
ret['comment'] += 'absent or '
ret['comment'] += 'not running'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = (
'The following container(s) will be stopped: {0}'
.format(', '.join(to_stop))
)
return ret
stop_errors = []
for target in to_stop:
stop_kwargs = {'unpause': unpause}
if shutdown_timeout:
stop_kwargs['timeout'] = shutdown_timeout
changes = __salt__['docker.stop'](target, **stop_kwargs)
if changes['result'] is True:
ret['changes'][target] = changes
else:
if 'comment' in changes:
stop_errors.append(changes['comment'])
else:
stop_errors.append(
'Failed to stop container \'{0}\''.format(target)
)
if stop_errors:
ret['comment'] = '; '.join(stop_errors)
return ret
ret['result'] = True
ret['comment'] = (
'The following container(s) were stopped: {0}'
.format(', '.join(to_stop))
)
return ret | def function[stopped, parameter[name, containers, shutdown_timeout, unpause, error_on_absent]]:
constant[
Ensure that a container (or containers) is stopped
name
Name or ID of the container
containers
Run this state on more than one container at a time. The following two
examples accomplish the same thing:
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- names:
- foo
- bar
- baz
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- containers:
- foo
- bar
- baz
However, the second example will be a bit quicker since Salt will stop
all specified containers in a single run, rather than executing the
    state separately on each container (as it would in the first example).
shutdown_timeout
Timeout for graceful shutdown of the container. If this timeout is
exceeded, the container will be killed. If this value is not passed,
then the container's configured ``stop_timeout`` will be observed. If
``stop_timeout`` was also unset on the container, then a timeout of 10
seconds will be used.
unpause : False
Set to ``True`` to unpause any paused containers before stopping. If
unset, then an error will be raised for any container that was paused.
error_on_absent : True
By default, this state will return an error if any of the specified
containers are absent. Set this to ``False`` to suppress that error.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b210b6a0>, <ast.Constant object at 0x7da1b2108a30>, <ast.Constant object at 0x7da1b210a050>, <ast.Constant object at 0x7da1b210b310>], [<ast.Name object at 0x7da1b210ad10>, <ast.Dict object at 0x7da1b2109fc0>, <ast.Constant object at 0x7da1b208a170>, <ast.Constant object at 0x7da1b208a1a0>]]
if <ast.BoolOp object at 0x7da1b208a200> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[One of 'name' and 'containers' must be provided]
return[name[ret]]
if compare[name[containers] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b208bd30> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[containers must be a list]
return[name[ret]]
variable[targets] assign[=] list[[]]
for taget[name[target]] in starred[name[containers]] begin[:]
if <ast.UnaryOp object at 0x7da1b208ad70> begin[:]
variable[target] assign[=] call[name[six].text_type, parameter[name[target]]]
call[name[targets].append, parameter[name[target]]]
variable[containers] assign[=] dictionary[[], []]
for taget[name[target]] in starred[name[targets]] begin[:]
<ast.Try object at 0x7da1b216a9b0>
variable[errors] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b2169900> begin[:]
call[name[errors].append, parameter[call[constant[The following container(s) are absent: {0}].format, parameter[call[constant[, ].join, parameter[call[name[containers]][constant[absent]]]]]]]]
if <ast.BoolOp object at 0x7da1b216a740> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[errors].append, parameter[call[constant[The following container(s) are paused: {0}].format, parameter[call[constant[, ].join, parameter[call[name[containers]][constant[paused]]]]]]]]
if name[errors] begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[. ].join, parameter[name[errors]]]
return[name[ret]]
variable[to_stop] assign[=] binary_operation[call[name[containers].get, parameter[constant[running], list[[]]]] + call[name[containers].get, parameter[constant[paused], list[[]]]]]
if <ast.UnaryOp object at 0x7da1b2169600> begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
if compare[call[name[len], parameter[name[targets]]] equal[==] constant[1]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Container '{0}' is ].format, parameter[call[name[targets]][constant[0]]]]
if compare[constant[absent] in name[containers]] begin[:]
<ast.AugAssign object at 0x7da18ede7a30>
<ast.AugAssign object at 0x7da18ede6140>
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] call[constant[The following container(s) will be stopped: {0}].format, parameter[call[constant[, ].join, parameter[name[to_stop]]]]]
return[name[ret]]
variable[stop_errors] assign[=] list[[]]
for taget[name[target]] in starred[name[to_stop]] begin[:]
variable[stop_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18ede5ed0>], [<ast.Name object at 0x7da18ede4310>]]
if name[shutdown_timeout] begin[:]
call[name[stop_kwargs]][constant[timeout]] assign[=] name[shutdown_timeout]
variable[changes] assign[=] call[call[name[__salt__]][constant[docker.stop]], parameter[name[target]]]
if compare[call[name[changes]][constant[result]] is constant[True]] begin[:]
call[call[name[ret]][constant[changes]]][name[target]] assign[=] name[changes]
if name[stop_errors] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[; ].join, parameter[name[stop_errors]]]
return[name[ret]]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[The following container(s) were stopped: {0}].format, parameter[call[constant[, ].join, parameter[name[to_stop]]]]]
return[name[ret]] | keyword[def] identifier[stopped] ( identifier[name] = keyword[None] ,
identifier[containers] = keyword[None] ,
identifier[shutdown_timeout] = keyword[None] ,
identifier[unpause] = keyword[False] ,
identifier[error_on_absent] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] }
keyword[if] keyword[not] identifier[name] keyword[and] keyword[not] identifier[containers] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[containers] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[containers] , identifier[list] ):
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[targets] =[]
keyword[for] identifier[target] keyword[in] identifier[containers] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[target] , identifier[six] . identifier[string_types] ):
identifier[target] = identifier[six] . identifier[text_type] ( identifier[target] )
identifier[targets] . identifier[append] ( identifier[target] )
keyword[elif] identifier[name] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[six] . identifier[string_types] ):
identifier[targets] =[ identifier[six] . identifier[text_type] ( identifier[name] )]
keyword[else] :
identifier[targets] =[ identifier[name] ]
identifier[containers] ={}
keyword[for] identifier[target] keyword[in] identifier[targets] :
keyword[try] :
identifier[c_state] = identifier[__salt__] [ literal[string] ]( identifier[target] )
keyword[except] identifier[CommandExecutionError] :
identifier[containers] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( identifier[target] )
keyword[else] :
identifier[containers] . identifier[setdefault] ( identifier[c_state] ,[]). identifier[append] ( identifier[target] )
identifier[errors] =[]
keyword[if] identifier[error_on_absent] keyword[and] literal[string] keyword[in] identifier[containers] :
identifier[errors] . identifier[append] (
literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[containers] [ literal[string] ])
)
)
keyword[if] keyword[not] identifier[unpause] keyword[and] literal[string] keyword[in] identifier[containers] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[errors] . identifier[append] (
literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[containers] [ literal[string] ])
)
)
keyword[if] identifier[errors] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[join] ( identifier[errors] )
keyword[return] identifier[ret]
identifier[to_stop] = identifier[containers] . identifier[get] ( literal[string] ,[])+ identifier[containers] . identifier[get] ( literal[string] ,[])
keyword[if] keyword[not] identifier[to_stop] :
identifier[ret] [ literal[string] ]= keyword[True]
keyword[if] identifier[len] ( identifier[targets] )== literal[int] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[targets] [ literal[int] ])
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[in] identifier[containers] :
identifier[ret] [ literal[string] ]+= literal[string]
identifier[ret] [ literal[string] ]+= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]=(
literal[string]
. identifier[format] ( literal[string] . identifier[join] ( identifier[to_stop] ))
)
keyword[return] identifier[ret]
identifier[stop_errors] =[]
keyword[for] identifier[target] keyword[in] identifier[to_stop] :
identifier[stop_kwargs] ={ literal[string] : identifier[unpause] }
keyword[if] identifier[shutdown_timeout] :
identifier[stop_kwargs] [ literal[string] ]= identifier[shutdown_timeout]
identifier[changes] = identifier[__salt__] [ literal[string] ]( identifier[target] ,** identifier[stop_kwargs] )
keyword[if] identifier[changes] [ literal[string] ] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ][ identifier[target] ]= identifier[changes]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[changes] :
identifier[stop_errors] . identifier[append] ( identifier[changes] [ literal[string] ])
keyword[else] :
identifier[stop_errors] . identifier[append] (
literal[string] . identifier[format] ( identifier[target] )
)
keyword[if] identifier[stop_errors] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[join] ( identifier[stop_errors] )
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]=(
literal[string]
. identifier[format] ( literal[string] . identifier[join] ( identifier[to_stop] ))
)
keyword[return] identifier[ret] | def stopped(name=None, containers=None, shutdown_timeout=None, unpause=False, error_on_absent=True, **kwargs):
"""
Ensure that a container (or containers) is stopped
name
Name or ID of the container
containers
Run this state on more than one container at a time. The following two
examples accomplish the same thing:
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- names:
- foo
- bar
- baz
.. code-block:: yaml
stopped_containers:
docker_container.stopped:
- containers:
- foo
- bar
- baz
However, the second example will be a bit quicker since Salt will stop
all specified containers in a single run, rather than executing the
state separately on each container (as it would in the first example).
shutdown_timeout
Timeout for graceful shutdown of the container. If this timeout is
exceeded, the container will be killed. If this value is not passed,
then the container's configured ``stop_timeout`` will be observed. If
``stop_timeout`` was also unset on the container, then a timeout of 10
seconds will be used.
unpause : False
Set to ``True`` to unpause any paused containers before stopping. If
unset, then an error will be raised for any container that was paused.
error_on_absent : True
By default, this state will return an error if any of the specified
containers are absent. Set this to ``False`` to suppress that error.
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not name and (not containers):
ret['comment'] = "One of 'name' and 'containers' must be provided"
return ret # depends on [control=['if'], data=[]]
if containers is not None:
if not isinstance(containers, list):
ret['comment'] = 'containers must be a list'
return ret # depends on [control=['if'], data=[]]
targets = []
for target in containers:
if not isinstance(target, six.string_types):
target = six.text_type(target) # depends on [control=['if'], data=[]]
targets.append(target) # depends on [control=['for'], data=['target']] # depends on [control=['if'], data=['containers']]
elif name:
if not isinstance(name, six.string_types):
targets = [six.text_type(name)] # depends on [control=['if'], data=[]]
else:
targets = [name] # depends on [control=['if'], data=[]]
containers = {}
for target in targets:
try:
c_state = __salt__['docker.state'](target) # depends on [control=['try'], data=[]]
except CommandExecutionError:
containers.setdefault('absent', []).append(target) # depends on [control=['except'], data=[]]
else:
containers.setdefault(c_state, []).append(target) # depends on [control=['for'], data=['target']]
errors = []
if error_on_absent and 'absent' in containers:
errors.append('The following container(s) are absent: {0}'.format(', '.join(containers['absent']))) # depends on [control=['if'], data=[]]
if not unpause and 'paused' in containers:
ret['result'] = False
errors.append('The following container(s) are paused: {0}'.format(', '.join(containers['paused']))) # depends on [control=['if'], data=[]]
if errors:
ret['result'] = False
ret['comment'] = '. '.join(errors)
return ret # depends on [control=['if'], data=[]]
to_stop = containers.get('running', []) + containers.get('paused', [])
if not to_stop:
ret['result'] = True
if len(targets) == 1:
ret['comment'] = "Container '{0}' is ".format(targets[0]) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'All specified containers are '
if 'absent' in containers:
ret['comment'] += 'absent or ' # depends on [control=['if'], data=[]]
ret['comment'] += 'not running'
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The following container(s) will be stopped: {0}'.format(', '.join(to_stop))
return ret # depends on [control=['if'], data=[]]
stop_errors = []
for target in to_stop:
stop_kwargs = {'unpause': unpause}
if shutdown_timeout:
stop_kwargs['timeout'] = shutdown_timeout # depends on [control=['if'], data=[]]
changes = __salt__['docker.stop'](target, **stop_kwargs)
if changes['result'] is True:
ret['changes'][target] = changes # depends on [control=['if'], data=[]]
elif 'comment' in changes:
stop_errors.append(changes['comment']) # depends on [control=['if'], data=['changes']]
else:
stop_errors.append("Failed to stop container '{0}'".format(target)) # depends on [control=['for'], data=['target']]
if stop_errors:
ret['comment'] = '; '.join(stop_errors)
return ret # depends on [control=['if'], data=[]]
ret['result'] = True
ret['comment'] = 'The following container(s) were stopped: {0}'.format(', '.join(to_stop))
return ret |
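
A minimal, self-contained sketch of the state-grouping step used in stopped() above: each target is bucketed under the container state reported for it, and lookup failures are collected under 'absent'. The fake_state dict and the KeyError stand in for __salt__['docker.state'] and CommandExecutionError, so this is illustrative rather than the module's actual runtime wiring.

fake_state = {'foo': 'running', 'bar': 'paused'}   # illustrative container states

containers = {}
for target in ['foo', 'bar', 'baz']:
    try:
        c_state = fake_state[target]               # stands in for __salt__['docker.state'](target)
    except KeyError:                               # stands in for CommandExecutionError
        containers.setdefault('absent', []).append(target)
    else:
        containers.setdefault(c_state, []).append(target)

print(containers)  # {'running': ['foo'], 'paused': ['bar'], 'absent': ['baz']}
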
def _split_symbol_mappings(df, exchanges):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings, with one row per
mapping. The sid appears as a column (the index is reset to a
default RangeIndex) alongside symbol, start_date, and end_date.
"""
mappings = df[list(mapping_columns)]
with pd.option_context('mode.chained_assignment', None):
mappings['sid'] = mappings.index
mappings.reset_index(drop=True, inplace=True)
# take the most recent sid->exchange mapping based on end date
asset_exchange = df[
['exchange', 'end_date']
].sort_values('end_date').groupby(level=0)['exchange'].nth(-1)
_check_symbol_mappings(mappings, exchanges, asset_exchange)
return (
df.groupby(level=0).apply(_check_asset_group),
mappings,
) | def function[_split_symbol_mappings, parameter[df, exchanges]]:
constant[Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings, with one row per
mapping. The sid appears as a column (the index is reset to a
default RangeIndex) alongside symbol, start_date, and end_date.
]
variable[mappings] assign[=] call[name[df]][call[name[list], parameter[name[mapping_columns]]]]
with call[name[pd].option_context, parameter[constant[mode.chained_assignment], constant[None]]] begin[:]
call[name[mappings]][constant[sid]] assign[=] name[mappings].index
call[name[mappings].reset_index, parameter[]]
variable[asset_exchange] assign[=] call[call[call[call[call[name[df]][list[[<ast.Constant object at 0x7da1b2006e00>, <ast.Constant object at 0x7da1b2005900>]]].sort_values, parameter[constant[end_date]]].groupby, parameter[]]][constant[exchange]].nth, parameter[<ast.UnaryOp object at 0x7da1b2005ab0>]]
call[name[_check_symbol_mappings], parameter[name[mappings], name[exchanges], name[asset_exchange]]]
return[tuple[[<ast.Call object at 0x7da1b20050c0>, <ast.Name object at 0x7da1b2004d90>]]] | keyword[def] identifier[_split_symbol_mappings] ( identifier[df] , identifier[exchanges] ):
literal[string]
identifier[mappings] = identifier[df] [ identifier[list] ( identifier[mapping_columns] )]
keyword[with] identifier[pd] . identifier[option_context] ( literal[string] , keyword[None] ):
identifier[mappings] [ literal[string] ]= identifier[mappings] . identifier[index]
identifier[mappings] . identifier[reset_index] ( identifier[drop] = keyword[True] , identifier[inplace] = keyword[True] )
identifier[asset_exchange] = identifier[df] [
[ literal[string] , literal[string] ]
]. identifier[sort_values] ( literal[string] ). identifier[groupby] ( identifier[level] = literal[int] )[ literal[string] ]. identifier[nth] (- literal[int] )
identifier[_check_symbol_mappings] ( identifier[mappings] , identifier[exchanges] , identifier[asset_exchange] )
keyword[return] (
identifier[df] . identifier[groupby] ( identifier[level] = literal[int] ). identifier[apply] ( identifier[_check_asset_group] ),
identifier[mappings] ,
) | def _split_symbol_mappings(df, exchanges):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings, with one row per
mapping. The sid appears as a column (the index is reset to a
default RangeIndex) alongside symbol, start_date, and end_date.
"""
mappings = df[list(mapping_columns)]
with pd.option_context('mode.chained_assignment', None):
mappings['sid'] = mappings.index # depends on [control=['with'], data=[]]
mappings.reset_index(drop=True, inplace=True)
# take the most recent sid->exchange mapping based on end date
asset_exchange = df[['exchange', 'end_date']].sort_values('end_date').groupby(level=0)['exchange'].nth(-1)
_check_symbol_mappings(mappings, exchanges, asset_exchange)
return (df.groupby(level=0).apply(_check_asset_group), mappings) |
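
The heart of _split_symbol_mappings is the "most recent exchange per sid" lookup: sort by end_date, group on the sid index level, and take the last row of each group with nth(-1). A runnable sketch with a made-up frame (the sample data is illustrative, not zipline's):

import pandas as pd

df = pd.DataFrame(
    {'exchange': ['NYSE', 'NASDAQ', 'NYSE'],
     'end_date': pd.to_datetime(['2015-01-01', '2016-01-01', '2014-01-01'])},
    index=pd.Index([1, 1, 2], name='sid'),
)

asset_exchange = (
    df[['exchange', 'end_date']]
    .sort_values('end_date')
    .groupby(level=0)['exchange']
    .nth(-1)
)
print(asset_exchange)  # sid 1 -> 'NASDAQ' (latest end_date), sid 2 -> 'NYSE'
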
def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs):
'''
This endpoint is used with a check that is of the TTL type. When this
is called, the status of the check is set to warning and the TTL
clock is reset.
:param consul_url: The Consul server URL.
:param checkid: The ID of the check to mark as warning.
:param note: A human-readable message with the status of the check.
:return: Boolean and message indicating success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.'
'''
ret = {}
query_params = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
if not checkid:
raise SaltInvocationError('Required argument "checkid" is missing.')
if 'note' in kwargs:
query_params['note'] = kwargs['note']
function = 'agent/check/warn/{0}'.format(checkid)
res = _query(consul_url=consul_url,
function=function,
token=token,
query_params=query_params,
method='GET')
if res['res']:
ret['res'] = True
ret['message'] = 'Check {0} marked as warning.'.format(checkid)
else:
ret['res'] = False
ret['message'] = 'Unable to update check {0}.'.format(checkid)
return ret | def function[agent_check_warn, parameter[consul_url, token, checkid]]:
constant[
This endpoint is used with a check that is of the TTL type. When this
is called, the status of the check is set to warning and the TTL
clock is reset.
:param consul_url: The Consul server URL.
:param checkid: The ID of the check to mark as warning.
:param note: A human-readable message with the status of the check.
:return: Boolean and message indicating success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.'
]
variable[ret] assign[=] dictionary[[], []]
variable[query_params] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da1b1c2e0b0> begin[:]
variable[consul_url] assign[=] call[name[_get_config], parameter[]]
if <ast.UnaryOp object at 0x7da1b1c2df00> begin[:]
call[name[log].error, parameter[constant[No Consul URL found.]]]
call[name[ret]][constant[message]] assign[=] constant[No Consul URL found.]
call[name[ret]][constant[res]] assign[=] constant[False]
return[name[ret]]
if <ast.UnaryOp object at 0x7da1b1c2e3e0> begin[:]
<ast.Raise object at 0x7da1b1c2d2a0>
if compare[constant[note] in name[kwargs]] begin[:]
call[name[query_params]][constant[note]] assign[=] call[name[kwargs]][constant[note]]
variable[function] assign[=] call[constant[agent/check/warn/{0}].format, parameter[name[checkid]]]
variable[res] assign[=] call[name[_query], parameter[]]
if call[name[res]][constant[res]] begin[:]
call[name[ret]][constant[res]] assign[=] constant[True]
call[name[ret]][constant[message]] assign[=] call[constant[Check {0} marked as warning.].format, parameter[name[checkid]]]
return[name[ret]] | keyword[def] identifier[agent_check_warn] ( identifier[consul_url] = keyword[None] , identifier[token] = keyword[None] , identifier[checkid] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={}
identifier[query_params] ={}
keyword[if] keyword[not] identifier[consul_url] :
identifier[consul_url] = identifier[_get_config] ()
keyword[if] keyword[not] identifier[consul_url] :
identifier[log] . identifier[error] ( literal[string] )
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[checkid] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[query_params] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
identifier[function] = literal[string] . identifier[format] ( identifier[checkid] )
identifier[res] = identifier[_query] ( identifier[consul_url] = identifier[consul_url] ,
identifier[function] = identifier[function] ,
identifier[token] = identifier[token] ,
identifier[query_params] = identifier[query_params] ,
identifier[method] = literal[string] )
keyword[if] identifier[res] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[checkid] )
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[checkid] )
keyword[return] identifier[ret] | def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs):
"""
This endpoint is used with a check that is of the TTL type. When this
is called, the status of the check is set to warning and the TTL
clock is reset.
:param consul_url: The Consul server URL.
:param checkid: The ID of the check to mark as warning.
:param note: A human-readable message with the status of the check.
:return: Boolean and message indicating success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.'
"""
ret = {}
query_params = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not checkid:
raise SaltInvocationError('Required argument "checkid" is missing.') # depends on [control=['if'], data=[]]
if 'note' in kwargs:
query_params['note'] = kwargs['note'] # depends on [control=['if'], data=['kwargs']]
function = 'agent/check/warn/{0}'.format(checkid)
res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET')
if res['res']:
ret['res'] = True
ret['message'] = 'Check {0} marked as warning.'.format(checkid) # depends on [control=['if'], data=[]]
else:
ret['res'] = False
ret['message'] = 'Unable to update check {0}.'.format(checkid)
return ret |
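
For orientation, a hedged sketch of the HTTP request that agent_check_warn ultimately drives. The /v1 prefix, the direct requests call, and the X-Consul-Token header are assumptions here — the module actually routes everything through its private _query helper — and newer Consul releases expect PUT rather than the GET the module issues.

import requests

consul_url = 'http://localhost:8500'   # assumed local agent address
checkid = 'redis_check1'
query_params = {'note': 'Forcing check into warning state.'}

# Assumed endpoint shape; mirrors 'agent/check/warn/{0}' from the module.
url = '{0}/v1/agent/check/warn/{1}'.format(consul_url, checkid)
resp = requests.get(url, params=query_params,
                    headers={'X-Consul-Token': 'my-token'})   # token header is an assumption
print(resp.status_code)   # 200 on success
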
def retry(num_attempts=3, exception_class=Exception, log=None, sleeptime=1):
"""
>>> def fail():
... runs[0] += 1
... raise ValueError()
>>> runs = [0]; retry(sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[3]
>>> runs = [0]; retry(2, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[2]
>>> runs = [0]; retry(exception_class=IndexError, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[1]
>>> logger = DoctestLogger()
>>> runs = [0]; retry(log=logger, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[3]
>>> logger.print_logs()
Failed with error ValueError(), trying again
Failed with error ValueError(), trying again
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for i in range(num_attempts):
try:
return func(*args, **kwargs)
except exception_class as e:
if i == num_attempts - 1:
raise
else:
if log:
log.warn('Failed with error %r, trying again', e)
sleep(sleeptime)
return wrapper
return decorator | def function[retry, parameter[num_attempts, exception_class, log, sleeptime]]:
constant[
>>> def fail():
... runs[0] += 1
... raise ValueError()
>>> runs = [0]; retry(sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[3]
>>> runs = [0]; retry(2, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[2]
>>> runs = [0]; retry(exception_class=IndexError, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[1]
>>> logger = DoctestLogger()
>>> runs = [0]; retry(log=logger, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[3]
>>> logger.print_logs()
Failed with error ValueError(), trying again
Failed with error ValueError(), trying again
]
def function[decorator, parameter[func]]:
def function[wrapper, parameter[]]:
for taget[name[i]] in starred[call[name[range], parameter[name[num_attempts]]]] begin[:]
<ast.Try object at 0x7da2054a6ec0>
return[name[wrapper]]
return[name[decorator]] | keyword[def] identifier[retry] ( identifier[num_attempts] = literal[int] , identifier[exception_class] = identifier[Exception] , identifier[log] = keyword[None] , identifier[sleeptime] = literal[int] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[func] ):
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_attempts] ):
keyword[try] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[exception_class] keyword[as] identifier[e] :
keyword[if] identifier[i] == identifier[num_attempts] - literal[int] :
keyword[raise]
keyword[else] :
keyword[if] identifier[log] :
identifier[log] . identifier[warn] ( literal[string] , identifier[e] )
identifier[sleep] ( identifier[sleeptime] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator] | def retry(num_attempts=3, exception_class=Exception, log=None, sleeptime=1):
"""
>>> def fail():
... runs[0] += 1
... raise ValueError()
>>> runs = [0]; retry(sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[3]
>>> runs = [0]; retry(2, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[2]
>>> runs = [0]; retry(exception_class=IndexError, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[1]
>>> logger = DoctestLogger()
>>> runs = [0]; retry(log=logger, sleeptime=0)(fail)()
Traceback (most recent call last):
...
ValueError
>>> runs
[3]
>>> logger.print_logs()
Failed with error ValueError(), trying again
Failed with error ValueError(), trying again
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for i in range(num_attempts):
try:
return func(*args, **kwargs) # depends on [control=['try'], data=[]]
except exception_class as e:
if i == num_attempts - 1:
raise # depends on [control=['if'], data=[]]
else:
if log:
log.warn('Failed with error %r, trying again', e) # depends on [control=['if'], data=[]]
sleep(sleeptime) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['i']]
return wrapper
return decorator |
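
A quick usage sketch for the retry decorator beyond its doctests; flaky() is an illustrative function that fails twice, so the third attempt returns normally. The two imports are the ones the decorator itself relies on (they live at module top in the original source, which is not shown here).

import functools            # retry uses functools.wraps
from time import sleep      # retry sleeps between attempts

calls = {'n': 0}

@retry(num_attempts=3, sleeptime=0)
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ValueError('transient failure')
    return 'ok'

print(flaky())      # 'ok' on the third attempt
print(calls['n'])   # 3
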
def remove_perm(perm, user_or_group, forum=None):
""" Remove a permission to a user (anonymous or not) or a group. """
user, group = get_identity(user_or_group)
perm = ForumPermission.objects.get(codename=perm)
if user:
UserForumPermission.objects.filter(
forum=forum,
permission=perm,
user=user if not user.is_anonymous else None,
anonymous_user=user.is_anonymous,
).delete()
if group:
GroupForumPermission.objects.filter(forum=forum, permission=perm, group=group).delete() | def function[remove_perm, parameter[perm, user_or_group, forum]]:
constant[ Remove a permission from a user (anonymous or not) or a group. ]
<ast.Tuple object at 0x7da2044c12a0> assign[=] call[name[get_identity], parameter[name[user_or_group]]]
variable[perm] assign[=] call[name[ForumPermission].objects.get, parameter[]]
if name[user] begin[:]
call[call[name[UserForumPermission].objects.filter, parameter[]].delete, parameter[]]
if name[group] begin[:]
call[call[name[GroupForumPermission].objects.filter, parameter[]].delete, parameter[]] | keyword[def] identifier[remove_perm] ( identifier[perm] , identifier[user_or_group] , identifier[forum] = keyword[None] ):
literal[string]
identifier[user] , identifier[group] = identifier[get_identity] ( identifier[user_or_group] )
identifier[perm] = identifier[ForumPermission] . identifier[objects] . identifier[get] ( identifier[codename] = identifier[perm] )
keyword[if] identifier[user] :
identifier[UserForumPermission] . identifier[objects] . identifier[filter] (
identifier[forum] = identifier[forum] ,
identifier[permission] = identifier[perm] ,
identifier[user] = identifier[user] keyword[if] keyword[not] identifier[user] . identifier[is_anonymous] keyword[else] keyword[None] ,
identifier[anonymous_user] = identifier[user] . identifier[is_anonymous] ,
). identifier[delete] ()
keyword[if] identifier[group] :
identifier[GroupForumPermission] . identifier[objects] . identifier[filter] ( identifier[forum] = identifier[forum] , identifier[permission] = identifier[perm] , identifier[group] = identifier[group] ). identifier[delete] () | def remove_perm(perm, user_or_group, forum=None):
""" Remove a permission to a user (anonymous or not) or a group. """
(user, group) = get_identity(user_or_group)
perm = ForumPermission.objects.get(codename=perm)
if user:
UserForumPermission.objects.filter(forum=forum, permission=perm, user=user if not user.is_anonymous else None, anonymous_user=user.is_anonymous).delete() # depends on [control=['if'], data=[]]
if group:
GroupForumPermission.objects.filter(forum=forum, permission=perm, group=group).delete() # depends on [control=['if'], data=[]] |
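
A hedged usage sketch for remove_perm, assuming a django-machina style setup where ForumPermission rows exist for the given codenames; some_user, some_group, and some_forum are placeholders fetched elsewhere.

# Per-user, per-forum revocation; the codename is assumed to exist.
remove_perm('can_read_forum', some_user, forum=some_forum)

# Global (non-forum-specific) revocation for a group: forum=None
# matches permission rows whose forum field is NULL.
remove_perm('can_start_new_topics', some_group)
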
def _descend_namespace(caller_globals, name):
"""
Given a globals dictionary, and a name of the form "a.b.c.d", recursively
walk the globals expanding caller_globals['a']['b']['c']['d'] returning
the result. Raises an exception (KeyError or AttributeError) on failure.
"""
names = name.split('.')
cur = caller_globals
for i in names:
if type(cur) is dict:
cur = cur[i]
else:
cur = getattr(cur, i)
return cur | def function[_descend_namespace, parameter[caller_globals, name]]:
constant[
Given a globals dictionary, and a name of the form "a.b.c.d", recursively
walk the globals expanding caller_globals['a']['b']['c']['d'] returning
the result. Raises an exception (KeyError or AttributeError) on failure.
]
variable[names] assign[=] call[name[name].split, parameter[constant[.]]]
variable[cur] assign[=] name[caller_globals]
for taget[name[i]] in starred[name[names]] begin[:]
if compare[call[name[type], parameter[name[cur]]] is name[dict]] begin[:]
variable[cur] assign[=] call[name[cur]][name[i]]
return[name[cur]] | keyword[def] identifier[_descend_namespace] ( identifier[caller_globals] , identifier[name] ):
literal[string]
identifier[names] = identifier[name] . identifier[split] ( literal[string] )
identifier[cur] = identifier[caller_globals]
keyword[for] identifier[i] keyword[in] identifier[names] :
keyword[if] identifier[type] ( identifier[cur] ) keyword[is] identifier[dict] :
identifier[cur] = identifier[cur] [ identifier[i] ]
keyword[else] :
identifier[cur] = identifier[getattr] ( identifier[cur] , identifier[i] )
keyword[return] identifier[cur] | def _descend_namespace(caller_globals, name):
"""
Given a globals dictionary, and a name of the form "a.b.c.d", recursively
walk the globals expanding caller_globals['a']['b']['c']['d'] returning
the result. Raises an exception (KeyError or AttributeError) on failure.
"""
names = name.split('.')
cur = caller_globals
for i in names:
if type(cur) is dict:
cur = cur[i] # depends on [control=['if'], data=[]]
else:
cur = getattr(cur, i) # depends on [control=['for'], data=['i']]
return cur |
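
A self-contained sketch of the dotted-name walk in _descend_namespace: dict levels are indexed with [], anything else goes through getattr, so one string can cross from a plain dict into a module's attributes.

import os

caller_globals = {'os': os, 'cfg': {'db': {'host': 'localhost'}}}

print(_descend_namespace(caller_globals, 'cfg.db.host'))   # 'localhost'
print(_descend_namespace(caller_globals, 'os.path.sep'))   # '/' on POSIX systems
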