def load(kls, url, getter=None, parser=None, url_load_hook=None, sep=consts.private.SCOPE_SEPARATOR, prim=None, mime_codec=None, resolver=None):
""" load json as a raw App
    :param str url: url or path of Swagger API definition
    :param getter: customized Getter
    :type getter: subclass/instance of Getter
    :param parser: the parser to parse the loaded json.
    :type parser: pyswagger.base.Context
    :param func url_load_hook: hook to patch the url to load json
    :param str sep: scope separator used in this App
    :param pyswagger.primitives.Primitive prim: factory for primitives in Swagger
    :param pyswagger.primitives.MimeCodec mime_codec: MIME codec
    :param pyswagger.resolve.Resolver resolver: customized resolver used by default when none is provided at resolve time
:return: the created App object
:rtype: App
:raises ValueError: if url is wrong
:raises NotImplementedError: the swagger version is not supported.
"""
logger.info('load with [{0}]'.format(url))
url = utils.normalize_url(url)
app = kls(url, url_load_hook=url_load_hook, sep=sep, prim=prim, mime_codec=mime_codec, resolver=resolver)
app.__raw, app.__version = app.load_obj(url, getter=getter, parser=parser)
if app.__version not in ['1.2', '2.0']:
        raise NotImplementedError('Unsupported Version: {0}'.format(app.__version))
# update scheme if any
p = six.moves.urllib.parse.urlparse(url)
if p.scheme:
app.schemes.append(p.scheme)
    return app
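
# A minimal usage sketch, assuming this is pyswagger's App.load classmethod;
# the petstore URL is only an example:
from pyswagger import App

app = App.load('http://petstore.swagger.io/v2/swagger.json')
app.prepare()  # validate the spec and build the schema objects
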
@contextlib.contextmanager  # assumes `import contextlib`; without the decorator, `with muted(...)` would fail
def muted(*streams):
"""A context manager to redirect stdout and/or stderr to /dev/null.
Examples:
with muted(sys.stdout):
...
with muted(sys.stderr):
...
with muted(sys.stdout, sys.stderr):
...
"""
devnull = open(os.devnull, 'w')
try:
old_streams = [os.dup(s.fileno()) for s in streams]
for s in streams:
os.dup2(devnull.fileno(), s.fileno())
yield
finally:
        for o, s in zip(old_streams, streams):
            os.dup2(o, s.fileno())
            os.close(o)  # close the saved duplicate to avoid leaking fds
        devnull.close()
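
# Example: silence a noisy child process at the file-descriptor level.
# Unlike contextlib.redirect_stdout, dup2() also mutes output written
# directly to fd 1 by subprocesses or C extensions:
import os
import sys

with muted(sys.stdout):
    os.system('echo "this is swallowed by /dev/null"')
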
def _handle_input_request(self, msg):
""" Handle requests for raw_input.
"""
self.log.debug("input: %s", msg.get('content', ''))
if self._hidden:
raise RuntimeError('Request for raw input during hidden execution.')
# Make sure that all output from the SUB channel has been processed
# before entering readline mode.
self.kernel_manager.sub_channel.flush()
def callback(line):
self.kernel_manager.stdin_channel.input(line)
if self._reading:
self.log.debug("Got second input request, assuming first was interrupted.")
self._reading = False
    self._readline(msg['content']['prompt'], callback=callback)
def tokenize(args):
"""
Tokenize a string (passed as argument or read from stdin)
segments [--profile=PATH/TO/PROFILE] tokenize [STRING]
"""
if args.profile and not Path(args.profile).exists(): # pragma: no cover
raise ParserError('--profile must be a path for an existing file')
    _write(args, Tokenizer(profile=args.profile)(_read(args), column=args.mapping))
def _populate_random_tournament_row_col(n, r, row, col):
"""
Populate ndarrays `row` and `col` with directed edge indices
determined by random numbers in `r` for a tournament graph with n
nodes, which has num_edges = n * (n-1) // 2 edges.
Parameters
----------
n : scalar(int)
Number of nodes.
r : ndarray(float, ndim=1)
ndarray of length num_edges containing random numbers in [0, 1).
row, col : ndarray(int, ndim=1)
ndarrays of length num_edges to be modified in place.
"""
k = 0
for i in range(n):
for j in range(i+1, n):
if r[k] < 0.5:
row[k], col[k] = i, j
else:
row[k], col[k] = j, i
            k += 1
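
# A small driver, assuming NumPy; for n = 4 nodes there are 6 edges:
import numpy as np

n = 4
num_edges = n * (n - 1) // 2
r = np.random.random(num_edges)
row = np.empty(num_edges, dtype=int)
col = np.empty(num_edges, dtype=int)
_populate_random_tournament_row_col(n, r, row, col)
# every pair (i, j) now appears exactly once, oriented by r
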
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
    :param template_name_or_list: the name of the template to be
        rendered, or an iterable of template names;
        the first one that exists will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)
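
# Typical Flask usage inside a view, assuming an existing Flask `app`
# object; the route and template name are made up for illustration:
@app.route('/hello/<name>')
def hello(name):
    return render_template('hello.html', name=name)
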
def _coarsenImage(image, f):
'''
    A more precise (but slower) way to down-scale an image.
'''
from skimage.morphology import square
from skimage.filters import rank
from skimage.transform._warps import rescale
selem = square(f)
arri = rank.mean(image, selem=selem)
    return rescale(arri, 1 / f, order=0)
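
# A quick check, assuming scikit-image is installed; rank.mean requires
# an integer image:
import numpy as np

image = np.random.randint(0, 255, (100, 100), dtype=np.uint8)
small = _coarsenImage(image, 4)  # -> shape (25, 25)
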
def get_or_new_from_json_dict_with_types(
data, cls_map, type_key='type'
):
"""Get `cls` object w/ deserialization from json by using type key hint if needed.
If data is instance of one of cls, return data.
Else if data is instance of dict, create instance from dict.
Else, return None.
:param data:
:param cls_map:
:param type_key:
:rtype: object
:return:
"""
if isinstance(data, tuple(cls_map.values())):
return data
elif isinstance(data, dict):
type_val = data[type_key]
if type_val in cls_map:
return cls_map[type_val].new_from_json_dict(data)
    return None
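
# A self-contained sketch with a made-up class providing the
# new_from_json_dict factory the function relies on:
class TextMessage(object):
    @classmethod
    def new_from_json_dict(cls, data):
        obj = cls()
        obj.text = data.get('text')
        return obj

cls_map = {'text': TextMessage}
msg = get_or_new_from_json_dict_with_types({'type': 'text', 'text': 'hi'}, cls_map)
assert isinstance(msg, TextMessage) and msg.text == 'hi'
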
def gen_multi_data(n=5000):
"""
multivariate Logistic problem
"""
X, y = toy_classification(return_X_y=True, n=10000)
lgam = LogisticGAM(s(0) + s(1) + s(2) + s(3) + s(4) + f(5))
lgam.fit(X, y)
plt.figure()
for i, term in enumerate(lgam.terms):
if term.isintercept:
continue
plt.plot(lgam.partial_dependence(term=i))
plt.savefig('imgs/pygam_multi_pdep.png', dpi=300)
plt.figure()
plt.plot(lgam.logs_['deviance'])
    plt.savefig('imgs/pygam_multi_deviance.png', dpi=300)
def _releaseConnection(self, dbConn, cursor):
""" Release database connection and cursor; passed as a callback to
ConnectionWrapper
"""
self._logger.debug("Releasing connection")
# Close the cursor
cursor.close()
# ... then close the database connection
dbConn.close()
    return
def run(self, node, expr=None, lineno=None, with_raise=True):
"""Execute parsed Ast representation for an expression."""
    # Note: keep the 'node is None' test: internal code here may call
    # run(None) and expect a None in return.
if time.time() - self.start_time > self.max_time:
raise RuntimeError(ERR_MAX_TIME.format(self.max_time))
out = None
if len(self.error) > 0:
return out
if node is None:
return out
if isinstance(node, str):
node = self.parse(node)
if lineno is not None:
self.lineno = lineno
if expr is not None:
self.expr = expr
# get handler for this node:
    # on_xxx will handle nodes of type 'xxx', etc.
try:
handler = self.node_handlers[node.__class__.__name__.lower()]
except KeyError:
return self.unimplemented(node)
# run the handler: this will likely generate
# recursive calls into this run method.
try:
ret = handler(node)
if isinstance(ret, enumerate):
ret = list(ret)
return ret
    except Exception:
if with_raise:
            self.raise_exception(node, expr=expr)
def jinja_filter_param_value_str(value, str_quote_style="", bool_is_str=False):
""" Convert a parameter value to string suitable to be passed to an EDA tool
Rules:
- Booleans are represented as 0/1 or "true"/"false" depending on the
bool_is_str argument
- Strings are either passed through or enclosed in the characters specified
in str_quote_style (e.g. '"' or '\\"')
- Everything else (including int, float, etc.) are converted using the str()
function.
"""
    if isinstance(value, bool) and not bool_is_str:
        return '1' if value else '0'
    elif isinstance(value, str) or (isinstance(value, bool) and bool_is_str):
        return str_quote_style + str(value) + str_quote_style
    else:
        return str(value)
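
# A few illustrative calls (note that str(True) yields 'True' with a
# capital T, so the "true"/"false" wording above assumes the caller
# lower-cases the result):
jinja_filter_param_value_str(True)                        # '1'
jinja_filter_param_value_str(False)                       # '0'
jinja_filter_param_value_str('top', str_quote_style='"')  # '"top"'
jinja_filter_param_value_str(4.2)                         # '4.2'
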
def create_subtask(self, cor, name=None, stop_timeout=1.0):
"""Create and add a subtask from a coroutine.
This function will create a BackgroundTask and then
call self.add_subtask() on it.
Args:
cor (coroutine): The coroutine that should be wrapped
in a background task.
name (str): An optional name for the task.
stop_timeout (float): The maximum time to wait for this
subtask to die after stopping it.
Returns:
Backgroundtask: The created subtask.
"""
if self.stopped:
raise InternalError("Cannot add a subtask to a parent that is already stopped")
subtask = BackgroundTask(cor, name, loop=self._loop, stop_timeout=stop_timeout)
self.add_subtask(subtask)
    return subtask
def read_string(self, where, max_length=None, force=False):
"""
Read a NUL-terminated concrete buffer from memory. Stops reading at first symbolic byte.
:param int where: Address to read string from
:param int max_length:
The size in bytes to cap the string at, or None [default] for no
limit.
:param force: whether to ignore memory permissions
:return: string read
:rtype: str
"""
s = io.BytesIO()
while True:
c = self.read_int(where, 8, force)
if issymbolic(c) or c == 0:
break
if max_length is not None:
if max_length == 0:
break
max_length = max_length - 1
s.write(Operators.CHR(c))
where += 1
    return s.getvalue().decode()
def kde_statsmodels_u(data, grid, **kwargs):
"""
Univariate Kernel Density Estimation with Statsmodels
Parameters
----------
data : numpy.array
        Data points used to compute a density estimator. It
        has `n x 1` dimensions, representing n points.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x 1` dimensions, representing m points.
Returns
-------
out : numpy.array
Density estimate. Has `m x 1` dimensions
"""
kde = KDEUnivariate(data)
kde.fit(**kwargs)
    return kde.evaluate(grid)
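
# A toy call, assuming statsmodels is installed (the surrounding module
# imports KDEUnivariate from it); kwargs are passed through to .fit():
import numpy as np

data = np.random.normal(size=200)
grid = np.linspace(-3, 3, 50)
density = kde_statsmodels_u(data, grid, bw='normal_reference')
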
def get_commands(self, source=None):
"""Return a string containing multiple `reStructuredText`
replacements with the substitutions currently defined.
Some examples based on the subpackage |optiontools|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> from hydpy.core import optiontools
>>> substituter.add_module(optiontools)
When calling |Substituter.get_commands| with the `source`
argument, the complete `short2long` and `medium2long` mappings
are translated into replacement commands (only a few of them
are shown):
>>> print(substituter.get_commands())
.. |Options.autocompile| replace:: \
:const:`~hydpy.core.optiontools.Options.autocompile`
.. |Options.checkseries| replace:: \
:const:`~hydpy.core.optiontools.Options.checkseries`
...
.. |optiontools.Options.warntrim| replace:: \
:const:`~hydpy.core.optiontools.Options.warntrim`
.. |optiontools.Options| replace:: \
:class:`~hydpy.core.optiontools.Options`
    By passing a string (usually the source code of a file
to be documented), only the replacement commands relevant for
this string are translated:
>>> from hydpy.core import objecttools
>>> import inspect
>>> source = inspect.getsource(objecttools)
>>> print(substituter.get_commands(source))
.. |Options.reprdigits| replace:: \
:const:`~hydpy.core.optiontools.Options.reprdigits`
"""
commands = []
for key, value in self:
if (source is None) or (key in source):
commands.append('.. %s replace:: %s' % (key, value))
    return '\n'.join(commands)
def clean_image(self):
"""
It seems like in Django 1.5 something has changed.
When Django tries to validate the form, it checks if the generated
filename fit into the max_length. But at this point, self.instance.user
is not yet set so our filename generation function cannot create
the new file path because it needs the user id. Setting
self.instance.user at this point seems to work as a workaround.
"""
self.instance.user = self.user
data = self.cleaned_data.get('image')
    return data
def update_definition_properties(self, document, project, definition_id):
"""UpdateDefinitionProperties.
[Preview API] Updates properties for a definition.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.build.models.[JsonPatchOperation]>` document: A json-patch document describing the properties to update.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:rtype: :class:`<object> <azure.devops.v5_0.build.models.object>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='d9826ad7-2a68-46a9-a6e9-677698777895',
version='5.0-preview.1',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
    return self._deserialize('object', response)
def set_attribute_xsi_type(self, el, **kw):
'''if typed, set the xsi:type attribute
Paramters:
el -- MessageInterface representing the element
'''
if kw.get('typed', self.typed):
namespaceURI,typeName = kw.get('type', _get_xsitype(self))
if namespaceURI and typeName:
self.logger.debug("attribute: (%s, %s)", namespaceURI, typeName)
            el.setAttributeType(namespaceURI, typeName)
def complete_definition(subj: Node,
source_graph: Graph,
target_graph: Optional[Graph]=None) -> PrettyGraph:
"""
Return the transitive closure of subject.
:param subj: URI or BNode for subject
    :param source_graph: Graph containing definition
:param target_graph: return graph (for recursion)
:return: target_graph
"""
if target_graph is None:
target_graph = PrettyGraph()
for p, o in source_graph.predicate_objects(subj):
target_graph.add((subj, p, o))
if isinstance(o, BNode):
complete_definition(o, source_graph, target_graph)
    return target_graph
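
# A minimal sketch using rdflib; the data is made up and PrettyGraph is
# assumed to be the Graph subclass defined in the surrounding module:
from rdflib import Graph, URIRef

g = Graph()
g.parse(data="""
    @prefix ex: <http://example.org/> .
    ex:thing ex:label "a thing" ;
             ex:part [ ex:label "a part" ] .
""", format="turtle")
closure = complete_definition(URIRef("http://example.org/thing"), g)
# closure now also contains the ex:part blank node's triples
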
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
    return False
def put(self, measurementId):
"""
    Initiates a new measurement. Accepts a json payload with the following attributes:
* duration: in seconds
* startTime OR delay: a date in YMD_HMS format or a delay in seconds
* description: some free text information about the measurement
:return:
"""
json = request.get_json()
try:
start = self._calculateStartTime(json)
except ValueError:
return 'invalid date format in request', 400
duration = json['duration'] if 'duration' in json else 10
if start is None:
# should never happen but just in case
return 'no start time', 400
else:
scheduled, message = self._measurementController.schedule(measurementId, duration, start,
description=json.get('description'))
        return message, 200 if scheduled else 400
def teal(theTask, parent=None, loadOnly=False, returnAs="dict",
canExecute=True, strict=False, errorsToTerm=False,
autoClose=True, defaults=False):
# overrides=None):
""" Start the GUI session, or simply load a task's ConfigObj. """
if loadOnly: # this forces returnAs="dict"
obj = None
try:
obj = cfgpars.getObjectFromTaskArg(theTask, strict, defaults)
# obj.strictUpdate(overrides) # ! would need to re-verify after this !
except Exception as re: # catches RuntimeError and KeyError and ...
# Since we are loadOnly, don't pop up the GUI for this
if strict:
raise
else:
                print(str(re).replace('\n\n', '\n'))
return obj
else:
assert returnAs in ("dict", "status", None), \
"Invalid value for returnAs arg: "+str(returnAs)
dlg = None
try:
# if setting to all defaults, go ahead and load it here, pre-GUI
if defaults:
theTask = cfgpars.getObjectFromTaskArg(theTask, strict, True)
# now create/run the dialog
dlg = ConfigObjEparDialog(theTask, parent=parent,
autoClose=autoClose,
strict=strict,
canExecute=canExecute)
# overrides=overrides)
except cfgpars.NoCfgFileError as ncf:
log_last_error()
if errorsToTerm:
print(str(ncf).replace('\n\n','\n'))
else:
popUpErr(parent=parent,message=str(ncf),title="Unfound Task")
except Exception as re: # catches RuntimeError and KeyError and ...
log_last_error()
if errorsToTerm:
                print(str(re).replace('\n\n', '\n'))
            else:
                popUpErr(parent=parent, message=str(re),
title="Bad Parameters")
# Return, depending on the mode in which we are operating
if returnAs is None:
return
if returnAs == "dict":
if dlg is None or dlg.canceled():
return None
else:
return dlg.getTaskParsObj()
# else, returnAs == "status"
if dlg is None or dlg.canceled():
return -1
if dlg.executed():
return 1
    return 0
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(s):
return np.fromstring(s.strip('{}'), dtype='float', sep=',')
def default(s):
return s.strip('{}')
parse = {'wavelength': parsevec,
'fwhm': parsevec}
parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
    return parsed_meta
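
# A minimal sketch; the keys and values mimic what rasterio's
# tags(ns='ENVI') might return (the numbers are made up):
meta = {
    'wavelength': '{450.0, 550.0, 650.0}',
    'fwhm': '{10.0, 10.0, 10.0}',
    'description': '{Example ENVI header}',
}
parsed = _parse_envi(meta)
# parsed['wavelength'] is a float ndarray; 'description' stays a string
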
def get_hops(self, start, end=None, forward=True):
"""
Computes the hop distance to all nodes centered around a specified node.
    First order neighbours are at hop 1, their neighbours are at hop 2, etc.
Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward
parameter. If the distance between all neighbouring nodes is 1 the hop
number corresponds to the shortest distance between the nodes.
:param start: the starting node
:param end: ending node (optional). When not specified will search the whole graph.
:param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}.
:return: returns a list of tuples where each tuple contains the node and the hop.
Typical usage::
>>> print graph.get_hops(1, 8)
    [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
# node 1 is at 0 hops
# node 2 is at 1 hop
# ...
# node 8 is at 5 hops
"""
if forward:
return list(self._iterbfs(start=start, end=end, forward=True))
else:
        return list(self._iterbfs(start=start, end=end, forward=False))
def write_observation_zone(self, **kw):
"""
Write an observation zone declaration to the file::
writer.write_observation_zone(
type=ObservationZoneType.CYLINDER,
radius=30000,
)
# <ObservationZone type="Cylinder" radius="30000"/>
The required parameters depend on the type parameter. Different
observation zone types require different parameters.
:param type: observation zone type (one of the constants in
:class:`~aerofiles.xcsoar.constants.ObservationZoneType`)
:param length: length of the line
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.LINE`)
:param radius: (outer) radius of the observation zone
(used with types
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CYLINDER`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`,
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SYMMETRIC_QUADRANT` and
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param inner_radius: inner radius of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param angle: angle of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.CUSTOM_KEYHOLE`)
:param start_radial: start radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
:param end_radial: end radial of the observation zone
(only used with type
:const:`~aerofiles.xcsoar.constants.ObservationZoneType.SECTOR`)
"""
assert 'type' in kw
if kw['type'] == ObservationZoneType.LINE:
assert 'length' in kw
elif kw['type'] == ObservationZoneType.CYLINDER:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.SECTOR:
assert 'radius' in kw
assert 'start_radial' in kw
assert 'end_radial' in kw
elif kw['type'] == ObservationZoneType.SYMMETRIC_QUADRANT:
assert 'radius' in kw
elif kw['type'] == ObservationZoneType.CUSTOM_KEYHOLE:
assert 'radius' in kw
assert 'inner_radius' in kw
assert 'angle' in kw
    self.write_tag('ObservationZone', **kw)
def _call_scope(self, scope, *args, **kwargs):
"""
Call the given model scope.
:param scope: The scope to call
:type scope: str
"""
query = self.get_query()
# We will keep track of how many wheres are on the query before running the
# scope so that we can properly group the added scope constraints in the
# query as their own isolated nested where statement and avoid issues.
original_where_count = len(query.wheres)
result = getattr(self._model, scope)(self, *args, **kwargs)
if self._should_nest_wheres_for_scope(query, original_where_count):
self._nest_wheres_for_scope(
query, [0, original_where_count, len(query.wheres)]
)
    return result or self
def chisquare(observe, expect, error, ddof, verbose=True):
"""
Finds the reduced chi square difference of *observe* and *expect* with a given *error* and *ddof* degrees of freedom.
*verbose* flag determines if the reduced chi square is printed to the terminal.
"""
chisq = 0
error = error.flatten()
observe = observe.flatten()
expect = expect.flatten()
for i, el in enumerate(observe):
chisq = chisq + _np.power((el - expect[i]) / error[i], 2)
red_chisq = chisq / (len(observe) - ddof)
if verbose:
# print 'Chi-Squared is {}.'.format(chisq)
print('Reduced Chi-Squared is {}.'.format(red_chisq))
    return red_chisq
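
# Toy data: the observations scatter around the expectation by 0.1-0.2
# with errors of 0.15, so the reduced chi-square comes out near 1 (~1.5):
import numpy as np

observe = np.array([1.1, 1.9, 3.2, 3.8])
expect = np.array([1.0, 2.0, 3.0, 4.0])
error = np.full_like(observe, 0.15)
red = chisquare(observe, expect, error, ddof=1, verbose=False)
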
def process_exception(self, e, uuid, routing_key, body, tb=None):
"""
Callback called when exception was raised.
This method serializes the exception and sends it over AMQP back
to caller.
Args:
e (obj): Instance of the exception.
uuid (str): UUID of the message that caused the exception to raise.
routing_key (str): Which routing key was used.
body (str): Body of the exception - the longer text.
            tb (str, default None): Traceback (stacktrace) of the exception.
"""
    # get information about the message
msg = e.message if hasattr(e, "message") else str(e)
exception_type = str(e.__class__)
exception_name = str(e.__class__.__name__)
print "Sending exception %s: %s for UUID %s." % (
exception_name,
msg,
uuid
)
self.sendMessage(
self.output_exchange,
routing_key,
str(body),
properties=pika.BasicProperties(
content_type="application/text",
delivery_mode=2,
headers={
"exception": msg,
"exception_type": exception_type,
"exception_name": exception_name,
"traceback": tb,
"UUID": uuid
}
)
    )
def get_bounding_box(points):
"""Get the bounding box of a list of points.
Parameters
----------
points : list of points
Returns
-------
BoundingBox
"""
assert len(points) > 0, "At least one point has to be given."
min_x, max_x = points[0]['x'], points[0]['x']
min_y, max_y = points[0]['y'], points[0]['y']
for point in points:
min_x, max_x = min(min_x, point['x']), max(max_x, point['x'])
min_y, max_y = min(min_y, point['y']), max(max_y, point['y'])
p1 = Point(min_x, min_y)
p2 = Point(max_x, max_y)
    return BoundingBox(p1, p2)
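
# A quick check; the points use the {'x': ..., 'y': ...} shape the
# function expects, and Point/BoundingBox come from the surrounding module:
points = [{'x': 0, 'y': 2}, {'x': 3, 'y': -1}, {'x': 1, 'y': 5}]
bbox = get_bounding_box(points)  # spans (0, -1) to (3, 5)
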
def ising_simulated_annealing(h, J, beta_range=None, num_sweeps=1000):
"""Tries to find the spins that minimize the given Ising problem.
Args:
h (dict): A dictionary of the linear biases in the Ising
problem. Should be of the form {v: bias, ...} for each
variable v in the Ising problem.
J (dict): A dictionary of the quadratic biases in the Ising
problem. Should be a dict of the form {(u, v): bias, ...}
for each edge (u, v) in the Ising problem. If J[(u, v)] and
J[(v, u)] exist then the biases are added.
beta_range (tuple, optional): A 2-tuple defining the
beginning and end of the beta schedule (beta is the
inverse temperature). The schedule is applied linearly
in beta. Default is chosen based on the total bias associated
with each node.
num_sweeps (int, optional): The number of sweeps or steps.
Default is 1000.
Returns:
dict: A sample as a dictionary of spins.
float: The energy of the returned sample.
Raises:
TypeError: If the values in `beta_range` are not numeric.
TypeError: If `num_sweeps` is not an int.
TypeError: If `beta_range` is not a tuple.
ValueError: If the values in `beta_range` are not positive.
ValueError: If `beta_range` is not a 2-tuple.
ValueError: If `num_sweeps` is not positive.
https://en.wikipedia.org/wiki/Simulated_annealing
"""
if beta_range is None:
beta_init = .1
sigmas = {v: abs(h[v]) for v in h}
for u, v in J:
sigmas[u] += abs(J[(u, v)])
sigmas[v] += abs(J[(u, v)])
if sigmas:
beta_final = 2. * max(itervalues(sigmas))
else:
beta_final = 0.0
else:
if not isinstance(beta_range, (tuple, list)):
raise TypeError("'beta_range' should be a tuple of length 2")
if any(not isinstance(b, (int, float)) for b in beta_range):
raise TypeError("values in 'beta_range' should be numeric")
if any(b <= 0 for b in beta_range):
raise ValueError("beta values in 'beta_range' should be positive")
if len(beta_range) != 2:
raise ValueError("'beta_range' should be a tuple of length 2")
beta_init, beta_final = beta_range
if not isinstance(num_sweeps, int):
raise TypeError("'sweeps' should be a positive int")
if num_sweeps <= 0:
raise ValueError("'sweeps' should be a positive int")
# We want the schedule to be linear in beta (inverse temperature)
betas = [beta_init + i * (beta_final - beta_init) / (num_sweeps - 1.)
for i in range(num_sweeps)]
# set up the adjacency matrix. We can rely on every node in J already being in h
adj = {n: set() for n in h}
for n0, n1 in J:
adj[n0].add(n1)
adj[n1].add(n0)
# we will use a vertex coloring for the graph and update the nodes by color. A quick
# greedy coloring will be sufficient.
__, colors = greedy_coloring(adj)
# let's make our initial guess (randomly)
spins = {v: random.choice((-1, 1)) for v in h}
# there are exactly as many betas as sweeps
for beta in betas:
# we want to know the gain in energy for flipping each of the spins.
# We can calculate all of the linear terms simultaneously
energy_diff_h = {v: -2 * spins[v] * h[v] for v in h}
# for each color, do updates
for color in colors:
nodes = colors[color]
# we now want to know the energy change for flipping the spins within
# the color class
energy_diff_J = {}
for v0 in nodes:
ediff = 0
for v1 in adj[v0]:
if (v0, v1) in J:
ediff += spins[v0] * spins[v1] * J[(v0, v1)]
if (v1, v0) in J:
ediff += spins[v0] * spins[v1] * J[(v1, v0)]
energy_diff_J[v0] = -2. * ediff
# now decide whether to flip spins according to the
# following scheme:
# p ~ Uniform(0, 1)
# log(p) < -beta * (energy_diff)
for v in nodes:
logp = math.log(random.uniform(0, 1))
if logp < -1. * beta * (energy_diff_h[v] + energy_diff_J[v]):
# flip the variable in the spins
spins[v] *= -1
    return spins, ising_energy(spins, h, J)
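
# A two-spin toy run; the negative coupling favours aligned spins, so
# nearly every run returns an aligned pair at energy -1.0 (this relies
# on the module-level greedy_coloring and ising_energy helpers):
h = {0: 0.0, 1: 0.0}
J = {(0, 1): -1.0}
spins, energy = ising_simulated_annealing(h, J, num_sweeps=500)
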
def description(self, description):
"""
        Updates the security label's description.
        Args:
            description: The new description text.
"""
self._data['description'] = description
request = self._base_request
request['description'] = description
    return self._tc_requests.update(request, owner=self.owner)
def authors(self):
"""A list of namedtuples storing author information,
where each namedtuple corresponds to one author.
The information in each namedtuple is (name surname initials id url).
All entries are strings.
"""
out = []
order = 'name surname initials id url'
auth = namedtuple('Author', order)
for author in self._citeInfoMatrix.get('author'):
author = {k.split(":", 1)[-1]: v for k, v in author.items()}
new = auth(name=author.get('index-name'), id=author.get('authid'),
surname=author.get('surname'),
initials=author.get('initials'),
url=author.get('author-url'))
out.append(new)
    return out or None
def refresh(self, conditional=False):
"""Re-retrieve the information for this object and returns the
refreshed instance.
:param bool conditional: If True, then we will search for a stored
header ('Last-Modified', or 'ETag') on the object and send that
as described in the `Conditional Requests`_ section of the docs
:returns: self
The reasoning for the return value is the following example: ::
repos = [r.refresh() for r in g.iter_repos('kennethreitz')]
Without the return value, that would be an array of ``None``'s and you
would otherwise have to do: ::
        repos = [r for r in g.iter_repos('kennethreitz')]
[r.refresh() for r in repos]
Which is really an anti-pattern.
.. versionchanged:: 0.5
.. _Conditional Requests:
http://developer.github.com/v3/#conditional-requests
"""
headers = {}
if conditional:
if self.last_modified:
headers['If-Modified-Since'] = self.last_modified
elif self.etag:
headers['If-None-Match'] = self.etag
headers = headers or None
json = self._json(self._get(self._api, headers=headers), 200)
if json is not None:
self.__init__(json, self._session)
    return self
def release(self, shortname):
"""
Get a specific release by its shortname.
:param shortname: str, eg. "ceph-3-0"
:returns: deferred that when fired returns a Release (Munch, dict-like)
object representing this release.
:raises: ReleaseNotFoundException if this release does not exist.
"""
url = 'api/v6/releases/?shortname=%s' % shortname
releases = yield self._get(url)
# Note, even if this shortname does not exist, _get() will not errback
# for this url. It simply returns an empty list. So check that here:
if not releases:
raise ReleaseNotFoundException('no release %s' % shortname)
release = Release.fromDict(releases[0])
release.connection = self
    defer.returnValue(release)
def make_hidden(self, request, queryset):
"""
        Mark the selected entries as hidden.
"""
queryset.update(status=HIDDEN)
EntryPublishedVectorBuilder().cache_flush()
self.message_user(
        request, _('The selected entries are now marked as hidden.'))
def remove(self, uuid, project=None):
"""Remove a task from Taskwarrior
uuid -- the UID of the task
project -- not used
"""
uuid = uuid.split('@')[0]
with self._lock:
        run(['task', 'rc.verbose=nothing', 'rc.data.location={self._data_location}'.format(**locals()), 'rc.confirmation=no', uuid, 'delete'])
def build_template(self, template, template_file, package):
"""
Compile the cheetah template in src into a python file in build
"""
try:
from Cheetah.Compiler import Compiler
except ImportError:
self.announce("unable to import Cheetah.Compiler, build failed")
raise
else:
comp = Compiler(file=template_file, moduleName=template)
# load configuration if it exists
conf_fn = DEFAULT_CONFIG
if exists(conf_fn):
with open(conf_fn, "rt") as config:
comp.updateSettingsFromConfigFileObj(config)
# and just why can't I configure these?
comp.setShBang("")
comp.addModuleHeader("pylint: disable=C,W,R,F")
outfd = join(self.build_lib, *package.split("."))
outfn = join(outfd, template + ".py")
if not exists(outfd):
makedirs(outfd)
if newer(template_file, outfn):
self.announce("compiling %s -> %s" % (template_file, outfd), 2)
with open(outfn, "w") as output:
                output.write(str(comp))
def histogram(self, counts, bin_edges, linestyle='solid'):
"""Plot a polar histogram.
The user needs to supply the histogram. This method only plots
the results. You can use NumPy's histogram function.
:param counts: array containing the count values.
:param bin_edges: array containing the bin edges in degrees
(or radians).
:param linestyle: the line style used to connect the data points.
May be None, or any line style accepted by TikZ (e.g. solid,
dashed, dotted, thick, or even combinations like
"red,thick,dashed").
Example::
>>> plot = artist.PolarPlot()
>>> x = np.random.uniform(0, 360, size=1000)
>>> n, bins = np.histogram(x, bins=np.linspace(0, 360, 37))
>>> plot.histogram(n, bins)
"""
if len(bin_edges) - 1 != len(counts):
raise RuntimeError(
'The length of bin_edges should be length of counts + 1')
x = []
y = []
if self.use_radians:
circle = 2 * np.pi
else:
circle = 360.
step = circle / 1800.
for i in range(len(bin_edges) - 1):
for bin_edge in np.arange(bin_edges[i], bin_edges[i + 1],
step=step):
x.append(bin_edge)
y.append(counts[i])
x.append(bin_edges[i + 1])
y.append(counts[i])
# If last edge is same as first bin edge, connect the ends.
if bin_edges[-1] % circle == bin_edges[0] % circle:
x.append(bin_edges[0])
y.append(counts[0])
    self.plot(x, y, mark=None, linestyle=linestyle)
def markowitz_portfolio(cov_mat, exp_rets, target_ret,
allow_short=False, market_neutral=False):
"""
Computes a Markowitz portfolio.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
exp_rets: pandas.Series
Expected asset returns (often historical returns).
target_ret: float
Target return of portfolio.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
market_neutral: bool, optional
If 'False' sum of weights equals one.
If 'True' sum of weights equal zero, i.e. create a
market neutral portfolio (implies allow_short=True).
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
if not isinstance(exp_rets, pd.Series):
raise ValueError("Expected returns is not a Series")
if not isinstance(target_ret, float):
raise ValueError("Target return is not a float")
if not cov_mat.index.equals(exp_rets.index):
raise ValueError("Indices do not match")
if market_neutral and not allow_short:
warnings.warn("A market neutral portfolio implies shorting")
allow_short=True
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# exp_rets*x >= target_ret and x >= 0
G = opt.matrix(np.vstack((-exp_rets.values,
-np.identity(n))))
h = opt.matrix(np.vstack((-target_ret,
+np.zeros((n, 1)))))
else:
# exp_rets*x >= target_ret
G = opt.matrix(-exp_rets.values).T
h = opt.matrix(-target_ret)
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
if not market_neutral:
b = opt.matrix(1.0)
else:
b = opt.matrix(0.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
    return weights
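
# A toy call, assuming cvxopt is installed (the surrounding module
# imports it as `opt` / `optsolvers`); the numbers are made up:
import numpy as np
import pandas as pd

tickers = ['A', 'B', 'C']
cov_mat = pd.DataFrame(np.diag([0.04, 0.09, 0.16]),
                       index=tickers, columns=tickers)
exp_rets = pd.Series([0.05, 0.07, 0.10], index=tickers)
weights = markowitz_portfolio(cov_mat, exp_rets, target_ret=0.06)
# long-only weights that sum to one and meet the 6% return target
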
def scan_file(path):
"""
    Scan `path` for viruses using ``clamd`` or ``clamscan`` (depending on
    :attr:`settings.USE_CLAMD`).
Args:
path (str): Relative or absolute path of file/directory you need to
scan.
Returns:
dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
Raises:
ValueError: When the server is not running.
        AssertionError: When the internal file doesn't exist.
"""
path = os.path.abspath(path)
if settings.USE_CLAMD:
return clamd.scan_file(path)
else:
        return clamscan.scan_file(path)
def _get_line_offset(self):
"""Get line offset for the current segment
Read line offset from the file and adapt it to the current segment
or half disk scan so that
y(l) ~ l - loff
because this is what get_geostationary_area_extent() expects.
"""
# Get line offset from the file
nlines = int(self.mda['number_of_lines'])
loff = np.float32(self.mda['loff'])
# Adapt it to the current segment
if self.is_segmented:
# loff in the file specifies the offset of the full disk image
# centre (1375/2750 for VIS/IR)
segment_number = self.mda['segment_sequence_number'] - 1
loff -= (self.mda['total_no_image_segm'] - segment_number - 1) * nlines
elif self.area_id in (NORTH_HEMIS, SOUTH_HEMIS):
# loff in the file specifies the start line of the half disk image
# in the full disk image
loff = nlines - loff
elif self.area_id == UNKNOWN_AREA:
logger.error('Cannot compute line offset for unknown area')
    return loff
def page_not_found(request, template_name="errors/404.html"):
"""
Mimics Django's 404 handler but with a different template path.
"""
context = {
"STATIC_URL": settings.STATIC_URL,
"request_path": request.path,
}
t = get_template(template_name)
    return HttpResponseNotFound(t.render(context, request))
def non_rotational_device(self, name, controller_port, device, non_rotational):
"""Sets a flag in the device information which indicates that the medium
is not based on rotational technology, i.e. that the access times are
more or less independent of the position on the medium. This may or may
not be supported by a particular drive, and is silently ignored in the
latter case. At the moment only hard disks (which is a misnomer in this
context) accept this setting. Changing the setting while the VM is
running is forbidden. The device must already exist; see
:py:func:`IMachine.attach_device` for how to attach a new device.
The @a controllerPort and @a device parameters specify the device slot and
    have the same meaning as with :py:func:`IMachine.attach_device`.
in name of type str
Name of the storage controller.
in controller_port of type int
Storage controller port.
in device of type int
Device slot in the given port.
in non_rotational of type bool
New value for the non-rotational device flag.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to modify an unregistered virtual machine.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
if not isinstance(controller_port, baseinteger):
raise TypeError("controller_port can only be an instance of type baseinteger")
if not isinstance(device, baseinteger):
raise TypeError("device can only be an instance of type baseinteger")
if not isinstance(non_rotational, bool):
raise TypeError("non_rotational can only be an instance of type bool")
self._call("nonRotationalDevice",
               in_p=[name, controller_port, device, non_rotational])
def alliance(self) -> Union[EveAllianceInfo, None]:
"""
Pseudo foreign key from alliance_id to EveAllianceInfo
:raises: EveAllianceInfo.DoesNotExist
:return: EveAllianceInfo or None
"""
if self.alliance_id is None:
return None
    return EveAllianceInfo.objects.get(alliance_id=self.alliance_id)
def find(key):
"""Return the value associated with a key.
If there is no value with the given key, returns ``None``.
"""
docs = list(collection.find({KEY_FIELD: key}))
# Return None if we didn't find anything.
if not docs:
return None
pickled_value = docs[0][VALUE_FIELD]
# Unpickle and return the value.
    return pickle.loads(pickled_value)
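
# A hypothetical companion writer, assuming the same module-level
# `collection`, KEY_FIELD and VALUE_FIELD that find() uses:
import pickle

def store(key, value):
    collection.update_one(
        {KEY_FIELD: key},
        {'$set': {VALUE_FIELD: pickle.dumps(value)}},
        upsert=True,
    )
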
def _tf_repeat(self, a, repeats):
"""Tensorflow version of np.repeat for 1D"""
# https://github.com/tensorflow/tensorflow/issues/8521
if len(a.get_shape()) != 1:
raise AssertionError("This is not a 1D Tensor")
a = tf.expand_dims(a, -1)
a = tf.tile(a, [1, repeats])
a = self.tf_flatten(a)
        return a
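
# The same logic checked standalone in TF 1.x style (tf_flatten is
# assumed to be a reshape to 1-D); it mirrors np.repeat([1, 2, 3], 2):
import tensorflow as tf

a = tf.constant([1, 2, 3])
tiled = tf.tile(tf.expand_dims(a, -1), [1, 2])
flat = tf.reshape(tiled, [-1])  # -> [1, 1, 2, 2, 3, 3]
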
def file_and_line(self):
"""Return the filename and line number where this rule originally
appears, in the form "foo.scss:3". Used for error messages.
"""
ret = "%s:%d" % (self.source_file.path, self.lineno)
if self.from_source_file:
ret += " (%s:%d)" % (self.from_source_file.path, self.from_lineno)
        return ret
def dump(voevent, file, pretty_print=True, xml_declaration=True):
"""Writes the voevent to the file object.
e.g.::
with open('/tmp/myvoevent.xml','wb') as f:
voeventparse.dump(v, f)
Args:
voevent(:class:`Voevent`): Root node of the VOevent etree.
file (io.IOBase): An open (binary mode) file object for writing.
pretty_print(bool): See :func:`dumps`
xml_declaration(bool): See :func:`dumps`
"""
    file.write(dumps(voevent, pretty_print, xml_declaration))
def _get_branches(self, closed=False):
"""
        Gets branches for this repository.
        Returns only non-closed branches by default.
        :param closed: also return closed branches (mercurial only)
"""
if self._empty:
return {}
def _branchtags(localrepo):
"""
Patched version of mercurial branchtags to not return the closed
branches
            :param localrepo: localrepository instance
"""
bt = {}
bt_closed = {}
for bn, heads in localrepo.branchmap().iteritems():
tip = heads[-1]
if 'close' in localrepo.changelog.read(tip)[5]:
bt_closed[bn] = tip
else:
bt[bn] = tip
if closed:
bt.update(bt_closed)
return bt
sortkey = lambda ctx: ctx[0] # sort by name
_branches = [(safe_unicode(n), hex(h),) for n, h in
_branchtags(self._repo).items()]
        return OrderedDict(sorted(_branches, key=sortkey, reverse=False))
def data(self):
"""
A :class:`dict` of data parsed from :attr:`.content`.
"""
if not self._data:
self._data = self.content_parser(self.content)
    return self._data
def collect_fields(node):
"""
Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230
"""
fields = set()
for leaf in node:
if leaf.get('kind', None) == "Field":
fields.add(leaf["name"]["value"])
if leaf.get("selection_set", None):
fields = fields.union(collect_fields(leaf["selection_set"]["selections"]))
    return fields
def invariant(self):
"""Verify the validity of the node spec object
The type of each sub-object is verified and then
the validity of each node spec item is verified by calling
it invariant() method. It also makes sure that there is at most
one default input and one default output.
"""
# Verify the description and singleNodeOnly attributes
assert isinstance(self.description, str)
assert isinstance(self.singleNodeOnly, bool)
# Make sure that all items dicts are really dicts
assert isinstance(self.inputs, dict)
assert isinstance(self.outputs, dict)
assert isinstance(self.parameters, dict)
assert isinstance(self.commands, dict)
# Verify all item dicts
hasDefaultInput = False
for k, v in self.inputs.items():
assert isinstance(k, str)
assert isinstance(v, InputSpec)
v.invariant()
if v.isDefaultInput:
assert not hasDefaultInput
hasDefaultInput = True
hasDefaultOutput = False
for k, v in self.outputs.items():
assert isinstance(k, str)
assert isinstance(v, OutputSpec)
v.invariant()
if v.isDefaultOutput:
assert not hasDefaultOutput
hasDefaultOutput = True
for k, v in self.parameters.items():
assert isinstance(k, str)
assert isinstance(v, ParameterSpec)
v.invariant()
for k, v in self.commands.items():
assert isinstance(k, str)
assert isinstance(v, CommandSpec)
            v.invariant()
def query_mxrecords(self):
"""
        Looks up the MX DNS records of the recipient's SMTP server
"""
import dns.resolver
logging.info('Resolving DNS query...')
answers = dns.resolver.query(self.domain, 'MX')
addresses = [answer.exchange.to_text() for answer in answers]
logging.info(
'{} records found:\n{}'.format(
len(addresses), '\n '.join(addresses)))
        return addresses
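# Standalone equivalent of the lookup above; note that in dnspython >= 2.0
# resolver.query() was renamed to resolver.resolve() (example.com is a stand-in):
import dns.resolver

answers = dns.resolver.resolve("example.com", "MX")
print([answer.exchange.to_text() for answer in answers])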
def update_message_type(self, message_type):
"""
Update an existing message type
:param message_type: is the updated message type that the
client wants to update
"""
self._validate_uuid(message_type.message_type_id)
url = "/notification/v1/message-type/{}".format(
message_type.message_type_id)
response = NWS_DAO().putURL(
url, self._write_headers(), self._json_body(
message_type.json_data()))
if response.status != 204:
raise DataFailureException(url, response.status, response.data)
        return response.status
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
            a dictionary in the form {Spin.up: [k index][b index]{Element: values}}
"""
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float)
for i in range(self.nkpoints)]
for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints),
range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
        return dico
def inorder(self, funct, stopOn=None):
""" Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
"""
if stopOn is None:
for i in self.children:
i.inorder(funct)
else:
for i in self.children:
if i.inorder(funct) == stopOn:
return stopOn
        return funct(self)
def p_file_chksum_1(self, p):
"""file_chksum : FILE_CHKSUM CHKSUM"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.set_file_chksum(self.document, value)
except OrderError:
self.order_error('FileChecksum', 'FileName', p.lineno(1))
except CardinalityError:
            self.more_than_one_error('FileChecksum', p.lineno(1))
def wait(self, timeout=None):
"""Wait until the result is available or until roughly timeout seconds
pass."""
logger = logging.getLogger(__name__)
if int(self.max_sleep_interval) < int(self._min_sleep_interval):
self.max_sleep_interval = int(self._min_sleep_interval)
t0 = time.time()
sleep_seconds = min(5, self.max_sleep_interval)
status = self.status
prev_status = status
while status < COMPLETED:
logger.debug("sleep for %d seconds", sleep_seconds)
time.sleep(sleep_seconds)
if 2*sleep_seconds <= self.max_sleep_interval:
sleep_seconds *= 2
if timeout is not None:
if int(time.time() - t0) > int(timeout):
return
status = self.status
if status != prev_status:
sleep_seconds = min(5, self.max_sleep_interval)
                prev_status = status
def _extract_ips(self, data):
'''
Extract ip addressess from openstack structure
{
'pl-krk-2-int-301-c2-int-1': [
{
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:29:f1:bb',
'version': 4,
'addr': '10.185.138.36',
'OS-EXT-IPS:type': 'fixed'
}
]
}
:arg data: dict
:returns list
'''
result = []
for region in data.items():
for interface in region[1]:
result.append(interface['addr'])
        return result
def _set_show_vcs(self, v, load=False):
"""
Setter method for show_vcs, mapped from YANG variable /brocade_vcs_rpc/show_vcs (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_vcs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_vcs() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_vcs.show_vcs, is_leaf=True, yang_name="show-vcs", rest_name="show-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getclusterinfo-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_vcs must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_vcs.show_vcs, is_leaf=True, yang_name="show-vcs", rest_name="show-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getclusterinfo-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)""",
})
self.__show_vcs = t
if hasattr(self, '_set'):
            self._set()
def Woldesemayat_Ghajar(x, rhol, rhog, sigma, m, D, P, angle=0, g=g):
r'''Calculates void fraction in two-phase flow according to the model of
[1]_.
.. math::
\alpha = \frac{v_{gs}}{v_{gs}\left(1 + \left(\frac{v_{ls}}{v_{gs}}
\right)^{\left(\frac{\rho_g}{\rho_l}\right)^{0.1}}\right)
+ 2.9\left[\frac{gD\sigma(1+\cos\theta)(\rho_l-\rho_g)}
{\rho_l^2}\right]^{0.25}(1.22 + 1.22\sin\theta)^{\frac{P}{P_{atm}}}}
.. math::
v_{gs} = \frac{mx}{\rho_g \frac{\pi}{4}D^2}
.. math::
v_{ls} = \frac{m(1-x)}{\rho_l \frac{\pi}{4}D^2}
Parameters
----------
x : float
Quality at the specific tube interval []
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
sigma : float
Surface tension of liquid [N/m]
m : float
Mass flow rate of both phases, [kg/s]
D : float
Diameter of the channel, [m]
P : float
Pressure of the fluid, [Pa]
angle : float
Angle of the channel with respect to the horizontal (vertical = 90),
[degrees]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
alpha : float
Void fraction (area of gas / total area of channel), [-]
Notes
-----
Strongly recommended.
Examples
--------
>>> Woldesemayat_Ghajar(0.4, 800., 2.5, sigma=0.2, m=1, D=0.3, P=1E6, angle=45)
0.7640815513429202
References
----------
.. [1] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of Void
Fraction Correlations for Different Flow Patterns in Horizontal and
Upward Inclined Pipes." International Journal of Multiphase Flow 33,
no. 4 (April 2007): 347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004.
'''
vgs = m*x/(rhog*pi/4*D**2)
vls = m*(1-x)/(rhol*pi/4*D**2)
first = vgs*(1 + (vls/vgs)**((rhog/rhol)**0.1))
second = 2.9*((g*D*sigma*(1 + cos(radians(angle)))*(rhol-rhog))/rhol**2)**0.25
third = (1.22 + 1.22*sin(radians(angle)))**(101325./P)
    return vgs/(first + second*third)
def entrez(args):
"""
%prog entrez <filename|term>
`filename` contains a list of terms to search. Or just one term. If the
results are small in size, e.g. "--format=acc", use "--batchsize=100" to speed
the download.
"""
p = OptionParser(entrez.__doc__)
allowed_databases = {"fasta": ["genome", "nuccore", "nucgss", "protein", "nucest"],
"asn.1": ["genome", "nuccore", "nucgss", "protein", "gene"],
"xml": ["genome", "nuccore", "nucgss", "nucest", "gene"],
"gb": ["genome", "nuccore", "nucgss"],
"est": ["nucest"],
"gss": ["nucgss"],
"acc": ["nuccore"],
}
valid_formats = tuple(allowed_databases.keys())
valid_databases = ("genome", "nuccore", "nucest",
"nucgss", "protein", "gene")
p.add_option("--noversion", dest="noversion",
default=False, action="store_true",
help="Remove trailing accession versions")
p.add_option("--format", default="fasta", choices=valid_formats,
help="download format [default: %default]")
p.add_option("--database", default="nuccore", choices=valid_databases,
help="search database [default: %default]")
p.add_option("--retmax", default=1000000, type="int",
help="how many results to return [default: %default]")
p.add_option("--skipcheck", default=False, action="store_true",
help="turn off prompt to check file existence [default: %default]")
p.add_option("--batchsize", default=500, type="int",
help="download the results in batch for speed-up [default: %default]")
p.set_outdir(outdir=None)
p.add_option("--outprefix", default="out",
help="output file name prefix [default: %default]")
p.set_email()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
filename, = args
if op.exists(filename):
pf = filename.rsplit(".", 1)[0]
list_of_terms = [row.strip() for row in open(filename)]
if opts.noversion:
list_of_terms = [x.rsplit(".", 1)[0] for x in list_of_terms]
else:
pf = filename
# the filename is the search term
list_of_terms = [filename.strip()]
fmt = opts.format
database = opts.database
batchsize = opts.batchsize
assert database in allowed_databases[fmt], \
"For output format '{0}', allowed databases are: {1}".\
format(fmt, allowed_databases[fmt])
assert batchsize >= 1, "batchsize must >= 1"
if " " in pf:
pf = opts.outprefix
outfile = "{0}.{1}".format(pf, fmt)
outdir = opts.outdir
if outdir:
mkdir(outdir)
# If noprompt, will not check file existence
if not outdir:
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
return
seen = set()
totalsize = 0
for id, size, term, handle in batch_entrez(list_of_terms, retmax=opts.retmax,
rettype=fmt, db=database, batchsize=batchsize,
email=opts.email):
if outdir:
outfile = urljoin(outdir, "{0}.{1}".format(term, fmt))
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
continue
rec = handle.read()
if id in seen:
logging.error("Duplicate key ({0}) found".format(rec))
continue
totalsize += size
print(rec, file=fw)
print(file=fw)
seen.add(id)
if seen:
print("A total of {0} {1} records downloaded.".
format(totalsize, fmt.upper()), file=sys.stderr)
    return outfile
async def _auth_plain(self, username, password):
"""
Performs an authentication attempt using the PLAIN mechanism.
Protocol:
1. Format the username and password in a suitable way ;
2. The formatted string is base64-encoded ;
3. The string 'AUTH PLAIN' and a space character are prepended to
the base64-encoded username and password and sent to the
server ;
4. If the server replies with a 235 return code, user is
authenticated.
Args:
username (str): Identifier of the user trying to authenticate.
password (str): Password for the user.
Raises:
ConnectionResetError: If the connection with the server is
                unexpectedly lost.
SMTPAuthenticationError: If the authentication attempt fails.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response.
"""
mechanism = "PLAIN"
credentials = "\0{}\0{}".format(username, password)
encoded_credentials = SMTP.b64enc(credentials)
try:
code, message = await self.do_cmd(
"AUTH", mechanism, encoded_credentials, success=(235, 503)
)
except SMTPCommandFailedError as e:
raise SMTPAuthenticationError(e.code, e.message, mechanism)
        return code, message
def readExcel(usr_path=""):
"""
Read Excel file(s)
Enter a file path, directory path, or leave args blank to trigger gui.
:param str usr_path: Path to file / directory (optional)
:return str cwd: Current working directory
"""
global cwd, files
start = clock()
files[".xls"] = []
__read(usr_path, ".xls")
end = clock()
logger_benchmark.info(log_benchmark("readExcel", start, end))
    return cwd
def get_relationship_dicts(self):
"""Given GO DAG relationships, return summaries per GO ID."""
if not self.relationships:
return None
for goid, goobj in self.go2obj.items():
for reltyp, relset in goobj.relationship.items():
relfwd_goids = set(o.id for o in relset)
# for relfwd_goid in relfwd_goids:
# assert relfwd_goid in self.go2obj, "{GO} {REL} NOT FOUND {GO_R}".format(
# GO=goid, REL=reltyp, GO_R=relfwd_goid)
print("CountRelativesInit RELLLLS", goid, goobj.id, reltyp, relfwd_goids) | Given GO DAG relationships, return summaries per GO ID. |
def _to_query_json(self):
""" Return the options as a dictionary to be used as JSON in a query job. """
return {
'quote': self._quote,
'fieldDelimiter': self._delimiter,
'encoding': self._encoding.upper(),
'skipLeadingRows': self._skip_leading_rows,
'allowQuotedNewlines': self._allow_quoted_newlines,
'allowJaggedRows': self._allow_jagged_rows
        }
def _get_section(name, source):
# type: (str, str) -> Optional[str]
"""Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
multiple times, each instance will be merged into a single section.
"""
pattern = re.compile(
'^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
re.IGNORECASE | re.MULTILINE)
usage = None
for section in pattern.findall(source):
usage = _merge_section(usage, section.strip())
    return usage
def create_from_file_extension(cls, file_extension):
"""
Create a table writer class instance from a file extension.
Supported file extensions are as follows:
================== ===================================
Extension Writer Class
================== ===================================
``".csv"`` :py:class:`~.CsvTableWriter`
``".htm"`` :py:class:`~.HtmlTableWriter`
``".html"`` :py:class:`~.HtmlTableWriter`
``".js"`` :py:class:`~.JavaScriptTableWriter`
``".json"`` :py:class:`~.JsonTableWriter`
``".jsonl"`` :py:class:`~.JsonLinesTableWriter`
``".ltsv"`` :py:class:`~.LtsvTableWriter`
``".ldjson"`` :py:class:`~.JsonLinesTableWriter`
``".md"`` :py:class:`~.MarkdownTableWriter`
``".ndjson"`` :py:class:`~.JsonLinesTableWriter`
``".py"`` :py:class:`~.PythonCodeTableWriter`
``".rst"`` :py:class:`~.RstGridTableWriter`
``".tsv"`` :py:class:`~.TsvTableWriter`
``".xls"`` :py:class:`~.ExcelXlsTableWriter`
``".xlsx"`` :py:class:`~.ExcelXlsxTableWriter`
``".sqlite"`` :py:class:`~.SqliteTableWriter`
``".sqlite3"`` :py:class:`~.SqliteTableWriter`
``".toml"`` :py:class:`~.TomlTableWriter`
================== ===================================
:param str file_extension:
File extension string (case insensitive).
:return:
Writer instance that coincides with the ``file_extension``.
:rtype:
:py:class:`~pytablewriter.writer._table_writer.TableWriterInterface`
:raises pytablewriter.WriterNotFoundError:
|WriterNotFoundError_desc| the file extension.
"""
ext = os.path.splitext(file_extension)[1]
        if not typepy.is_null_string(ext):
            file_extension = ext
file_extension = file_extension.lstrip(".").lower()
for table_format in TableFormat:
if file_extension not in table_format.file_extensions:
continue
if table_format.format_attribute & FormatAttr.SECONDARY_EXT:
continue
return table_format.writer_class()
raise WriterNotFoundError(
"\n".join(
[
"{:s} (unknown file extension).".format(file_extension),
"",
"acceptable file extensions are: {}.".format(", ".join(cls.get_extensions())),
]
)
        )
def send(self, message, binary=None):
"""
Send a frame over the websocket with message as its payload
"""
if binary is None:
binary = not isinstance(message, six.string_types)
opcode = self.OPCODE_BINARY if binary else self.OPCODE_TEXT
try:
self.send_frame(message, opcode)
except WebSocketError:
            raise WebSocketError("Socket is dead")
def stem(self, word):
"""Return CLEF German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGerman()
>>> stmr.stem('lesen')
'lese'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# remove umlauts
word = word.translate(self._umlauts)
# remove plurals
wlen = len(word) - 1
if wlen > 3:
if wlen > 5:
if word[-3:] == 'nen':
return word[:-3]
if wlen > 4:
if word[-2:] in {'en', 'se', 'es', 'er'}:
return word[:-2]
if word[-1] in {'e', 'n', 'r', 's'}:
return word[:-1]
        return word
def get_message(self, set_slave_ok, sock_info, use_cmd=False):
"""Get a query message, possibly setting the slaveOk bit."""
if set_slave_ok:
# Set the slaveOk bit.
flags = self.flags | 4
else:
flags = self.flags
ns = self.namespace()
spec = self.spec
if use_cmd:
spec = self.as_command(sock_info)[0]
if sock_info.op_msg_enabled:
request_id, msg, size, _ = _op_msg(
0, spec, self.db, self.read_preference,
set_slave_ok, False, self.codec_options,
ctx=sock_info.compression_context)
return request_id, msg, size
ns = _UJOIN % (self.db, "$cmd")
ntoreturn = -1 # All DB commands return 1 document
else:
# OP_QUERY treats ntoreturn of -1 and 1 the same, return
# one document and close the cursor. We have to use 2 for
# batch size if 1 is specified.
ntoreturn = self.batch_size == 1 and 2 or self.batch_size
if self.limit:
if ntoreturn:
ntoreturn = min(self.limit, ntoreturn)
else:
ntoreturn = self.limit
if sock_info.is_mongos:
spec = _maybe_add_read_preference(spec,
self.read_preference)
return query(flags, ns, self.ntoskip, ntoreturn,
spec, None if use_cmd else self.fields,
                     self.codec_options, ctx=sock_info.compression_context)
def indexbox(message="Shall I continue?", title="", choices=["Yes", "No"]):
"""
Original doc:
Display a buttonbox with the specified choices.
Return the index of the choice selected.
"""
reply = buttonbox(message, title, choices)
    for index, choice in enumerate(choices):
        if reply == choice:
            return index
async def submit_batches(self, request):
"""Accepts a binary encoded BatchList and submits it to the validator.
Request:
body: octet-stream BatchList of one or more Batches
Response:
status:
- 202: Batches submitted and pending
link: /batches or /batch_statuses link for submitted batches
"""
timer_ctx = self._post_batches_total_time.time()
self._post_batches_count.inc()
# Parse request
if request.headers['Content-Type'] != 'application/octet-stream':
LOGGER.debug(
'Submission headers had wrong Content-Type: %s',
request.headers['Content-Type'])
self._post_batches_error.inc()
raise errors.SubmissionWrongContentType()
body = await request.read()
if not body:
LOGGER.debug('Submission contained an empty body')
self._post_batches_error.inc()
raise errors.NoBatchesSubmitted()
try:
batch_list = BatchList()
batch_list.ParseFromString(body)
except DecodeError:
LOGGER.debug('Submission body could not be decoded: %s', body)
self._post_batches_error.inc()
raise errors.BadProtobufSubmitted()
# Query validator
error_traps = [error_handlers.BatchInvalidTrap,
error_handlers.BatchQueueFullTrap]
validator_query = client_batch_submit_pb2.ClientBatchSubmitRequest(
batches=batch_list.batches)
with self._post_batches_validator_time.time():
await self._query_validator(
Message.CLIENT_BATCH_SUBMIT_REQUEST,
client_batch_submit_pb2.ClientBatchSubmitResponse,
validator_query,
error_traps)
# Build response envelope
id_string = ','.join(b.header_signature for b in batch_list.batches)
status = 202
link = self._build_url(request, path='/batch_statuses', id=id_string)
retval = self._wrap_response(
request,
metadata={'link': link},
status=status)
timer_ctx.stop()
        return retval
def deploy_master_contract(self, deployer_account=None, deployer_private_key=None) -> str:
"""
Deploy master contract. Takes deployer_account (if unlocked in the node) or the deployer private key
:param deployer_account: Unlocked ethereum account
:param deployer_private_key: Private key of an ethereum account
:return: deployed contract address
"""
assert deployer_account or deployer_private_key
deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
safe_contract = self.get_contract()
tx = safe_contract.constructor().buildTransaction({'from': deployer_address})
tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
public_key=deployer_account)
tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
assert tx_receipt.status
contract_address = tx_receipt.contractAddress
# Init master copy
master_safe = self.get_contract(contract_address)
tx = master_safe.functions.setup(
# We use 2 owners that nobody controls for the master copy
["0x0000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000003"],
2, # Threshold. Maximum security
NULL_ADDRESS, # Address for optional DELEGATE CALL
b'', # Data for optional DELEGATE CALL
NULL_ADDRESS, # Payment token
0, # Payment
NULL_ADDRESS # Refund receiver
).buildTransaction({'from': deployer_address})
tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
public_key=deployer_account)
tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
assert tx_receipt.status
logger.info("Deployed and initialized Safe Master Contract=%s by %s", contract_address, deployer_address)
        return contract_address
def soaproot(self, node):
"""
Get whether the specified I{node} is a soap encoded root.
This is determined by examining @soapenc:root='1'.
The node is considered to be a root when the attribute
is not specified.
@param node: A node to evaluate.
@type node: L{Element}
@return: True if a soap encoded root.
@rtype: bool
"""
root = node.getAttribute('root', ns=soapenc)
if root is None:
return True
else:
            return root.value == '1'
def _build_session(self, name, start_info, end_info):
"""Builds a session object."""
assert start_info is not None
result = api_pb2.Session(
name=name,
start_time_secs=start_info.start_time_secs,
model_uri=start_info.model_uri,
metric_values=self._build_session_metric_values(name),
monitor_url=start_info.monitor_url)
if end_info is not None:
result.status = end_info.status
result.end_time_secs = end_info.end_time_secs
        return result
def expire_soon(self, seconds):
"""
Returns ``True`` if credentials expire sooner than specified.
:param int seconds:
Number of seconds.
:returns:
``True`` if credentials expire sooner than specified,
else ``False``.
"""
if self.expiration_time:
return self.expiration_time < int(time.time()) + int(seconds)
else:
            return False
def show_corrections(self, status=None, nids=None):
"""
Show the corrections applied to the flow at run-time.
Args:
status: if not None, only the tasks with this status are select.
nids: optional list of node identifiers used to filter the tasks.
Return: The number of corrections found.
"""
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="))
for corr in task.corrections:
pprint(corr)
if not count: print("No correction found.")
        return count
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
"""
View the internal symbols using the forward function.
:param sym_name:
:param bucket_kwargs:
:param input_dict:
:return:
"""
data_shapes = {k: v.shape for k, v in arg_dict.items()}
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
internal_sym = self.sym.get_internals()[sym_name]
data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
for k, v in self.data_shapes.items()
if k in internal_sym.list_arguments()}
params = {k: v for k, v in self.params.items() if
k in internal_sym.list_arguments()}
aux_states = {k: v for k, v in self.aux_states.items()
if k in internal_sym.list_auxiliary_states()}
exe = internal_sym.bind(ctx=self.ctx,
args=dict(params, **data_inputs),
args_grad=None,
grad_req='null',
aux_states=aux_states,
shared_exec=self.exe)
for k, v in arg_dict.items():
exe.arg_dict[k][:] = v
exe.forward(is_train=False)
assert 1 == len(exe.outputs)
for output in exe.outputs:
output.wait_to_read()
        return exe.outputs[0]
def _shutdown(self, manual):
"""
Shuts down the TLS session and then shuts down the underlying socket
:param manual:
A boolean if the connection was manually shutdown
"""
if self._ssl is None:
return
while True:
result = libssl.SSL_shutdown(self._ssl)
# Don't be noisy if the socket is already closed
try:
self._raw_write()
except (TLSDisconnectError):
pass
if result >= 0:
break
if result < 0:
error = libssl.SSL_get_error(self._ssl, result)
if error == LibsslConst.SSL_ERROR_WANT_READ:
if self._raw_read() != b'':
continue
else:
break
elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
self._raw_write()
continue
else:
handle_openssl_error(0, TLSError)
if manual:
self._local_closed = True
libssl.SSL_free(self._ssl)
self._ssl = None
# BIOs are freed by SSL_free()
self._rbio = None
self._wbio = None
try:
self._socket.shutdown(socket_.SHUT_RDWR)
except (socket_.error):
            pass
def get_url_shortener():
"""
Return the selected URL shortener backend.
"""
try:
backend_module = import_module(URL_SHORTENER_BACKEND)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % URL_SHORTENER_BACKEND,
RuntimeWarning)
backend = default_backend
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = default_backend
    return backend
def register_model(model_name, dataset_name, model_func):
"""Register a new model that can be obtained with `get_model_config`."""
model_map = _get_model_map(dataset_name)
if model_name in model_map:
raise ValueError("Model \"%s\" is already registered for dataset"
"\"%s\"" % (model_name, dataset_name))
    model_map[model_name] = model_func
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
        raise HTTPError(404, "Not found: " + repr(path))
def step(self, action):
"""
Move the end effector(s) according to the input control.
Args:
action (numpy array): The array should have the corresponding elements.
0-2: The desired change in end effector position in x, y, and z.
3-6: The desired change in orientation, expressed as a (x, y, z, w) quaternion.
Note that this quaternion encodes a relative rotation with respect to the
current gripper orientation. If the current rotation is r, this corresponds
to a quaternion d such that r * d will be the new rotation.
*: Controls for gripper actuation.
        Note: When wrapping around a Baxter environment, the indices 0-6 indicate the
right hand. Indices 7-13 indicate the left hand, and the rest (*) are the gripper
inputs (first right, then left).
"""
input_1 = self._make_input(action[:7], self.env._right_hand_quat)
if self.env.mujoco_robot.name == "sawyer":
velocities = self.controller.get_control(**input_1)
low_action = np.concatenate([velocities, action[7:]])
elif self.env.mujoco_robot.name == "baxter":
input_2 = self._make_input(action[7:14], self.env._left_hand_quat)
velocities = self.controller.get_control(input_1, input_2)
low_action = np.concatenate([velocities, action[14:]])
else:
raise Exception(
"Only Sawyer and Baxter robot environments are supported for IK "
"control currently."
)
# keep trying to reach the target in a closed-loop
for i in range(self.action_repeat):
ret = self.env.step(low_action)
velocities = self.controller.get_control()
if self.env.mujoco_robot.name == "sawyer":
low_action = np.concatenate([velocities, action[7:]])
else:
low_action = np.concatenate([velocities, action[14:]])
        return ret
def transform_to_chomsky_normal_form(grammar, inplace=False):
# type: (Grammar, bool) -> Grammar
"""
Transform grammar to Chomsky Normal Form.
:param grammar: Grammar to transform.
:param inplace: True if transformation should be performed in place. False by default.
:return: Grammar in Chomsky Normal Form.
"""
# Copy if required
if inplace is False:
grammar = copy(grammar)
# Create nonterminals rewritable to the terminal.
# They will be inserted into the grammar as needed.
fill = TerminalsFilling(grammar)
to_process = Queue()
for r in grammar.rules:
to_process.put(r)
while not to_process.empty():
rule = to_process.get() # type: Type[Rule]
# Check, if rule must be split
if len(rule.right) > 2:
grammar.rules.remove(rule)
# create nonterm that represent group on the right
created_nonterm = type("ChomskyGroup[" + rule.__name__ + "]",
(ChomskyGroupNonterminal,),
ChomskyGroupNonterminal.__dict__.copy()) # type: Type[ChomskyGroupNonterminal]
created_nonterm.group = rule.right[1:]
# create rule that replace current
created_left_rule = type("ChomskySplit[" + rule.__name__ + "]",
(ChomskySplitRule,),
ChomskySplitRule.__dict__.copy()) # type: Type[ChomskySplitRule]
created_left_rule.rule = ([rule.fromSymbol], [rule.right[0], created_nonterm])
created_left_rule.from_rule = rule
# create rule with symbols on the right
created_right_rule = type("ChomskyRest[" + rule.__name__ + "]",
(ChomskyRestRule,),
ChomskyRestRule.__dict__.copy()) # type: Type[ChomskyRestRule]
created_right_rule.rule = ([created_nonterm], rule.right[1:])
created_right_rule.from_rule = rule
# add it to the grammar
grammar.nonterminals.add(created_nonterm)
grammar.rules.add(created_left_rule, created_right_rule)
to_process.put(created_left_rule)
to_process.put(created_right_rule)
# check, if must replace terminal
elif len(rule.right) == 2:
if rule.right[0] in grammar.terminals:
# first symbol is terminal
# remove rule from grammar
grammar.rules.remove(rule)
# get nonterminal rewritable to the terminal and add that rules into the grammar implicitly
symb = fill.get(rule.right[0])
# create rule replacing the original one
created = type("ChomskyLeft[" + rule.__name__ + "]",
(ChomskyTerminalReplaceRule,),
ChomskyTerminalReplaceRule.__dict__.copy()) # type: Type[ChomskyTerminalReplaceRule]
created.rule = ([rule.fromSymbol], [symb, rule.right[1]])
created.from_rule = rule
created.replaced_index = 0
# add it into the grammar
grammar.rules.add(created)
to_process.put(created)
elif rule.right[1] in grammar.terminals:
# second symbol is terminal
# remove rule from grammar
grammar.rules.remove(rule)
# get nonterminal rewritable to the terminal and add that rules into the grammar implicitly
symb = fill.get(rule.right[1])
# create rule replacing the original one
created = type("ChomskyRight[" + rule.__name__ + "]",
(ChomskyTerminalReplaceRule,),
ChomskyTerminalReplaceRule.__dict__.copy())
created.rule = ([rule.fromSymbol], [rule.right[0], symb])
created.from_rule = rule
created.replaced_index = 1
# add it into the grammar
grammar.rules.add(created)
to_process.put(created)
    return grammar
def exploit(self):
"""
Starts the exploiting phase, you should run setup before running this function.
if auto is set, this function will fire the exploit to all systems. Otherwise a curses interface is shown.
"""
search = ServiceSearch()
host_search = HostSearch()
services = search.get_services(tags=['MS17-010'])
services = [service for service in services]
if len(services) == 0:
print_error("No services found that are vulnerable for MS17-010")
return
if self.auto:
print_success("Found {} services vulnerable for MS17-010".format(len(services)))
for service in services:
print_success("Exploiting " + str(service.address))
host = host_search.id_to_object(str(service.address))
system_os = ''
if host.os:
system_os = host.os
else:
system_os = self.detect_os(str(service.address))
host.os = system_os
host.save()
text = self.exploit_single(str(service.address), system_os)
print_notification(text)
else:
service_list = []
for service in services:
host = host_search.id_to_object(str(service.address))
system_os = ''
if host.os:
system_os = host.os
else:
system_os = self.detect_os(str(service.address))
host.os = system_os
host.save()
service_list.append({'ip': service.address, 'os': system_os, 'string': "{ip} ({os}) {hostname}".format(ip=service.address, os=system_os, hostname=host.hostname)})
            draw_interface(service_list, self.callback, "Exploiting {ip} with OS: {os}")
def get_file_url(self):
"""stub"""
if self.has_file_url():
return self._get_asset_content(
Id(self.my_osid_object._my_map['fileId']['assetId']),
self.my_osid_object._my_map['fileId']['assetContentTypeId']).get_url()
        raise IllegalState()
def parse_emails(values):
'''
Take a string or list of strings and try to extract all the emails
'''
emails = []
if isinstance(values, str):
values = [values]
# now we know we have a list of strings
for value in values:
matches = re_emails.findall(value)
emails.extend([match[2] for match in matches])
    return emails
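# parse_emails() leans on a module-level re_emails pattern that is not shown;
# below is a hypothetical stand-in whose third group (match[2]) captures the
# bare address, as the list comprehension above expects:
import re

re_emails = re.compile(r'(<?)(mailto:)?([\w.+-]+@[\w-]+(?:\.[\w-]+)+)')
values = ["Reply to <alice@example.com> or bob@test.org"]
emails = []
for value in values:
    emails.extend(m[2] for m in re_emails.findall(value))
print(emails)  # ['alice@example.com', 'bob@test.org']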
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for theta levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Change when Python 2.7 no longer supported
# Pull out keyword arguments
tmpk_out = kwargs.pop('tmpk_out', False)
max_iters = kwargs.pop('max_iters', 50)
eps = kwargs.pop('eps', 1e-6)
axis = kwargs.pop('axis', 0)
bottom_up_search = kwargs.pop('bottom_up_search', True)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
slices = tuple(slices)
pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
theta_levels = np.asanyarray(theta_levels.to('kelvin')).reshape(-1)
isentlevels = theta_levels[np.argsort(theta_levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(theta_levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, theta_levels, axis,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if tmpk_out = true, calculate temperature and output as last item in list
if tmpk_out:
ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=axis)
if len(args) > 1:
ret.extend(others)
else:
ret.append(others)
return ret | r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for theta levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature |
def getOperationName(self, ps, action):
'''Returns operation name.
action -- soapAction value
'''
method = self.root.get(_get_element_nsuri_name(ps.body_root)) or \
self.soapAction.get(action)
if method is None:
            raise UnknownRequestException(
                'failed to map request to a method: action(%s), root%s' % (action, _get_element_nsuri_name(ps.body_root)))
        return method
def showGridColumns( self ):
"""
Returns whether or not this delegate should draw columns when \
rendering the grid.
:return <bool>
"""
delegate = self.itemDelegate()
if ( isinstance(delegate, XTreeWidgetDelegate) ):
return delegate.showGridColumns()
        return False
def start(self, context):
"""Initialize the database connection."""
self.config['alias'] = self.alias
safe_config = dict(self.config)
del safe_config['host']
log.info("Connecting MongoEngine database layer.", extra=dict(
uri = redact_uri(self.config['host']),
config = self.config,
))
        self.connection = connect(**self.config)
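# connect() here is mongoengine.connect; a typical config (values illustrative)
# passed the same way the extension does:
from mongoengine import connect

connect(host="mongodb://localhost/app_db", alias="default")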
def get_name(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Get Name converted case
:param name_case: name case type
* DdlParse.NAME_CASE.original : Return to no convert
* DdlParse.NAME_CASE.lower : Return to lower
* DdlParse.NAME_CASE.upper : Return to upper
:return: name
"""
if name_case == self.NAME_CASE.lower:
return self._name.lower()
elif name_case == self.NAME_CASE.upper:
return self._name.upper()
else:
            return self._name
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Vol Constant', fam.Normal(0,3,transform=None), fam.Normal(0,3))
for p_term in range(self.p):
self.latent_variables.add_z('p(' + str(p_term+1) + ')', fam.Normal(0,0.5,transform='logit'), fam.Normal(0,3))
if p_term == 0:
self.latent_variables.z_list[-1].start = 3.00
else:
self.latent_variables.z_list[-1].start = -4.00
for q_term in range(self.q):
self.latent_variables.add_z('q(' + str(q_term+1) + ')', fam.Normal(0,0.5,transform='logit'), fam.Normal(0,3))
if q_term == 0:
self.latent_variables.z_list[-1].start = -1.50
else:
self.latent_variables.z_list[-1].start = -4.00
self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0,3))
self.latent_variables.add_z('Returns Constant', fam.Normal(0,3,transform=None), fam.Normal(0,3))
self.latent_variables.add_z('GARCH-M', fam.Normal(0,3,transform=None), fam.Normal(0,3))
# Starting values
        self.latent_variables.z_list[-3].start = 2.0
def __setParentSymbol(self, value):
"""self.__parentSymbol variable setter"""
errors = []
        if not isinstance(value, str) or not value.split():
errors.append('parentSymbol_ERROR : Symbol : must be char or string!')
else:
self.__parentSymbol = value
if errors:
            view.Tli.showErrors('SymbolError', errors)
def assure_migrations_table_setup(db):
"""
Make sure the migrations table is set up in the database.
"""
from mig.models import MigrationData
if not MigrationData.__table__.exists(db.bind):
MigrationData.metadata.create_all(
            db.bind, tables=[MigrationData.__table__])
async def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
        await m.drop_table(**drop_table_kwargs)
def is_revision_chain_placeholder(pid):
"""For replicas, the PIDs referenced in revision chains are reserved for use by
other replicas."""
return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter(
pid__did=pid
    ).exists()
def p_expr_EQ_expr(p):
""" expr : expr EQ expr
"""
    p[0] = make_binary(p.lineno(2), 'EQ', p[1], p[3], lambda x, y: x == y)