def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
        return Lemma(self, lemma_dict).synset
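
# A minimal, hedged sketch of the 'lemma.pos.sense' parsing performed by
# synset() above, with the MongoDB lookup omitted. The helper name and the
# _ASSUMED_SHORT_POS_TO_LONG values are illustrative assumptions, not the
# library's actual mapping.
_ASSUMED_SHORT_POS_TO_LONG = {'adj': 'adj', 'n': 'nomen', 'v': 'verben'}

def parse_synset_repr(synset_repr):
    """Split 'lemma.pos.sense' into (lemma, long_pos, sense) or return None."""
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    if not sensenum.isdigit() or pos not in _ASSUMED_SHORT_POS_TO_LONG:
        return None
    return lemma, _ASSUMED_SHORT_POS_TO_LONG[pos], int(sensenum, 10)

# parse_synset_repr(u'funktionieren.v.2') -> ('funktionieren', 'verben', 2)
# parse_synset_repr(u'not-a-synset')      -> None
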
def open_file(self, title="Open File", initialDir="~", fileTypes="*|All Files", rememberAs=None, **kwargs):
"""
Show an Open File dialog
Usage: C{dialog.open_file(title="Open File", initialDir="~", fileTypes="*|All Files", rememberAs=None, **kwargs)}
@param title: window title for the dialog
@param initialDir: starting directory for the file dialog
@param fileTypes: file type filter expression
@param rememberAs: gives an ID to this file dialog, allowing it to open at the last used path next time
@return: a tuple containing the exit code and file path
@rtype: C{DialogData(int, str)}
"""
if rememberAs is not None:
return self._run_kdialog(title, ["--getopenfilename", initialDir, fileTypes, ":" + rememberAs], kwargs)
else:
            return self._run_kdialog(title, ["--getopenfilename", initialDir, fileTypes], kwargs)
def _distance_matrix(self, a, b):
    """Pairwise squared Euclidean distance between each point in `a` and each point in `b`"""
def sq(x): return (x * x)
# matrix = np.sum(map(lambda a,b: sq(a[:,None] - b[None,:]), a.T,
# b.T), axis=0)
# A faster version than above:
matrix = sq(a[:, 0][:, None] - b[:, 0][None, :])
for x, y in zip(a.T[1:], b.T[1:]):
matrix += sq(x[:, None] - y[None, :])
    return matrix
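
# Hedged standalone check of the vectorised computation used by
# _distance_matrix above; note that it returns *squared* Euclidean distances.
# The helper below is an illustrative sketch assuming plain NumPy arrays.
import numpy as np

def pairwise_sq_dists(a, b):
    """Squared Euclidean distance between every row of `a` and every row of `b`."""
    matrix = (a[:, 0][:, None] - b[:, 0][None, :]) ** 2
    for x, y in zip(a.T[1:], b.T[1:]):
        matrix += (x[:, None] - y[None, :]) ** 2
    return matrix

a = np.random.rand(4, 3)
b = np.random.rand(5, 3)
ref = ((a[:, None, :] - b[None, :, :]) ** 2).sum(axis=-1)  # brute-force reference
assert np.allclose(pairwise_sq_dists(a, b), ref)
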
def analyse_text(text):
"""
Perform a structural analysis for basic Easytrieve constructs.
"""
result = 0.0
lines = text.split('\n')
hasEndProc = False
hasHeaderComment = False
hasFile = False
hasJob = False
hasProc = False
hasParm = False
hasReport = False
def isCommentLine(line):
        return EasytrieveLexer._COMMENT_LINE_REGEX.match(line) is not None
def isEmptyLine(line):
return not bool(line.strip())
# Remove possible empty lines and header comments.
while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
if not isEmptyLine(lines[0]):
hasHeaderComment = True
del lines[0]
    if lines and EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
# Looks like an Easytrieve macro.
result = 0.4
if hasHeaderComment:
result += 0.4
else:
# Scan the source for lines starting with indicators.
for line in lines:
words = line.split()
if (len(words) >= 2):
firstWord = words[0]
if not hasReport:
if not hasJob:
if not hasFile:
if not hasParm:
if firstWord == 'PARM':
hasParm = True
if firstWord == 'FILE':
hasFile = True
if firstWord == 'JOB':
hasJob = True
elif firstWord == 'PROC':
hasProc = True
elif firstWord == 'END-PROC':
hasEndProc = True
elif firstWord == 'REPORT':
hasReport = True
# Weight the findings.
if hasJob and (hasProc == hasEndProc):
if hasHeaderComment:
result += 0.1
if hasParm:
if hasProc:
# Found PARM, JOB and PROC/END-PROC:
# pretty sure this is Easytrieve.
result += 0.8
else:
                # Found PARM and JOB: probably this is Easytrieve
result += 0.5
else:
# Found JOB and possibly other keywords: might be Easytrieve
result += 0.11
if hasParm:
            # Note: PARM is not a proper English word, so this is
            # regarded as a much better indicator for Easytrieve than
            # the other words.
result += 0.2
if hasFile:
result += 0.01
if hasReport:
result += 0.01
assert 0.0 <= result <= 1.0
    return result
def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment):
"""
Builds the html code for the variant verification emails (order verification and cancel verification)
Args:
case_name(str): case display name
url(str): the complete url to the variant, accessible when clicking on the email link
display_name(str): a display name for the variant
category(str): category of the variant
subcategory(str): sub-category of the variant
breakpoint_1(str): breakpoint 1 (format is 'chr:start')
breakpoint_2(str): breakpoint 2 (format is 'chr:stop')
hgnc_symbol(str): a gene or a list of genes separated by comma
panels(str): a gene panel of a list of panels separated by comma
gtcalls(str): genotyping calls of any sample in the family
tx_changes(str): amino acid changes caused by the variant, only for snvs otherwise 'Not available'
        name(str): user_obj['name'], utf-8 encoded
comment(str): sender's comment from form
Returns:
html(str): the html body of the variant verification email
"""
html = """
<ul>
<li>
<strong>Case {case_name}</strong>: <a href="{url}">{display_name}</a>
</li>
<li><strong>Variant type</strong>: {category} ({subcategory})
<li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>
<li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>
<li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>
<li><strong>Gene panels</strong>: {panels}</li>
<li><strong>GT call</strong></li>
{gtcalls}
<li><strong>Amino acid changes</strong></li>
{tx_changes}
<li><strong>Comment</strong>: {comment}</li>
<li><strong>Ordered by</strong>: {name}</li>
</ul>
""".format(
case_name=case_name,
url=url,
display_name=display_name,
category=category,
subcategory=subcategory,
breakpoint_1=breakpoint_1,
breakpoint_2=breakpoint_2,
hgnc_symbol=hgnc_symbol,
panels=panels,
gtcalls=gtcalls,
tx_changes=tx_changes,
name=name,
comment=comment)
    return html
def clear(self):
"""Clear the cache."""
self._wlock.acquire()
try:
self._mapping.clear()
self._queue.clear()
finally:
            self._wlock.release()
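
# The acquire/try/finally/release pattern in clear() above can be written more
# idiomatically with a context manager. A hedged standalone sketch; the real
# cache's lock and queue types are assumed to behave like threading.Lock and
# collections.deque.
import threading
from collections import deque

class _TinyCache(object):
    def __init__(self):
        self._wlock = threading.Lock()
        self._mapping = {}
        self._queue = deque()

    def clear(self):
        """Clear the cache."""
        with self._wlock:  # acquire() and release() handled by the context manager
            self._mapping.clear()
            self._queue.clear()
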
def create_trie(self):
"""Create a trie of source root patterns from options.
:returns: :class:`SourceRootTrie`
"""
trie = SourceRootTrie(self.source_root_factory)
options = self.get_options()
for category in SourceRootCategories.ALL:
# Add patterns.
for pattern in options.get('{}_root_patterns'.format(category), []):
trie.add_pattern(pattern, category)
# Add fixed source roots.
for path, langs in options.get('{}_roots'.format(category), {}).items():
trie.add_fixed(path, langs, category)
    return trie
def edit_project(self, id_, **kwargs):
"""
Edits a project by ID. All fields available at creation can be updated
as well. If you want to update hourly rates retroactively, set the
argument `update_hourly_rate_on_time_entries` to True.
"""
data = self._wrap_dict("project", kwargs)
        return self.patch("/projects/{}.json".format(id_), data=data)
def get_least_common_subsumer(self, from_tid, to_tid):
"""
Returns the deepest common subsumer among two terms
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: string
@return: the term identifier of the common subsumer
"""
termid_from = self.terminal_for_term.get(from_tid)
termid_to = self.terminal_for_term.get(to_tid)
path_from = self.paths_for_terminal[termid_from][0]
path_to = self.paths_for_terminal[termid_to][0]
common_nodes = set(path_from) & set(path_to)
if len(common_nodes) == 0:
return None
else:
indexes = []
for common_node in common_nodes:
index1 = path_from.index(common_node)
index2 = path_to.index(common_node)
indexes.append((common_node,index1+index2))
indexes.sort(key=itemgetter(1))
shortest_common = indexes[0][0]
            return shortest_common
def config_control(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Will check if the configuration was changed.
If differences found, will try to commit.
In case commit unsuccessful, will try to rollback.
:return: A tuple with a boolean that specifies if the config was changed/committed/rollbacked on the device.\
And a string that provides more details of the reason why the configuration was not committed properly.
CLI Example:
.. code-block:: bash
salt '*' net.config_control
'''
result = True
comment = ''
changed, not_changed_rsn = config_changed(inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
if not changed:
return (changed, not_changed_rsn)
# config changed, thus let's try to commit
try_commit = commit()
if not try_commit.get('result'):
result = False
comment = 'Unable to commit the changes: {reason}.\n\
Will try to rollback now!'.format(
reason=try_commit.get('comment')
)
try_rollback = rollback()
if not try_rollback.get('result'):
comment += '\nCannot rollback! {reason}'.format(
reason=try_rollback.get('comment')
)
    return result, comment
def sam_readline(sock, partial = None):
"""read a line from a sam control socket"""
response = b''
exception = None
while True:
try:
c = sock.recv(1)
if not c:
raise EOFError('SAM connection died. Partial response %r %r' % (partial, response))
elif c == b'\n':
break
else:
response += c
except (BlockingIOError, pysocket.timeout) as e:
if partial is None:
raise e
else:
exception = e
break
if partial is None:
# print('<--', response)
return response.decode('ascii')
else:
# print('<--', repr(partial), '+', response, exception)
        return (partial + response.decode('ascii'), exception)
def __tag_repo(self, data, repository):
"""
Tag the current repository.
:param data: a dictionary containing the data about the experiment
:type data: dict
"""
assert self.__tag_name not in [t.name for t in repository.tags]
        return TagReference.create(repository, self.__tag_name, message=json.dumps(data))
def is_user(value, min=None, max=None):
"""
    Check whether the username or uid given as argument exists.
    If this function receives a username, convert it to a uid and run the validation.
"""
if type(value) == str:
try:
entry = pwd.getpwnam(value)
value = entry.pw_uid
except KeyError:
err_message = ('{0}: No such user.'.format(value))
raise validate.VdtValueError(err_message)
return value
elif type(value) == int:
try:
pwd.getpwuid(value)
except KeyError:
err_message = ('{0}: No such user.'.format(value))
raise validate.VdtValueError(err_message)
return value
else:
        err_message = 'Please use str or int for the "user" parameter.'
        raise validate.VdtTypeError(err_message)
def _get_cibfile_cksum(cibname):
'''
Get the full path of the file containing a checksum of a CIB-file with the name of the CIB
'''
cibfile_cksum = '{0}.cksum'.format(_get_cibfile(cibname))
log.trace('cibfile_cksum: %s', cibfile_cksum)
    return cibfile_cksum
def derive_random_states(random_state, n=1):
"""
Create N new random states based on an existing random state or seed.
Parameters
----------
random_state : numpy.random.RandomState
Random state or seed from which to derive new random states.
n : int, optional
Number of random states to derive.
Returns
-------
list of numpy.random.RandomState
Derived random states.
"""
seed_ = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    return [new_random_state(seed_+i) for i in sm.xrange(n)]
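
# Hedged usage sketch of the seed-derivation idea above, written with plain
# NumPy so it runs standalone. derive_states, seed_min and seed_max are
# illustrative stand-ins for the library's new_random_state() helper and its
# SEED_MIN_VALUE/SEED_MAX_VALUE constants.
import numpy as np

def derive_states(random_state, n=1, seed_min=0, seed_max=2**31 - 1):
    seed_ = random_state.randint(seed_min, seed_max, 1)[0]
    return [np.random.RandomState(seed_ + i) for i in range(n)]

parent = np.random.RandomState(42)
children = derive_states(parent, n=3)
print([rs.randint(0, 100) for rs in children])  # deterministic, derived from the parent
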
def get_phrases(self, ns=None, layer='syntax', cat_key='cat', cat_val='NP'):
"""yield all node IDs that dominate the given phrase type, e.g. all NPs"""
if not ns:
ns = self.ns
for node_id in select_nodes_by_layer(self, '{0}:{1}'.format(ns, layer)):
if self.node[node_id][self.ns+':'+cat_key] == cat_val:
            yield node_id
def parseExternalSubset(self, ExternalID, SystemID):
    """parse Markup declarations from an external subset
    [30] extSubset ::= textDecl? extSubsetDecl
    [31] extSubsetDecl ::= (markupdecl | conditionalSect | PEReference | S)*"""
    libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID)
def __last_commit(self):
"""
Retrieve the most recent commit message (with ``svn info``)
Returns:
tuple: (datestr, (revno, user, None, desc))
$ svn info
Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Repository Root: http://python-dlp.googlecode.com/svn
Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
Revision: 378
Node Kind: directory
Schedule: normal
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
"""
cmd = ['svn', 'info']
op = self.sh(cmd, shell=False)
if not op:
return None
author, rev, datestr = op.split('\n')[7:10]
author = author.split(': ', 1)[1].strip()
rev = rev.split(': ', 1)[1].strip()
datestr = datestr.split(': ', 1)[1].split('(', 1)[0].strip()
        return datestr, (rev, author, None, None)
def find_tool(name, additional_paths = [], path_last = False):
""" Attempts to find tool (binary) named 'name' in PATH and in
'additional-paths'. If found in path, returns 'name'. If
found in additional paths, returns full name. If the tool
is found in several directories, returns the first path found.
Otherwise, returns the empty string. If 'path_last' is specified,
path is checked after 'additional_paths'.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(additional_paths, basestring)
assert isinstance(path_last, (int, bool))
programs = path.programs_path()
match = path.glob(programs, [name, name + '.exe'])
additional_match = path.glob(additional_paths, [name, name + '.exe'])
result = []
if path_last:
result = additional_match
if not result and match:
result = match
else:
if match:
result = match
elif additional_match:
result = additional_match
if result:
return path.native(result[0])
else:
        return ''
def resolve_relative_paths(paths: List[str]) -> Dict[str, str]:
"""
Query buck to obtain a mapping from each absolute path to the relative
location in the analysis directory.
"""
buck_root = find_buck_root(os.getcwd())
if buck_root is None:
LOG.error(
"Buck root couldn't be found. Returning empty analysis directory mapping."
)
return {}
command = [
"buck",
"query",
"--json",
"--output-attribute",
".*",
"owner(%s)",
*paths,
]
try:
output = json.loads(
subprocess.check_output(command, timeout=30, stderr=subprocess.DEVNULL)
.decode()
.strip()
)
except (
subprocess.TimeoutExpired,
subprocess.CalledProcessError,
json.decoder.JSONDecodeError,
) as error:
raise BuckException("Querying buck for relative paths failed: {}".format(error))
# TODO(T40580762) we should use the owner name to determine which files are a
# part of the pyre project
results = {}
for path in paths:
# For each path, search for the target that owns it.
for owner in output.values():
prefix = os.path.join(buck_root, owner["buck.base_path"]) + os.sep
if not path.startswith(prefix):
continue
suffix = path[len(prefix) :]
if suffix not in owner["srcs"]:
continue
if "buck.base_module" in owner:
base_path = os.path.join(*owner["buck.base_module"].split("."))
else:
base_path = owner["buck.base_path"]
results[path] = os.path.join(base_path, owner["srcs"][suffix])
break # move on to next path
    return results
def get_spam_checker(backend_path):
"""
Return the selected spam checker backend.
"""
try:
backend_module = import_module(backend_path)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % backend_path,
RuntimeWarning)
backend = None
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = None
    return backend
def ssh_file(opts, dest_path, contents=None, kwargs=None, local_file=None):
'''
Copies a file to the remote SSH target using either sftp or scp, as
configured.
'''
if opts.get('file_transport', 'sftp') == 'sftp':
return sftp_file(dest_path, contents, kwargs, local_file)
    return scp_file(dest_path, contents, kwargs, local_file)
def unique_list(input_list):
r"""
For a given list (of points) remove any duplicates
"""
output_list = []
if len(input_list) > 0:
dim = _sp.shape(input_list)[1]
for i in input_list:
match = False
for j in output_list:
if dim == 3:
if i[0] == j[0] and i[1] == j[1] and i[2] == j[2]:
match = True
elif dim == 2:
if i[0] == j[0] and i[1] == j[1]:
match = True
elif dim == 1:
if i[0] == j[0]:
match = True
if match is False:
output_list.append(i)
    return output_list
def _tensor_product(t1, t2):
"""Computes the outer product of two possibly batched vectors.
Args:
t1: A `tf.Tensor` of shape `[..., n]`.
t2: A `tf.Tensor` of shape `[..., m]`.
Returns:
A tensor of shape `[..., n, m]` with matching batch dimensions, let's call
it `r`, whose components are:
```None
r[..., i, j] = t1[..., i] * t2[..., j]
```
"""
    return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
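
# Hedged NumPy illustration of what _tensor_product computes: a batched outer
# product r[..., i, j] = t1[..., i] * t2[..., j]. np.einsum is used only to make
# the index pattern explicit; the TF version above does the same thing via matmul.
import numpy as np

t1 = np.random.rand(2, 3)   # batch of 2 vectors of length n=3
t2 = np.random.rand(2, 4)   # batch of 2 vectors of length m=4
outer = np.einsum('...i,...j->...ij', t1, t2)            # shape (2, 3, 4)
via_matmul = np.matmul(t1[..., :, None], t2[..., None, :])
assert np.allclose(outer, via_matmul)
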
def _set_kernel_manager(self, kernel_manager):
""" Disconnect from the current kernel manager (if any) and set a new
kernel manager.
"""
# Disconnect the old kernel manager, if necessary.
old_manager = self._kernel_manager
if old_manager is not None:
old_manager.started_kernel.disconnect(self._started_kernel)
old_manager.started_channels.disconnect(self._started_channels)
old_manager.stopped_channels.disconnect(self._stopped_channels)
# Disconnect the old kernel manager's channels.
old_manager.sub_channel.message_received.disconnect(self._dispatch)
old_manager.shell_channel.message_received.disconnect(self._dispatch)
old_manager.stdin_channel.message_received.disconnect(self._dispatch)
old_manager.hb_channel.kernel_died.disconnect(
self._handle_kernel_died)
# Handle the case where the old kernel manager is still listening.
if old_manager.channels_running:
self._stopped_channels()
# Set the new kernel manager.
self._kernel_manager = kernel_manager
if kernel_manager is None:
return
# Connect the new kernel manager.
kernel_manager.started_kernel.connect(self._started_kernel)
kernel_manager.started_channels.connect(self._started_channels)
kernel_manager.stopped_channels.connect(self._stopped_channels)
# Connect the new kernel manager's channels.
kernel_manager.sub_channel.message_received.connect(self._dispatch)
kernel_manager.shell_channel.message_received.connect(self._dispatch)
kernel_manager.stdin_channel.message_received.connect(self._dispatch)
kernel_manager.hb_channel.kernel_died.connect(self._handle_kernel_died)
# Handle the case where the kernel manager started channels before
# we connected.
if kernel_manager.channels_running:
            self._started_channels()
def inserir(self, name):
"""Inserts a new Division Dc and returns its identifier.
        :param name: Division Dc name. String with a minimum of 2 and a maximum of 80 characters
:return: Dictionary with the following structure:
::
{'division_dc': {'id': < id_division_dc >}}
:raise InvalidParameterError: Name is null and invalid.
:raise NomeDivisaoDcDuplicadoError: There is already a registered Division Dc with the value of name.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
division_dc_map = dict()
division_dc_map['name'] = name
code, xml = self.submit(
{'division_dc': division_dc_map}, 'POST', 'divisiondc/')
        return self.response(code, xml)
def _write_zip(self, func_src, fpath):
"""
Write the function source to a zip file, suitable for upload to
Lambda.
Note there's a bit of undocumented magic going on here; Lambda needs
the execute bit set on the module with the handler in it (i.e. 0755
or 0555 permissions). There doesn't seem to be *any* documentation on
how to do this in the Python docs. The only real hint comes from the
source code of ``zipfile.ZipInfo.from_file()``, which includes:
st = os.stat(filename)
...
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
:param func_src: lambda function source
:type func_src: str
:param fpath: path to write the zip file at
:type fpath: str
"""
# get timestamp for file
now = datetime.now()
zi_tup = (now.year, now.month, now.day, now.hour, now.minute,
now.second)
logger.debug('setting zipinfo date to: %s', zi_tup)
# create a ZipInfo so we can set file attributes/mode
zinfo = zipfile.ZipInfo('webhook2lambda2sqs_func.py', zi_tup)
# set file mode
        zinfo.external_attr = 0o755 << 16
logger.debug('setting zipinfo file mode to: %s', zinfo.external_attr)
logger.debug('writing zip file at: %s', fpath)
with zipfile.ZipFile(fpath, 'w') as z:
            z.writestr(zinfo, func_src)
def add_key_filters(self, key_filters):
"""
Adds key filters to the inputs.
:param key_filters: a list of filters
:type key_filters: list
:rtype: :class:`RiakMapReduce`
"""
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in a query.')
self._key_filters.extend(key_filters)
        return self
def pretty_print(rows, keyword, domain):
"""
    rows is a list when getting all domains and
    a dict when getting a specific domain
"""
if isinstance(rows, dict):
pretty_print_domain(rows, keyword, domain)
elif isinstance(rows, list):
        pretty_print_zones(rows)
def get_common_path(pathlist):
"""Return common path for all paths in pathlist"""
common = osp.normpath(osp.commonprefix(pathlist))
if len(common) > 1:
if not osp.isdir(common):
return abspardir(common)
else:
for path in pathlist:
if not osp.isdir(osp.join(common, path[len(common)+1:])):
# `common` is not the real common prefix
return abspardir(common)
else:
                return osp.abspath(common)
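
# Hedged illustration of why get_common_path() cannot simply return
# os.path.commonprefix(): commonprefix works character by character, so the
# shared prefix of sibling directories may not be an existing directory.
import os.path as osp

paths = ['/home/user1/project', '/home/user2/project']
prefix = osp.commonprefix(paths)   # '/home/user' -- not a real directory
parent = osp.dirname(prefix)       # '/home' -- roughly what abspardir() recovers
print(prefix, parent)
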
def process_ipvsix_frame(self,
id=None,
msg=None):
"""process_ipvsix_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ipv6 frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "ipv6_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.ipvsix_keys:
self.ipvsix_keys[new_key] = k
# end of capturing all unique keys
dt["ipv6_id"] = id
self.all_ipvsix.append(dt)
log.debug("IPV6 data updated:")
log.debug(self.ipvsix_keys)
log.debug(self.all_ipvsix)
log.debug("")
        return flat_msg
def GetFeatureService(self, itemId, returnURLOnly=False):
"""Obtains a feature service by item ID.
Args:
itemId (str): The feature service's item ID.
returnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the feature service is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.
"""
admin = None
item = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
if self._securityHandler.valid == False:
self._valid = self._securityHandler.valid
self._message = self._securityHandler.message
return None
item = admin.content.getItem(itemId=itemId)
if item.type == "Feature Service":
if returnURLOnly:
return item.url
else:
fs = arcrest.agol.FeatureService(
url=item.url,
securityHandler=self._securityHandler)
if fs.layers is None or len(fs.layers) == 0 :
fs = arcrest.ags.FeatureService(
url=item.url)
return fs
return None
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "GetFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
admin = None
item = None
del item
del admin
            gc.collect()
def source_csv_to_pandas(path, table, read_csv_args=None):
"""
Parameters
----------
path: str
path to directory or zipfile
table: str
name of table
read_csv_args:
string arguments passed to the read_csv function
Returns
-------
df: pandas:DataFrame
"""
if '.txt' not in table:
table += '.txt'
if isinstance(path, dict):
data_obj = path[table]
f = data_obj.split("\n")
else:
if os.path.isdir(path):
f = open(os.path.join(path, table))
else:
z = zipfile.ZipFile(path)
for path in z.namelist():
if table in path:
table = path
break
try:
f = zip_open(z, table)
except KeyError as e:
return pd.DataFrame()
if read_csv_args:
df = pd.read_csv(**read_csv_args)
else:
df = pd.read_csv(f)
    return df
def _init_goslims(self, dagslim):
"""Get GO IDs in GO slims."""
go2obj_main = self.gosubdag.go2obj
go2obj_slim = {go for go, o in dagslim.items() if go in go2obj_main}
if self.gosubdag.relationships:
return self._get_goslimids_norel(go2obj_slim)
        return set(dagslim.keys())
def as_dict(self):
"""
Makes XcFunc obey the general json interface used in pymatgen for easier serialization.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
# print("in as_dict", type(self.x), type(self.c), type(self.xc))
if self.x is not None: d["x"] = self.x.as_dict()
if self.c is not None: d["c"] = self.c.as_dict()
if self.xc is not None: d["xc"] = self.xc.as_dict()
        return d
def get_week_URL(date, day=0):
"""
Returns the week view URL for a given date.
:param date: A date instance.
:param day: Day number in a month.
"""
if day < 1:
day = 1
date = datetime(year=date.year, month=date.month, day=day, tzinfo=utc)
return reverse('calendar_week', kwargs={'year': date.isocalendar()[0],
                                             'week': date.isocalendar()[1]})
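
# Hedged note on why get_week_URL() uses isocalendar() for both year and week:
# around New Year the ISO week-numbering year can differ from the calendar year,
# so taking date.year would build the wrong URL for the first days of January.
from datetime import datetime

d = datetime(2011, 1, 1)
print(d.year)                 # 2011
print(d.isocalendar()[:2])    # (2010, 52) -- ISO year 2010, ISO week 52
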
def number(self, assignment_class=None, namespace='d'):
"""
Return a new number.
:param assignment_class: Determines the length of the number. Possible values are 'authority' (3 characters) ,
'registered' (5) , 'unregistered' (7) and 'self' (9). Self assigned numbers are random and acquired locally,
while the other assignment classes use the number server defined in the configuration. If None,
then look in the number server configuration for one of the class keys, starting
with the longest class and working to the shortest.
:param namespace: The namespace character, the first character in the number. Can be one of 'd', 'x' or 'b'
:return:
"""
if assignment_class == 'self':
# When 'self' is explicit, don't look for number server config
return str(DatasetNumber())
elif assignment_class is None:
try:
nsconfig = self.services['numbers']
except ConfigurationError:
# A missing configuration is equivalent to 'self'
self.logger.error('No number server configuration; returning self assigned number')
return str(DatasetNumber())
for assignment_class in ('self', 'unregistered', 'registered', 'authority'):
if assignment_class+'-key' in nsconfig:
break
            # For the case where the number configuration references a self-assigned key
if assignment_class == 'self':
return str(DatasetNumber())
else:
try:
nsconfig = self.services['numbers']
except ConfigurationError:
raise ConfigurationError('No number server configuration')
if assignment_class + '-key' not in nsconfig:
raise ConfigurationError(
'Assignment class {} not number server config'.format(assignment_class))
try:
key = nsconfig[assignment_class + '-key']
config = {
'key': key,
'host': nsconfig['host'],
'port': nsconfig.get('port', 80)
}
ns = NumberServer(**config)
n = str(next(ns))
self.logger.info('Got number from number server: {}'.format(n))
except HTTPError as e:
            self.logger.error('Failed to get number from number server for key {}: {}'.format(key, e))
self.logger.error('Using self-generated number. There is no problem with this, '
'but they are longer than centrally generated numbers.')
n = str(DatasetNumber())
        return n
def gauss_fit(X, Y):
"""
Fit the function to a gaussian.
Parameters
----------
X: 1d array
X values
Y: 1d array
Y values
Returns
-------
(The return from scipy.optimize.curve_fit)
popt : array
Optimal values for the parameters
pcov : 2d array
The estimated covariance of popt.
Notes
-----
    /!\ This uses the slow curve_fit function! Do not use it if you need speed!
"""
X = np.asarray(X)
Y = np.asarray(Y)
# Can not have negative values
Y[Y < 0] = 0
# define gauss function
def gauss(x, a, x0, sigma):
return a * np.exp(-(x - x0)**2 / (2 * sigma**2))
# get first estimation for parameter
mean = (X * Y).sum() / Y.sum()
sigma = np.sqrt((Y * ((X - mean)**2)).sum() / Y.sum())
height = Y.max()
# fit with curve_fit
    return curve_fit(gauss, X, Y, p0=[height, mean, sigma])
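
# Hedged usage sketch for gauss_fit() above: fit a noisy synthetic Gaussian and
# recover its height, centre and width. Assumes NumPy is imported as np and that
# curve_fit inside gauss_fit is scipy.optimize.curve_fit, as the module implies.
import numpy as np

x = np.linspace(-5, 5, 200)
true_height, true_mean, true_sigma = 2.0, 0.5, 1.2
y = true_height * np.exp(-(x - true_mean) ** 2 / (2 * true_sigma ** 2))
y = y + np.random.normal(scale=0.02, size=x.size)

popt, pcov = gauss_fit(x, y)
print(popt)  # approximately [2.0, 0.5, 1.2]
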
def get_raw_data(self, url, *args, **kwargs):
"""Gets data from url as bytes
Returns content under the provided url as bytes
ie. for binary data
Args:
**url**: address of the wanted data
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
bytes
"""
res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res.content
else:
            return None
def GetAttachmentIdFromMediaId(media_id):
"""Gets attachment id from media id.
:param str media_id:
:return:
The attachment id from the media id.
:rtype: str
"""
altchars = '+-'
if not six.PY2:
altchars = altchars.encode('utf-8')
# altchars for '+' and '/'. We keep '+' but replace '/' with '-'
buffer = base64.b64decode(str(media_id), altchars)
    resource_id_length = 20
    attachment_id = ''
    if len(buffer) > resource_id_length:
        # We are cutting off the storage index.
        attachment_id = base64.b64encode(buffer[0:resource_id_length], altchars)
if not six.PY2:
attachment_id = attachment_id.decode('utf-8')
else:
attachment_id = media_id
    return attachment_id
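
# Hedged illustration of the altchars trick in GetAttachmentIdFromMediaId():
# standard base64 uses '+' and '/', but '/' is awkward inside resource ids, so
# '/' is swapped for '-' on both decode and re-encode. The 20-byte prefix below
# mirrors the resource-id length assumed by the function.
import base64

raw = bytes(bytearray(range(20))) + b'extra-storage-index'
media_id = base64.b64encode(raw, b'+-')            # '/' replaced by '-'
decoded = base64.b64decode(media_id, b'+-')
attachment_id = base64.b64encode(decoded[:20], b'+-')
print(media_id, attachment_id)
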
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError('Invalid current offset value less than zero.')
# The SleuthKit is not POSIX compliant in its read behavior. Therefore
# pytsk3 will raise an IOError if the read offset is beyond the data size.
if self._current_offset >= self._size:
return b''
if size is None or self._current_offset + size > self._size:
size = self._size - self._current_offset
if self._tsk_attribute:
data = self._tsk_file.read_random(
self._current_offset, size, self._tsk_attribute.info.type,
self._tsk_attribute.info.id)
else:
data = self._tsk_file.read_random(self._current_offset, size)
# It is possible the that returned data size is not the same as the
# requested data size. At this layer we don't care and this discrepancy
# should be dealt with on a higher layer if necessary.
self._current_offset += len(data)
        return data
def _listen(self):
"""Listen for messages passed from parent
This method distributes messages received via stdin to their
corresponding channel. Based on the format of the incoming
message, the message is forwarded to its corresponding channel
to be processed by its corresponding handler.
"""
def _listen():
"""This runs in a thread"""
for line in iter(sys.stdin.readline, b""):
try:
response = json.loads(line)
except Exception as e:
# The parent has passed on a message that
# isn't formatted in any particular way.
# This is likely a bug.
raise e
else:
if response.get("header") == "pyblish-qml:popen.response":
self.channels["response"].put(line)
elif response.get("header") == "pyblish-qml:popen.parent":
self.channels["parent"].put(line)
elif response.get("header") == "pyblish-qml:server.pulse":
self._kill.cancel() # reset timer
self._self_destruct()
else:
# The parent has passed on a message that
# is JSON, but not in any format we recognise.
# This is likely a bug.
raise Exception("Unhandled message "
"passed to Popen, '%s'" % line)
thread = threading.Thread(target=_listen)
thread.daemon = True
        thread.start()
def delete_affinity_group(self, affinity_group_name):
'''
Deletes an affinity group in the specified subscription.
affinity_group_name:
The name of the affinity group.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
return self._perform_delete('/' + self.subscription_id + \
'/affinitygroups/' + \
                                     _str(affinity_group_name))
def error_codes(self):
"""ThreatConnect error codes."""
if self._error_codes is None:
from .tcex_error_codes import TcExErrorCodes
self._error_codes = TcExErrorCodes()
        return self._error_codes
def decrypt(self, msg):
"""decrypt a message"""
error = False
signature = msg[0:SHA256.digest_size]
iv = msg[SHA256.digest_size:SHA256.digest_size + AES.block_size]
cipher_text = msg[SHA256.digest_size + AES.block_size:]
if self.sign(iv + cipher_text) != signature:
error = True
ctr = Counter.new(AES.block_size * 8, initial_value=self.bin2long(iv))
cipher = AES.AESCipher(self._cipherkey, AES.MODE_CTR, counter=ctr)
plain_text = cipher.decrypt(cipher_text)
if error:
raise DecryptionError
        return plain_text
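
# Hedged sketch of the encryption side that would pair with decrypt() above:
# AES-CTR with an HMAC-SHA256 tag computed over iv + ciphertext and prepended
# to the message. Written against the PyCryptodome API; the key handling and
# exact framing are assumptions inferred from the decrypt code, not the
# original class's documented behaviour.
import os
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA256
from Crypto.Util import Counter

def encrypt(cipher_key, mac_key, plain_text):
    iv = os.urandom(AES.block_size)
    ctr = Counter.new(AES.block_size * 8, initial_value=int.from_bytes(iv, 'big'))
    cipher = AES.new(cipher_key, AES.MODE_CTR, counter=ctr)
    cipher_text = cipher.encrypt(plain_text)
    signature = HMAC.new(mac_key, iv + cipher_text, digestmod=SHA256).digest()
    return signature + iv + cipher_text
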
def info(model=None, markdown=False, silent=False):
"""
Print info about spaCy installation. If a model shortcut link is
    specified as an argument, print model information. Flag --markdown
prints details in Markdown for easy copy-pasting to GitHub issues.
"""
msg = Printer()
if model:
if util.is_package(model):
model_path = util.get_package_path(model)
else:
model_path = util.get_data_path() / model
meta_path = model_path / "meta.json"
if not meta_path.is_file():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path)
if model_path.resolve() != model_path:
meta["link"] = path2str(model_path)
meta["source"] = path2str(model_path.resolve())
else:
meta["source"] = path2str(model_path)
if not silent:
title = "Info about model '{}'".format(model)
model_meta = {
k: v for k, v in meta.items() if k not in ("accuracy", "speed")
}
if markdown:
print_markdown(model_meta, title=title)
else:
msg.table(model_meta, title=title)
return meta
data = {
"spaCy version": about.__version__,
"Location": path2str(Path(__file__).parent.parent),
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Models": list_models(),
}
if not silent:
title = "Info about spaCy"
if markdown:
print_markdown(data, title=title)
else:
msg.table(data, title=title)
    return data
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
rbridge_id = ET.SubElement(cluster_fwdl_entries, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
        return callback(config)
def is_address_here(self, address):
"""
Tries to determine if the given address belongs to this module.
@type address: int
@param address: Memory address.
@rtype: bool or None
@return: C{True} if the address belongs to the module,
C{False} if it doesn't,
and C{None} if it can't be determined.
"""
base = self.get_base()
size = self.get_size()
if base and size:
return base <= address < (base + size)
        return None
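
# Hedged standalone illustration of the half-open range test used by
# is_address_here(): an address belongs to the module exactly when
# base <= address < base + size.
base, size = 0x00400000, 0x1000
for address in (base - 1, base, base + size - 1, base + size):
    print(hex(address), base <= address < (base + size))
# -> False, True, True, False
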
def get_msms_df_on_file(pdb_file, outfile=None, outdir=None, outext='_msms.df', force_rerun=False):
"""Run MSMS (using Biopython) on a PDB file.
Saves a CSV file of:
chain: chain ID
resnum: residue number (PDB numbering)
icode: residue insertion code
res_depth: average depth of all atoms in a residue
ca_depth: depth of the alpha carbon atom
Depths are in units Angstroms. 1A = 10^-10 m = 1nm
Args:
pdb_file: Path to PDB file
outfile: Optional name of output file (without extension)
outdir: Optional output directory
        outext: Optional suffix appended to the output file name
force_rerun: Rerun MSMS even if results exist already
Returns:
Pandas DataFrame: ResidueDepth property_dict, reformatted
"""
# Create the output file name
outfile = ssbio.utils.outfile_maker(inname=pdb_file, outname=outfile, outdir=outdir, outext=outext)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
# Load the structure
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
df = get_msms_df(model, pdb_id=op.splitext(op.basename(pdb_file))[0],
outfile=outfile, outdir=outdir, outext=outext, force_rerun=force_rerun)
else:
log.debug('{}: already ran MSMS and force_rerun={}, loading results'.format(outfile, force_rerun))
df = pd.read_csv(outfile, index_col=0)
    return df
def msg_curse(self, args=None, max_width=None):
"""Return the list to display in the UI."""
# Init the return message
ret = []
        # Only process if stats exist and the plugin is not disabled
if not self.stats or self.args.percpu or self.is_disable():
return ret
# Build the string message
        # If user stat is not here, display only idle / total CPU usage (for
        # example on Windows OS)
idle_tag = 'user' not in self.stats
# Header
msg = '{}'.format('CPU')
ret.append(self.curse_add_line(msg, "TITLE"))
trend_user = self.get_trend('user')
trend_system = self.get_trend('system')
        if trend_user is None or trend_system is None:
trend_cpu = None
else:
trend_cpu = trend_user + trend_system
msg = ' {:4}'.format(self.trend_msg(trend_cpu))
ret.append(self.curse_add_line(msg))
# Total CPU usage
msg = '{:5.1f}%'.format(self.stats['total'])
if idle_tag:
ret.append(self.curse_add_line(
msg, self.get_views(key='total', option='decoration')))
else:
ret.append(self.curse_add_line(msg))
# Nice CPU
if 'nice' in self.stats:
msg = ' {:8}'.format('nice:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
msg = '{:5.1f}%'.format(self.stats['nice'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='nice', option='optional')))
# ctx_switches
if 'ctx_switches' in self.stats:
msg = ' {:8}'.format('ctx_sw:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='ctx_switches', option='optional')))
msg = '{:>5}'.format(self.auto_unit(int(self.stats['ctx_switches'] // self.stats['time_since_update']),
min_symbol='K'))
ret.append(self.curse_add_line(
msg, self.get_views(key='ctx_switches', option='decoration'),
optional=self.get_views(key='ctx_switches', option='optional')))
# New line
ret.append(self.curse_new_line())
# User CPU
if 'user' in self.stats:
msg = '{:8}'.format('user:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['user'])
ret.append(self.curse_add_line(
msg, self.get_views(key='user', option='decoration')))
elif 'idle' in self.stats:
msg = '{:8}'.format('idle:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg))
# IRQ CPU
if 'irq' in self.stats:
msg = ' {:8}'.format('irq:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
msg = '{:5.1f}%'.format(self.stats['irq'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='irq', option='optional')))
# interrupts
if 'interrupts' in self.stats:
msg = ' {:8}'.format('inter:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
msg = '{:>5}'.format(int(self.stats['interrupts'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='interrupts', option='optional')))
# New line
ret.append(self.curse_new_line())
# System CPU
if 'system' in self.stats and not idle_tag:
msg = '{:8}'.format('system:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['system'])
ret.append(self.curse_add_line(
msg, self.get_views(key='system', option='decoration')))
else:
msg = '{:8}'.format('core:')
ret.append(self.curse_add_line(msg))
msg = '{:>6}'.format(self.stats['nb_log_core'])
ret.append(self.curse_add_line(msg))
# IOWait CPU
if 'iowait' in self.stats:
msg = ' {:8}'.format('iowait:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='iowait', option='optional')))
msg = '{:5.1f}%'.format(self.stats['iowait'])
ret.append(self.curse_add_line(
msg, self.get_views(key='iowait', option='decoration'),
optional=self.get_views(key='iowait', option='optional')))
# soft_interrupts
if 'soft_interrupts' in self.stats:
msg = ' {:8}'.format('sw_int:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional')))
msg = '{:>5}'.format(int(self.stats['soft_interrupts'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='soft_interrupts', option='optional')))
# New line
ret.append(self.curse_new_line())
# Idle CPU
if 'idle' in self.stats and not idle_tag:
msg = '{:8}'.format('idle:')
ret.append(self.curse_add_line(msg))
msg = '{:5.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg))
# Steal CPU usage
if 'steal' in self.stats:
msg = ' {:8}'.format('steal:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='steal', option='optional')))
msg = '{:5.1f}%'.format(self.stats['steal'])
ret.append(self.curse_add_line(
msg, self.get_views(key='steal', option='decoration'),
optional=self.get_views(key='steal', option='optional')))
# syscalls
# syscalls: number of system calls since boot. Always set to 0 on Linux. (do not display)
if 'syscalls' in self.stats and not LINUX:
msg = ' {:8}'.format('syscal:')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional')))
msg = '{:>5}'.format(int(self.stats['syscalls'] // self.stats['time_since_update']))
ret.append(self.curse_add_line(msg, optional=self.get_views(key='syscalls', option='optional')))
# Return the message with decoration
        return ret
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__TK_template_info__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
    # if we don't have any exceptions in the frames left, we have to
    # reraise it unchanged.
    # XXX: can we backup here?  when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
    return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def parse_from_json(json_str):
"""
Given a Unified Uploader message, parse the contents and return a
MarketOrderList or MarketHistoryList instance.
:param str json_str: A Unified Uploader message as a JSON string.
:rtype: MarketOrderList or MarketHistoryList
:raises: MalformedUploadError when invalid JSON is passed in.
"""
try:
message_dict = json.loads(json_str)
except ValueError:
raise ParseError("Mal-formed JSON input.")
upload_keys = message_dict.get('uploadKeys', False)
if upload_keys is False:
raise ParseError(
"uploadKeys does not exist. At minimum, an empty array is required."
)
elif not isinstance(upload_keys, list):
raise ParseError(
"uploadKeys must be an array object."
)
upload_type = message_dict['resultType']
try:
if upload_type == 'orders':
return orders.parse_from_dict(message_dict)
elif upload_type == 'history':
return history.parse_from_dict(message_dict)
else:
raise ParseError(
'Unified message has unknown upload_type: %s' % upload_type)
except TypeError as exc:
# MarketOrder and HistoryEntry both raise TypeError exceptions if
# invalid input is encountered.
        raise ParseError(exc.message)
def create_unary_node(self, operator, child, param=None, schema=None):
"""
Return a Unary Node whose type depends on the specified operator.
:param schema:
:param child:
:param operator: A relational algebra operator (see constants.py)
:param param: A list of parameters for the operator.
:return: A Unary Node.
"""
if operator == self.grammar.syntax.select_op:
conditions = ' '.join(flatten(param))
node = SelectNode(child, conditions)
elif operator == self.grammar.syntax.project_op:
node = ProjectNode(child, param)
elif operator == self.grammar.syntax.rename_op:
name = None
attributes = []
if isinstance(param[0], str):
name = param.pop(0)
if param:
attributes = param[0]
node = RenameNode(child, name, attributes, schema)
elif operator == self.grammar.syntax.assign_op:
name = param[0]
attributes = [] if len(param) < 2 else param[1]
node = AssignNode(child, name, attributes, schema)
else:
raise ValueError
        return node
def get_translatable_children(self, obj):
"""
Obtain all the translatable children from "obj"
:param obj:
:return:
"""
collector = NestedObjects(using='default')
collector.collect([obj])
object_list = collector.nested()
items = self.get_elements(object_list)
# avoid first object because it's the main object
        return items[1:]
def package(self):
"""Copy Flatbuffers' artifacts to package folder
"""
cmake = self.configure_cmake()
cmake.install()
self.copy(pattern="LICENSE.txt", dst="licenses")
self.copy(pattern="FindFlatBuffers.cmake", dst=os.path.join("lib", "cmake", "flatbuffers"), src="CMake")
self.copy(pattern="flathash*", dst="bin", src="bin")
self.copy(pattern="flatc*", dst="bin", src="bin")
if self.settings.os == "Windows" and self.options.shared:
if self.settings.compiler == "Visual Studio":
shutil.move(os.path.join(self.package_folder, "lib", "%s.dll" % self.name),
os.path.join(self.package_folder, "bin", "%s.dll" % self.name))
elif self.settings.compiler == "gcc":
shutil.move(os.path.join(self.package_folder, "lib", "lib%s.dll" % self.name),
                            os.path.join(self.package_folder, "bin", "lib%s.dll" % self.name))
def _get_sorted_action_keys(self, keys_list):
"""
This function returns only the elements starting with 'action-' in
'keys_list'. The returned list is sorted by the index appended to
the end of each element
"""
# The names can be found in reflexrulewidget.pt inside the
# Reflex action rules list section.
action_list = []
for key in keys_list:
if key.startswith('action-'):
action_list.append(key)
action_list.sort()
return action_list | This function returns only the elements starting with 'action-' in
'keys_list'. The returned list is sorted by the index appended to
the end of each element |
def update(self, prms):
"""
Args:
prms(dict): dict of {variable name: value}
Any name in prms must be in the graph and in vars_to_update.
"""
with self.sess.as_default():
fetches = []
feeds = {}
for name, value in six.iteritems(prms):
assert name in self.name_map
var = self.name_map[name]
fetches.append(var.initializer)
# This is the implementation of `var.load`
feeds[var.initializer.inputs[1]] = SessionUpdate.relaxed_value_for_var(value, var)
self.sess.run(fetches, feed_dict=feeds) | Args:
prms(dict): dict of {variable name: value}
Any name in prms must be in the graph and in vars_to_update. |
def zip_job(job_ini, archive_zip='', risk_ini='', oq=None, log=logging.info):
"""
Zip the given job.ini file into the given archive, together with all
related files.
"""
if not os.path.exists(job_ini):
sys.exit('%s does not exist' % job_ini)
archive_zip = archive_zip or 'job.zip'
if isinstance(archive_zip, str): # actually it should be path-like
if not archive_zip.endswith('.zip'):
sys.exit('%s does not end with .zip' % archive_zip)
if os.path.exists(archive_zip):
sys.exit('%s exists already' % archive_zip)
# do not validate to avoid permissions error on the export_dir
oq = oq or readinput.get_oqparam(job_ini, validate=False)
if risk_ini:
risk_ini = os.path.normpath(os.path.abspath(risk_ini))
risk_inputs = readinput.get_params([risk_ini])['inputs']
del risk_inputs['job_ini']
oq.inputs.update(risk_inputs)
files = readinput.get_input_files(oq)
if risk_ini:
files = [risk_ini] + files
return general.zipfiles(files, archive_zip, log=log) | Zip the given job.ini file into the given archive, together with all
related files. |
def get_dataset(self, key, info):
"""Read data from file and return the corresponding projectables."""
if key.name in ['longitude', 'latitude']:
logger.debug('Reading coordinate arrays.')
if self.lons is None or self.lats is None:
self.lons, self.lats = self.get_lonlats()
if key.name == 'latitude':
proj = Dataset(self.lats, id=key, **info)
else:
proj = Dataset(self.lons, id=key, **info)
else:
data = self.get_sds_variable(key.name)
proj = Dataset(data, id=key, **info)
return proj | Read data from file and return the corresponding projectables. |
def separator_line(cls, sep='-', size=10):
""" Display a separator line. """
if cls.intty():
cls.echo(sep * size) | Display a separator line. |
def open(self, pathobj):
"""
Opens the remote file and returns a file-like object (HTTPResponse).
Given the nature of HTTP streaming, this object doesn't support
seek()
"""
url = str(pathobj)
raw, code = self.rest_get_stream(url, auth=pathobj.auth, verify=pathobj.verify,
cert=pathobj.cert)
if not code == 200:
raise RuntimeError("%d" % code)
return raw | Opens the remote file and returns a file-like object (HTTPResponse).
Given the nature of HTTP streaming, this object doesn't support
seek() |
def put(self, url, json=None, data=None, **kwargs):
"""Sends a PUT request.
Args:
url(basestring): The URL of the API endpoint.
json: Data to be sent in JSON format in the body of the request.
data: Data to be sent in the body of the request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint.
"""
check_type(url, basestring, may_be_none=False)
# Expected response code
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['PUT'])
response = self.request('PUT', url, erc, json=json, data=data,
**kwargs)
return extract_and_parse_json(response) | Sends a PUT request.
Args:
url(basestring): The URL of the API endpoint.
json: Data to be sent in JSON format in the body of the request.
data: Data to be sent in the body of the request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint. |
def create(self, deal_id, *args, **kwargs):
"""
Create an associated contact
Creates a deal's associated contact and its role
If the specified deal or contact does not exist, the request will return an error
:calls: ``post /deals/{deal_id}/associated_contacts``
:param int deal_id: Unique identifier of a Deal.
:param tuple *args: (optional) Single object representing AssociatedContact resource.
:param dict **kwargs: (optional) AssociatedContact attributes.
:return: Dictionary that supports attribute-style access and represents the newly created AssociatedContact resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for AssociatedContact are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.iteritems() if k in self.OPTS_KEYS_TO_PERSIST)
_, _, associated_contact = self.http_client.post("/deals/{deal_id}/associated_contacts".format(deal_id=deal_id), body=attributes)
return associated_contact | Create an associated contact
Creates a deal's associated contact and its role
If the specified deal or contact does not exist, the request will return an error
:calls: ``post /deals/{deal_id}/associated_contacts``
:param int deal_id: Unique identifier of a Deal.
:param tuple *args: (optional) Single object representing AssociatedContact resource.
:param dict **kwargs: (optional) AssociatedContact attributes.
:return: Dictionary that supports attribute-style access and represents the newly created AssociatedContact resource.
:rtype: dict |
def envGet(self, name, default=None, conv=None):
"""Return value for environment variable or None.
@param name: Name of environment variable.
@param default: Default value if variable is undefined.
@param conv: Function for converting value to desired type.
@return: Value of environment variable.
"""
if self._env.has_key(name):
if conv is not None:
return conv(self._env.get(name))
else:
return self._env.get(name)
else:
return default | Return value for environment variable or None.
@param name: Name of environment variable.
@param default: Default value if variable is undefined.
@param conv: Function for converting value to desired type.
@return: Value of environment variable. |
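A minimal standalone sketch of the same lookup-with-conversion pattern, using a plain mapping in place of self._env; the APP_PORT name and the default are illustrative only:

import os

def env_get(env, name, default=None, conv=None):
    # Mirror of envGet above: convert the value if the key exists, else fall back.
    if name in env:
        value = env[name]
        return conv(value) if conv is not None else value
    return default

# Read a port number from the process environment as an int, defaulting to 8080.
port = env_get(os.environ, "APP_PORT", default=8080, conv=int)
print(port)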
def crop_at_zero_crossing(gen, seconds=5, error=0.1):
'''
Crop the generator, ending at a zero-crossing
Crop the generator to produce approximately `seconds` seconds
(default 5s) of audio at the provided FRAME_RATE, attempting
to end the clip at a zero crossing point to avoid clicking.
'''
source = iter(gen)
buffer_length = int(2 * error * sampler.FRAME_RATE)
# split the source into two iterators:
# - start, which contains the bulk of the sound clip
# - and end, which contains the final 100ms, plus 100ms past
# the desired clip length. We may cut the clip anywhere
# within this +/-100ms end buffer.
start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE))
end = itertools.islice(source, 0, buffer_length)
for sample in start:
yield sample
# pull end buffer generator into memory so we can work with it
end = list(end)
# find min by sorting buffer samples, first by abs of sample, then by distance from optimal
best = sorted(enumerate(end), key=lambda x: (math.fabs(x[1]),abs((buffer_length/2)-x[0])))
print best[:10]
print best[0][0]
# todo: better logic when we don't have a perfect zero crossing
#if best[0][1] != 0:
# # we don't have a perfect zero crossing, so let's look for best fit?
# pass
# crop samples at index of best zero crossing
for sample in end[:best[0][0] + 1]:
yield sample | Crop the generator, ending at a zero-crossing
Crop the generator to produce approximately `seconds` seconds
(default 5s) of audio at the provided FRAME_RATE, attempting
to end the clip at a zero crossing point to avoid clicking. |
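The selection step at the heart of the crop is the sort key: prefer samples closest to zero and, among ties, closest to the middle of the end buffer. A self-contained sketch of just that step on made-up sample values:

import math

# Hypothetical end buffer (e.g. the last few hundred samples of a clip).
end = [0.9, -0.4, 0.1, 0.0, -0.2, 0.0, 0.7, -0.1]
buffer_length = len(end)

# Rank candidates by |sample| first, then by distance from the buffer midpoint,
# the same key used in crop_at_zero_crossing above.
best = sorted(enumerate(end),
              key=lambda x: (math.fabs(x[1]), abs(buffer_length / 2 - x[0])))

cut_index = best[0][0]
print(cut_index, end[:cut_index + 1])  # crop at the chosen zero crossing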
def chunkify(chunksize):
""" Very stupid "chunk vectorizer" which keeps memory use down.
This version requires all inputs to have the same number of elements,
although it shouldn't be that hard to implement simple broadcasting.
"""
def chunkifier(func):
def wrap(*args):
assert len(args) > 0
assert all(len(a.flat) == len(args[0].flat) for a in args)
nelements = len(args[0].flat)
nchunks, remain = divmod(nelements, chunksize)
out = np.ndarray(args[0].shape)
for start in range(0, nelements, chunksize):
#print(start)
stop = start+chunksize
if start+chunksize > nelements:
stop = nelements-start
iargs = tuple(a.flat[start:stop] for a in args)
out.flat[start:stop] = func(*iargs)
return out
return wrap
return chunkifier | Very stupid "chunk vectorizer" which keeps memory use down.
This version requires all inputs to have the same number of elements,
although it shouldn't be that hard to implement simple broadcasting. |
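The idea can be shown standalone: apply a function over fixed-size slices of the flattened inputs and write each result back into a preallocated output. A minimal sketch, independent of the decorator above (whose final-chunk arithmetic effectively assumes the chunk size divides the element count evenly):

import numpy as np

def chunked_apply(func, *args, chunksize=1000):
    # Apply an element-wise func over fixed-size slices of the flattened inputs.
    nelements = args[0].size
    out = np.empty(args[0].shape)
    for start in range(0, nelements, chunksize):
        stop = min(start + chunksize, nelements)  # clamp the final chunk
        out.flat[start:stop] = func(*(a.flat[start:stop] for a in args))
    return out

a = np.random.rand(100, 100)
b = np.random.rand(100, 100)
assert np.allclose(chunked_apply(np.hypot, a, b), np.hypot(a, b))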
def to_nullable_map(value):
"""
Converts JSON string into map object or returns null when conversion is not possible.
:param value: the JSON string to convert.
:return: Map object value or null when conversion is not supported.
"""
if value == None:
return None
# Parse JSON
try:
value = json.loads(value)
return RecursiveMapConverter.to_nullable_map(value)
except:
return None | Converts JSON string into map object or returns null when conversion is not possible.
:param value: the JSON string to convert.
:return: Map object value or null when conversion is not supported. |
def uninstall(pkg):
'''
Uninstall the specified package.
Args:
pkg (str): The package name.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.uninstall org.gimp.GIMP
'''
ret = {'result': None, 'output': ''}
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' uninstall ' + pkg)
if out['retcode'] and out['stderr']:
ret['stderr'] = out['stderr'].strip()
ret['result'] = False
else:
ret['stdout'] = out['stdout'].strip()
ret['result'] = True
return ret | Uninstall the specified package.
Args:
pkg (str): The package name.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.uninstall org.gimp.GIMP |
def get_fallback_coord(self,isotope='Ni-56',masslimit=0.1,masscutlim=False,delay=True):
'''
Returns the fallback mass coordinate such that at least `masslimit` of the
isotope `isotope` is ejected. The explosion type is chosen with the delay option.
masscutlim: If true, the new fallback coordinate can only be as small as the original
fallback prescription by C. Fryer. Useful for more massive stars which would not eject
any metals with Fryer's prescription.
'''
def getmasscut(m_ini,z_ini,delay):
if int(m_ini)==12:
m_ini=15
z_metal=z_ini/0.02
print 'MINI',m_ini,z_metal
if ((m_ini>=11.) and (m_ini<30.)):
if delay==True:
mass_cut = 1.1 + 0.2*np.exp((m_ini-11.0)/4.) - (2.0 + z_metal)*np.exp(0.4*(m_ini -26.0))
####rapid cc
else:
if m_ini<22.:
mass_cut= 1.1 +0.2*np.exp((m_ini-11.0)/7.5) + 10*(1.0+z_metal)*np.exp(-(m_ini-23.5)**2/(1.0+z_metal)**2)
elif m_ini<30 :
mass_cut= 1.1 + 0.2*np.exp((m_ini-11.0)/4.) - (2.0 + z_metal)*np.exp(0.4*(m_ini -26.0)) - 1.85 + 0.25*z_metal +10.0*(1.0+z_metal)*np.exp(-(m_ini-23.5)**2/(1.0+z_metal)**2)
##at higher mass difference
elif ((m_ini>30) and (m_ini<50)):
#delay
if delay==True:
mass_cut= min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35))
else:
mass_cut = min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35)) - 1.85 + z_metal*(75. -m_ini)/20.
elif m_ini>50:
#Page 7, Fryer12, only at solar Z valid
if z_metal==1:
if m_ini<90.:
mass_cut = 1.8 + 0.04*(90. - m_ini)
else:
mass_cut = 1.8 + np.log10(m_ini - 89.)
#The part below will probably never be used
if z_metal <1:
if m_ini<90.:
mass_cut = max(min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35)),1.8 + 0.04*(90. - m_ini))
else:
mass_cut = max(min( 33.35 + (4.75 + 1.25*z_metal)*(m_ini-34.),m_ini-z_metal**0.5 *(1.3*m_ini - 18.35)),1.8 + np.log10(m_ini - 89.))
mass_cut=round(mass_cut,2)
return mass_cut
fallback_coords=[]
orig_fallback=[]
minis=[]
ni56_mass_all=[]
o16_mass_all=[]
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_restart[i])
m_ini=sefiles.get("mini")
z_ini=sefiles.get("zini")
minis.append(m_ini)
mass_cut=getmasscut(m_ini,z_ini,delay)
print 'mass cut',mass_cut
cycle=int(sefiles.se.cycles[-1])
mass_cycle=sefiles.get(cycle,"mass")
mass_limit=mass_cycle[-1] #maximum mass_test can be
idx_start=min(range(len(mass_cycle)), key=lambda i: abs(mass_cycle[i]-mass_cut))
ni56_frac=sefiles.get(cycle,'Ni-56')
o16_frac=sefiles.get(cycle,'O-16')
deltam=mass_cycle[1:]-mass_cycle[:-1]
ni56_mass=0
o16_mass=0
ni56_mass_orig=0
newremnant=mass_cut
# go from the outside to the inside
for k in range(len(mass_cycle)-1)[::-1]:
cellm=deltam[k]*ni56_frac[k]
#in case fallback coordinate should not be smaller then mass_cut (fryer)
if masscutlim==True:
if mass_cycle[k]<mass_cut:
break
if ni56_mass>masslimit:
newremnant=mass_cycle[k]
print 'found new remnant',newremnant,'ni56:',ni56_mass
break
ni56_mass+=cellm
o16_mass+= (deltam[k]*o16_frac[k])
if newremnant == mass_limit:
print 'Ni-56 does not reach 0.1Msun, take old remnant',newremnant
fallback_coords.append(newremnant)
orig_fallback.append(mass_cut)
ni56_mass_all.append(ni56_mass)
o16_mass_all.append(o16_mass)
print '########Results:######'
for k in range(len(minis)):
print 'Initial mass: '+str(minis[k])+'Original fallback coord (fryer): '+str(orig_fallback[k])+',New fallback coord: '+str(fallback_coords[k])+'Ni-56 ejected: '+str(ni56_mass_all[k])+'O16: '+str(o16_mass_all[k])
return minis, fallback_coords | Returns the fallback mass coordinate such that at least `masslimit` of the
isotope `isotope` is ejected. The explosion type is chosen with the delay option.
masscutlim: If true, the new fallback coordinate can only be as small as the original
fallback prescription by C. Fryer. Useful for more massive stars which would not eject
any metals with Fryer's prescription.
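As a standalone numeric check of the delayed-explosion branch used above, with illustrative values only (a 15 Msun star at solar metallicity, so z_metal = 1):

import numpy as np

m_ini, z_metal = 15.0, 1.0  # illustrative inputs, not taken from any run
mass_cut = (1.1 + 0.2 * np.exp((m_ini - 11.0) / 4.0)
            - (2.0 + z_metal) * np.exp(0.4 * (m_ini - 26.0)))
print(round(mass_cut, 2))   # ~1.61 Msun for the delayed case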
def decode_devid(devid, pname):
'''decode one device ID. Used for 'devid' command in mavproxy and MAVExplorer'''
devid = int(devid)
if devid == 0:
return
bus_type=devid & 0x07
bus=(devid>>3) & 0x1F
address=(devid>>8)&0xFF
devtype=(devid>>16)
bustypes = {
1: "I2C",
2: "SPI",
3: "UAVCAN",
4: "SITL"
}
compass_types = {
0x01 : "DEVTYPE_HMC5883_OLD",
0x07 : "DEVTYPE_HMC5883",
0x02 : "DEVTYPE_LSM303D",
0x04 : "DEVTYPE_AK8963 ",
0x05 : "DEVTYPE_BMM150 ",
0x06 : "DEVTYPE_LSM9DS1",
0x08 : "DEVTYPE_LIS3MDL",
0x09 : "DEVTYPE_AK09916",
0x0A : "DEVTYPE_IST8310",
0x0B : "DEVTYPE_ICM20948",
0x0C : "DEVTYPE_MMC3416",
0x0D : "DEVTYPE_QMC5883L",
0x0E : "DEVTYPE_MAG3110",
0x0F : "DEVTYPE_SITL",
0x10 : "DEVTYPE_IST8308",
0x11 : "DEVTYPE_RM3100",
}
imu_types = {
0x09 : "DEVTYPE_BMI160",
0x10 : "DEVTYPE_L3G4200D",
0x11 : "DEVTYPE_ACC_LSM303D",
0x12 : "DEVTYPE_ACC_BMA180",
0x13 : "DEVTYPE_ACC_MPU6000",
0x16 : "DEVTYPE_ACC_MPU9250",
0x17 : "DEVTYPE_ACC_IIS328DQ",
0x21 : "DEVTYPE_GYR_MPU6000",
0x22 : "DEVTYPE_GYR_L3GD20",
0x24 : "DEVTYPE_GYR_MPU9250",
0x25 : "DEVTYPE_GYR_I3G4250D",
0x26 : "DEVTYPE_GYR_LSM9DS1",
0x27 : "DEVTYPE_INS_ICM20789",
0x28 : "DEVTYPE_INS_ICM20689",
0x29 : "DEVTYPE_INS_BMI055",
0x2A : "DEVTYPE_SITL",
0x2B : "DEVTYPE_INS_BMI088",
0x2C : "DEVTYPE_INS_ICM20948",
0x2D : "DEVTYPE_INS_ICM20648",
0x2E : "DEVTYPE_INS_ICM20649",
0x2F : "DEVTYPE_INS_ICM20602",
}
decoded_devname = ""
if pname.startswith("COMPASS"):
decoded_devname = compass_types.get(devtype, "UNKNOWN")
if pname.startswith("INS"):
decoded_devname = imu_types.get(devtype, "UNKNOWN")
print("%s: bus_type:%s(%u) bus:%u address:%u(0x%x) devtype:%u(0x%x) %s" % (
pname,
bustypes.get(bus_type,"UNKNOWN"), bus_type,
bus, address, address, devtype, devtype, decoded_devname)) | decode one device ID. Used for 'devid' command in mavproxy and MAVExplorer |
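The bit layout being unpacked is: bits 0-2 bus type, bits 3-7 bus number, bits 8-15 address, bits 16 and up device type. A standalone sketch with a made-up device ID, independent of the lookup tables above:

devid = (0x11 << 16) | (0x0E << 8) | (1 << 3) | 2  # hypothetical SPI compass on bus 1
bus_type = devid & 0x07          # 2 -> SPI
bus = (devid >> 3) & 0x1F        # 1
address = (devid >> 8) & 0xFF    # 0x0E
devtype = devid >> 16            # 0x11 -> DEVTYPE_RM3100 in the compass table above
print(bus_type, bus, hex(address), hex(devtype))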
def set_mode_loiter(self):
'''enter LOITER mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_NAV_LOITER_UNLIM, 0, 0, 0, 0, 0, 0, 0, 0)
else:
MAV_ACTION_LOITER = 27
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_LOITER) | enter LOITER mode |
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from this distribution.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params.
"""
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
for (p,_) in dtype:
offset = numpy.power(self._bounds[p][0], self.dim)
factor = numpy.power(self._bounds[p][1], self.dim) - \
numpy.power(self._bounds[p][0], self.dim)
arr[p] = numpy.random.uniform(0.0, 1.0, size=size)
arr[p] = numpy.power(factor * arr[p] + offset, 1.0 / self.dim)
return arr | Gives a set of random values drawn from this distribution.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params. |
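The per-parameter transform above is inverse-CDF sampling: r**dim is drawn uniformly between the bounds raised to the power dim, which corresponds to a density proportional to r**(dim - 1). A standalone sketch with illustrative bounds, checked against the analytic mean:

import numpy as np

dim, lo, hi = 3, 1.0, 5.0
u = np.random.uniform(0.0, 1.0, size=100_000)

# r**dim uniform on [lo**dim, hi**dim]  <=>  p(r) proportional to r**(dim - 1)
r = np.power((hi**dim - lo**dim) * u + lo**dim, 1.0 / dim)

# Analytic mean of that density: dim/(dim+1) * (hi**(dim+1) - lo**(dim+1)) / (hi**dim - lo**dim)
expected = dim / (dim + 1) * (hi**(dim + 1) - lo**(dim + 1)) / (hi**dim - lo**dim)
print(r.mean(), expected)  # should agree to roughly two decimal places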
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile) | Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory. |
def execute(self):
""" Executes all sql statements from bundle.sql. """
from ambry.mprlib import execute_sql
execute_sql(self._bundle.library, self.record_content) | Executes all sql statements from bundle.sql. |
def take_snapshot(self, entity_id, entity, last_event_version):
"""
Creates a Snapshot from the given state, and appends it
to the snapshot store.
:rtype: Snapshot
"""
# Create the snapshot.
snapshot = Snapshot(
originator_id=entity_id,
originator_version=last_event_version,
topic=get_topic(entity.__class__),
state=None if entity is None else deepcopy(entity.__dict__)
)
self.snapshot_store.store(snapshot)
# Return the snapshot.
return snapshot | Creates a Snapshot from the given state, and appends it
to the snapshot store.
:rtype: Snapshot |
def p(i, sample_size, weights):
"""
Given a weighted set and sample size, return the probability that the
weight `i` will be present in the sample.
Created to test the output of the `SomeOf` maker class. The math was
provided by Andy Blackshaw - thank you dad :)
"""
# Determine the initial pick values
weight_i = weights[i]
weights_sum = sum(weights)
# Build a list of weights that don't contain the weight `i`. This list will
# be used to build the possible picks before weight `i`.
other_weights = list(weights)
del other_weights[i]
# Calculate the probability
probability_of_i = 0
for picks in range(0, sample_size):
# Build the list of possible permutations for this pick in the sample
permutations = list(itertools.permutations(other_weights, picks))
# Calculate the probability for this permutation
permutation_probabilities = []
for permutation in permutations:
# Calculate the probability for each pick in the permutation
pick_probabilities = []
pick_weight_sum = weights_sum
for pick in permutation:
pick_probabilities.append(pick / pick_weight_sum)
# Each time we pick we update the sum of the weight the next
# pick is from.
pick_weight_sum -= pick
# Add the probability of picking i as the last pick
pick_probabilities += [weight_i / pick_weight_sum]
# Multiply all the probabilities for the permutation together
permutation_probability = reduce(
lambda x, y: x * y, pick_probabilities
)
permutation_probabilities.append(permutation_probability)
# Add together all the probabilities for all permutations together
probability_of_i += sum(permutation_probabilities)
return probability_of_i | Given a weighted set and sample size, return the probability that the
weight `i` will be present in the sample.
Created to test the output of the `SomeOf` maker class. The math was
provided by Andy Blackshaw - thank you dad :) |
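A quick empirical cross-check of the closed form, assuming p() above is in scope (it needs itertools and functools.reduce importable): draw many samples by successive picks proportional to the remaining weights and compare the observed inclusion frequency of index i.

import itertools
import random
from functools import reduce  # required by p() above in Python 3

def weighted_sample_without_replacement(weights, k):
    # Successive draws proportional to the remaining weights -- the model p() assumes.
    remaining = list(enumerate(weights))
    picked = []
    for _ in range(k):
        total = sum(w for _, w in remaining)
        r = random.uniform(0, total)
        acc = 0.0
        for j, (idx, w) in enumerate(remaining):
            acc += w
            if r <= acc:
                picked.append(idx)
                del remaining[j]
                break
    return picked

weights, k, i = [1, 2, 3, 4], 2, 3
trials = 20_000
hits = sum(i in weighted_sample_without_replacement(weights, k) for _ in range(trials))
print(hits / trials, p(i, k, weights))  # both should be close to ~0.716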
def jsonify(self, obj, many=sentinel, *args, **kwargs):
"""Return a JSON response containing the serialized data.
:param obj: Object to serialize.
:param bool many: Whether `obj` should be serialized as an instance
or as a collection. If unset, defaults to the value of the
`many` attribute on this Schema.
:param kwargs: Additional keyword arguments passed to `flask.jsonify`.
.. versionchanged:: 0.6.0
Takes the same arguments as `marshmallow.Schema.dump`. Additional
keyword arguments are passed to `flask.jsonify`.
.. versionchanged:: 0.6.3
The `many` argument for this method defaults to the value of
the `many` attribute on the Schema. Previously, the `many`
argument of this method defaulted to False, regardless of the
value of `Schema.many`.
"""
if many is sentinel:
many = self.many
if _MARSHMALLOW_VERSION_INFO[0] >= 3:
data = self.dump(obj, many=many)
else:
data = self.dump(obj, many=many).data
return flask.jsonify(data, *args, **kwargs) | Return a JSON response containing the serialized data.
:param obj: Object to serialize.
:param bool many: Whether `obj` should be serialized as an instance
or as a collection. If unset, defaults to the value of the
`many` attribute on this Schema.
:param kwargs: Additional keyword arguments passed to `flask.jsonify`.
.. versionchanged:: 0.6.0
Takes the same arguments as `marshmallow.Schema.dump`. Additional
keyword arguments are passed to `flask.jsonify`.
.. versionchanged:: 0.6.3
The `many` argument for this method defaults to the value of
the `many` attribute on the Schema. Previously, the `many`
argument of this method defaulted to False, regardless of the
value of `Schema.many`. |
def ExamineEvent(self, mediator, event):
"""Analyzes an event and creates Windows Services as required.
At present, this method only handles events extracted from the Registry.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
# TODO: Handle event log entries here also (ie, event id 4697).
event_data_type = getattr(event, 'data_type', '')
if event_data_type == 'windows:registry:service':
# Create and store the service.
service = WindowsService.FromEvent(event)
self._service_collection.AddService(service) | Analyzes an event and creates Windows Services as required.
At present, this method only handles events extracted from the Registry.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine. |
def hdict(self, hashroot):
""" Get all data contained in hashed category 'hashroot' as dict """
hfiles = self.keys(hashroot + "/*")
hfiles.sort()
last = len(hfiles) and hfiles[-1] or ''
if last.endswith('xx'):
# print "using xx"
hfiles = [last] + hfiles[:-1]
all = {}
for f in hfiles:
# print "using",f
try:
all.update(self[f])
except KeyError:
print "Corrupt",f,"deleted - hset is not threadsafe!"
del self[f]
self.uncache(f)
return all | Get all data contained in hashed category 'hashroot' as dict |
def build_masters(
filename,
master_dir,
designspace_instance_dir=None,
designspace_path=None,
family_name=None,
propagate_anchors=True,
minimize_glyphs_diffs=False,
normalize_ufos=False,
create_background_layers=False,
generate_GDEF=True,
store_editor_state=True,
):
"""Write and return UFOs from the masters and the designspace defined in a
.glyphs file.
Args:
master_dir: Directory where masters are written.
designspace_instance_dir: If provided, a designspace document will be
written alongside the master UFOs though no instances will be built.
family_name: If provided, the master UFOs will be given this name and
only instances with this name will be included in the designspace.
Returns:
A named tuple of master UFOs (`ufos`) and the path to the designspace
file (`designspace_path`).
"""
font = GSFont(filename)
if not os.path.isdir(master_dir):
os.mkdir(master_dir)
if designspace_instance_dir is None:
instance_dir = None
else:
instance_dir = os.path.relpath(designspace_instance_dir, master_dir)
designspace = to_designspace(
font,
family_name=family_name,
propagate_anchors=propagate_anchors,
instance_dir=instance_dir,
minimize_glyphs_diffs=minimize_glyphs_diffs,
generate_GDEF=generate_GDEF,
store_editor_state=store_editor_state,
)
# Only write full masters to disk. This assumes that layer sources are always part
# of another full master source, which must always be the case in a .glyphs file.
ufos = {}
for source in designspace.sources:
if source.filename in ufos:
assert source.font is ufos[source.filename]
continue
if create_background_layers:
ufo_create_background_layer_for_all_glyphs(source.font)
ufo_path = os.path.join(master_dir, source.filename)
clean_ufo(ufo_path)
source.font.save(ufo_path)
if normalize_ufos:
import ufonormalizer
ufonormalizer.normalizeUFO(ufo_path, writeModTimes=False)
ufos[source.filename] = source.font
if not designspace_path:
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
return Masters(ufos, designspace_path) | Write and return UFOs from the masters and the designspace defined in a
.glyphs file.
Args:
master_dir: Directory where masters are written.
designspace_instance_dir: If provided, a designspace document will be
written alongside the master UFOs though no instances will be built.
family_name: If provided, the master UFOs will be given this name and
only instances with this name will be included in the designspace.
Returns:
A named tuple of master UFOs (`ufos`) and the path to the designspace
file (`designspace_path`). |
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more | Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()). |
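The buffering contract described here mirrors the standard library's code.InteractiveConsole.push, which makes the return value easy to see in a short runnable illustration:

import code

console = code.InteractiveConsole()
print(console.push("def add(a, b):"))    # True  -> more input required
print(console.push("    return a + b"))  # True  -> still inside the block
print(console.push(""))                  # False -> block complete, add() now defined
print(console.push("print(add(2, 3))"))  # prints 5, then False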
def overview():
"""
Provides an overview of the duplicate credentials.
"""
search = Credential.search()
search.aggs.bucket('password_count', 'terms', field='secret', order={'_count': 'desc'}, size=20)\
.metric('username_count', 'cardinality', field='username') \
.metric('host_count', 'cardinality', field='host_ip') \
.metric('top_hits', 'top_hits', docvalue_fields=['username'], size=100)
response = search.execute()
print_line("{0:65} {1:5} {2:5} {3:5} {4}".format("Secret", "Count", "Hosts", "Users", "Usernames"))
print_line("-"*100)
for entry in response.aggregations.password_count.buckets:
usernames = []
for creds in entry.top_hits:
usernames.append(creds.username[0])
usernames = list(set(usernames))
print_line("{0:65} {1:5} {2:5} {3:5} {4}".format(entry.key, entry.doc_count, entry.host_count.value, entry.username_count.value, usernames)) | Provides an overview of the duplicate credentials. |
def launch_svc_event_handler(self, service):
"""Launch event handler for a service
Format of the line that triggers function call::
LAUNCH_SVC_EVENT_HANDLER;<host_name>;<service_description>
:param service: service to execute the event handler
:type service: alignak.objects.service.Service
:return: None
"""
service.get_event_handlers(self.hosts, self.daemon.macromodulations,
self.daemon.timeperiods, ext_cmd=True) | Launch event handler for a service
Format of the line that triggers function call::
LAUNCH_SVC_EVENT_HANDLER;<host_name>;<service_description>
:param service: service to execute the event handler
:type service: alignak.objects.service.Service
:return: None |
def appendGraph(self, graph_name, graph):
"""Utility method to associate Graph Object to Plugin.
This utility method is for use in the constructor of child classes for
associating MuninGraph instances with the plugin.
@param graph_name: Graph Name
@param graph: MuninGraph Instance
"""
self._graphDict[graph_name] = graph
self._graphNames.append(graph_name)
if not self.isMultigraph and len(self._graphNames) > 1:
raise AttributeError("Simple Munin Plugins cannot have more than one graph.") | Utility method to associate Graph Object to Plugin.
This utility method is for use in the constructor of child classes for
associating MuninGraph instances with the plugin.
@param graph_name: Graph Name
@param graph: MuninGraph Instance |
def _run_single(self, thread_id, agent, environment, deterministic=False,
max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):
"""
The target function for a thread; runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold.
"""
# figure out whether we are using the deprecated way of "episode_finished" reporting
old_episode_finished = False
if episode_finished is not None and len(getargspec(episode_finished).args) == 1:
old_episode_finished = True
episode = 0
# Run this single worker (episode loop) as long as global count thresholds have not been reached.
while not self.should_stop:
state = environment.reset()
agent.reset()
self.global_timestep, self.global_episode = agent.timestep, agent.episode
episode_reward = 0
# Time step (within episode) loop
time_step = 0
time_start = time.time()
while True:
action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)
reward = 0
for repeat in xrange(self.repeat_actions):
state, terminal, step_reward = environment.execute(action=action)
reward += step_reward
if terminal:
break
if not testing:
# agent.observe(reward=reward, terminal=terminal)
# Insert everything at once.
agent.atomic_observe(
states=state,
actions=action,
internals=internals,
reward=reward,
terminal=terminal
)
if sleep is not None:
time.sleep(sleep)
time_step += 1
episode_reward += reward
if terminal or time_step == max_episode_timesteps:
break
# Abort the episode (discard its results) when global says so.
if self.should_stop:
return
self.global_timestep += time_step
# Avoid race condition where order in episode_rewards won't match order in episode_timesteps.
self.episode_list_lock.acquire()
self.episode_rewards.append(episode_reward)
self.episode_timesteps.append(time_step)
self.episode_times.append(time.time() - time_start)
self.episode_list_lock.release()
if episode_finished is not None:
# old way of calling episode_finished
if old_episode_finished:
summary_data = {
"thread_id": thread_id,
"episode": episode,
"timestep": time_step,
"episode_reward": episode_reward
}
if not episode_finished(summary_data):
return
# New way with BasicRunner (self) and thread-id.
elif not episode_finished(self, thread_id):
return
episode += 1 | The target function for a thread; runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold. |
def get_python(self):
"""Return cursor if multi-select, direct value if single-select"""
cursor = super(ReferenceField, self).get_python()
if self.multiselect:
return cursor
else:
try:
return cursor[0]
except IndexError:
return None | Return cursor if multi-select, direct value if single-select |
def write_result(self, data):
"""Write the results received to the database
:param dict data: the data to save in database
:return: None
"""
data['custom_timers'] = ujson.dumps(data['custom_timers'])
self.results.append(data)
if len(self.results) >= 150: # 150 rows for SQLite default limit
with db.execution_context():
with db.atomic():
Result.insert_many(self.results).execute()
del self.results[:] | Write the results received to the database
:param dict data: the data to save in database
:return: None |
def create_custom_trees(cls, obj, options=None):
"""
Returns the appropriate set of customized subtree clones for
an object, suitable for merging with Store.custom_options (i.e.
with the ids appropriately offset). Note that if an object has no
integer ids, a new OptionTree is built.
The id_mapping return value is a list mapping the ids that
need to be matched as set to their new values.
"""
clones, id_mapping = {}, []
obj_ids = cls.get_object_ids(obj)
offset = cls.id_offset()
obj_ids = [None] if len(obj_ids)==0 else obj_ids
for tree_id in obj_ids:
if tree_id is not None and tree_id in Store.custom_options():
original = Store.custom_options()[tree_id]
clone = OptionTree(items = original.items(),
groups = original.groups)
clones[tree_id + offset + 1] = clone
id_mapping.append((tree_id, tree_id + offset + 1))
else:
clone = OptionTree(groups=Store.options().groups)
clones[offset] = clone
id_mapping.append((tree_id, offset))
# Nodes needed to ensure allowed_keywords is respected
for k in Store.options():
if k in [(opt.split('.')[0],) for opt in options]:
group = {grp:Options(
allowed_keywords=opt.allowed_keywords)
for (grp, opt) in
Store.options()[k].groups.items()}
clone[k] = group
return {k:cls.apply_customizations(options, t) if options else t
for k,t in clones.items()}, id_mapping | Returns the appropriate set of customized subtree clones for
an object, suitable for merging with Store.custom_options (i.e.
with the ids appropriately offset). Note that if an object has no
integer ids, a new OptionTree is built.
The id_mapping return value is a list mapping the ids that
need to be matched as set to their new values. |
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri) | Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo |
def _load_cache(self, filename):
"""Load the cached page references from `<filename>.ptc`."""
try:
with open(filename + self.CACHE_EXTENSION, 'rb') as file:
prev_number_of_pages, prev_page_references = pickle.load(file)
except (IOError, TypeError):
prev_number_of_pages, prev_page_references = {}, {}
return prev_number_of_pages, prev_page_references | Load the cached page references from `<filename>.ptc`. |
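For reference, a hedged sketch of the matching writer: the loader above expects the .ptc file to hold a pickled (number_of_pages, page_references) tuple, so a counterpart save could look like this (the function and constant names are assumptions, not part of the original class):

import pickle

CACHE_EXTENSION = '.ptc'

def save_cache(filename, number_of_pages, page_references):
    # Write the (number_of_pages, page_references) tuple the loader expects.
    with open(filename + CACHE_EXTENSION, 'wb') as file:
        pickle.dump((number_of_pages, page_references), file)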
def on_site(self):
"""
Return entries published on current site.
"""
return super(EntryPublishedManager, self).get_queryset().filter(
sites=Site.objects.get_current()) | Return entries published on current site. |
def print(self, indent=0):
"""Print a structural view of the registered extensions."""
LOG.info("%s:: %s", indent * " ", self.__class__)
for ext in self.next_extensions:
ext.print(indent=indent + 2) | Print a structural view of the registered extensions. |
def bitcount(self, key, start=None, end=None):
"""Count set bits in a string.
:raises TypeError: if only start or end specified.
"""
if start is None and end is not None:
raise TypeError("both start and stop must be specified")
elif start is not None and end is None:
raise TypeError("both start and stop must be specified")
elif start is not None and end is not None:
args = (start, end)
else:
args = ()
return self.execute(b'BITCOUNT', key, *args) | Count set bits in a string.
:raises TypeError: if only start or end specified. |
def put_meta(request):
"""MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean.
TODO: Currently, this call allows making breaking changes to SysMeta. We need to
clarify what can be modified and what the behavior should be when working with SIDs
and chains.
"""
if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
d1_gmn.app.auth.assert_create_update_delete_permission(request)
d1_gmn.app.util.coerce_put_post(request)
d1_gmn.app.views.assert_db.post_has_mime_parts(
request, (('field', 'pid'), ('file', 'sysmeta'))
)
pid = request.POST['pid']
d1_gmn.app.auth.assert_allowed(request, d1_gmn.app.auth.WRITE_LEVEL, pid)
new_sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
d1_gmn.app.views.assert_sysmeta.has_matching_modified_timestamp(new_sysmeta_pyxb)
d1_gmn.app.views.create.set_mn_controlled_values(
request, new_sysmeta_pyxb, is_modification=True
)
d1_gmn.app.sysmeta.create_or_update(new_sysmeta_pyxb)
d1_gmn.app.event_log.log_update_event(
pid,
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
new_sysmeta_pyxb.dateUploaded
),
)
return d1_gmn.app.views.util.http_response_with_boolean_true_type() | MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean.
TODO: Currently, this call allows making breaking changes to SysMeta. We need to
clarify what can be modified and what the behavior should be when working with SIDs
and chains. |
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
the_list = [self._ConsumeSingleByteString()]
while self.token and self.token[0] in _QUOTES:
the_list.append(self._ConsumeSingleByteString())
return b''.join(the_list) | Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed. |
def libvlc_video_get_logo_int(p_mi, option):
'''Get integer logo option.
@param p_mi: libvlc media player instance.
@param option: logo option to get, values of libvlc_video_logo_option_t.
'''
f = _Cfunctions.get('libvlc_video_get_logo_int', None) or \
_Cfunction('libvlc_video_get_logo_int', ((1,), (1,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint)
return f(p_mi, option) | Get integer logo option.
@param p_mi: libvlc media player instance.
@param option: logo option to get, values of libvlc_video_logo_option_t. |
def send_dictation_result(self, result, sentences=None, app_uuid=None):
'''
Send the result of a dictation session
:param result: Result of the session
:type result: DictationResult
:param sentences: list of sentences, each of which is a list of words and punctuation
:param app_uuid: UUID of app that initiated the session
:type app_uuid: uuid.UUID
'''
assert self._session_id != VoiceService.SESSION_ID_INVALID
assert isinstance(result, TranscriptionResult)
transcription = None
if result == TranscriptionResult.Success:
if len(sentences) > 0:
s_list = []
for s in sentences:
words = [Word(confidence=100, data=w) for w in s]
s_list.append(Sentence(words=words))
transcription = Transcription(transcription=SentenceList(sentences=s_list))
flags = 0
if app_uuid is not None:
assert isinstance(app_uuid, uuid.UUID)
flags |= Flags.AppInitiated
attributes = []
if app_uuid is not None:
assert isinstance(app_uuid, uuid.UUID)
attributes.append(Attribute(id=AttributeType.AppUuid, data=AppUuid(uuid=app_uuid)))
if transcription is not None:
attributes.append(Attribute(id=AttributeType.Transcription, data=transcription))
logger.debug("Sending dictation result (result={}".format(result) +
", app={})".format(app_uuid) if app_uuid is not None else ")")
self._pebble.send_packet(VoiceControlResult(flags=flags, data=DictationResult(
session_id=self._session_id, result=result, attributes=AttributeList(dictionary=attributes))))
self._session_id = VoiceService.SESSION_ID_INVALID | Send the result of a dictation session
:param result: Result of the session
:type result: DictationResult
:param sentences: list of sentences, each of which is a list of words and punctuation
:param app_uuid: UUID of app that initiated the session
:type app_uuid: uuid.UUID |
def create_from_pybankid_exception(cls, exception):
"""Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
"""
return cls(
"{0}: {1}".format(exception.__class__.__name__, str(exception)),
_exception_class_to_status_code.get(exception.__class__),
) | Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError` |
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):
"""
Return the path where the config file is stored.
Args:
app_name (text_type, optional): Name of the application, defaults to
``projecthamster``. Allows you to use your own application-specific
namespace if you wish.
file_name (text_type, optional): Name of the config file. Defaults to
``config.conf``.
Returns:
str: Fully qualified path (dir & filename) where we expect the config file.
"""
return os.path.join(appdirs.user_config_dir, file_name) | Return the path where the config file is stored.
Args:
app_name (text_type, optional): Name of the application, defaults to
``'projecthamster``. Allows you to use your own application specific
namespace if you wish.
file_name (text_type, optional): Name of the config file. Defaults to
``config.conf``.
Returns:
str: Fully qualified path (dir & filename) where we expect the config file. |
def _get_entity_by_class(self, entity_cls):
"""Fetch Entity record with Entity class details"""
entity_qualname = fully_qualified_name(entity_cls)
if entity_qualname in self._registry:
return self._registry[entity_qualname]
else:
return self._find_entity_in_records_by_class_name(entity_cls.__name__) | Fetch Entity record with Entity class details |