def run(self, data, rewrap=False, prefetch=0):
"""
Wires the pipeline and returns a lazy object of
the transformed data.
:param data: an iterable that yields one full document per iteration
:param rewrap: (optional) is a bool that indicates the need to rewrap
data in cases where iterating over it produces undesired results,
for instance ``dict`` instances.
:param prefetch: (optional) is an int defining the number of items to
be prefetched once the pipeline starts yielding data. The
default prefetching mechanism is based on threads, so be
careful with CPU-bound processing pipelines.
"""
if rewrap:
data = [data]
for _filter in self._filters:
_filter.feed(data)
data = _filter
else:
iterable = self._prefetch_callable(data, prefetch) if prefetch else data
for out_data in iterable:
            yield out_data
def get_edge(self, edge_id):
"""Returns the edge object identified by "edge_id"."""
try:
edge_object = self.edges[edge_id]
except KeyError:
raise NonexistentEdgeError(edge_id)
    return edge_object
def copy(self):
"""Make a deep copy of this object.
Example::
>>> c2 = c.copy()
"""
vec1 = np.copy(self.scoef1._vec)
vec2 = np.copy(self.scoef2._vec)
    return VectorCoefs(vec1, vec2, self.nmax, self.mmax)
def DiamAns(cmd, **fields):
"""Craft Diameter answer commands"""
upfields, name = getCmdParams(cmd, False, **fields)
p = DiamG(**upfields)
p.name = name
    return p
def downstream(self, step_name):
"""Returns the direct dependencies of the given step"""
    return list(self.steps[dep] for dep in self.dag.downstream(step_name))
def git_get_title_and_message(begin, end):
"""Get title and message summary for patches between 2 commits.
:param begin: first commit to look at
:param end: last commit to look at
:return: number of commits, title, message
"""
titles = git_get_log_titles(begin, end)
title = "Pull request for " + end
if len(titles) == 1:
title = titles[0]
pr_template = find_pull_request_template()
if pr_template:
message = get_pr_template_message(pr_template)
else:
if len(titles) == 1:
message = git_get_commit_body(end)
else:
message = "\n".join(titles)
    return (len(titles), title, message)
def _intersect(start1, end1, start2, end2):
"""
Returns the intersection of two intervals. Returns (None,None) if the intersection is empty.
:param int start1: The start date of the first interval.
:param int end1: The end date of the first interval.
:param int start2: The start date of the second interval.
:param int end2: The end date of the second interval.
:rtype: tuple[int|None,int|None]
"""
start = max(start1, start2)
end = min(end1, end2)
if start > end:
return None, None
    return start, end
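As a quick illustration of the interval logic above (the sample endpoints are made up), a minimal usage sketch:
# Overlapping intervals clip to the shared span; disjoint ones yield (None, None).
print(_intersect(1, 10, 5, 20))   # (5, 10)
print(_intersect(1, 3, 7, 9))     # (None, None)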
def assemble_to_object(self, in_filename, verbose=False):
"""
Assemble *in_filename* assembly into *out_filename* object.
If *iaca_marked* is set to true, markers are inserted around the block with most packed
instructions or (if no packed instr. were found) the largest block and modified file is
saved to *in_file*.
*asm_block* controls how the to-be-marked block is chosen. "auto" (default) results in
the largest block, "manual" results in interactive and a number in the according block.
*pointer_increment* is the number of bytes the pointer is incremented after the loop or
- 'auto': automatic detection, RuntimeError is raised in case of failure
- 'auto_with_manual_fallback': automatic detection, fallback to manual input
- 'manual': prompt user
Returns two-tuple (filepointer, filename) to temp binary file.
"""
# Build file name
file_base_name = os.path.splitext(os.path.basename(in_filename))[0]
out_filename, already_exists = self._get_intermediate_file(file_base_name + '.o',
binary=True,
fp=False)
if already_exists:
# Do not use caching, because pointer_increment or asm_block selection may be different
pass
compiler, compiler_args = self._machine.get_compiler()
# Compile to object file
compiler_args.append('-c')
cmd = [compiler] + [
in_filename] + \
compiler_args + ['-o', out_filename]
if verbose:
print('Executing (assemble_to_object): ', ' '.join(cmd))
try:
# Assemble all to a binary
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Assembly failed:", e, file=sys.stderr)
sys.exit(1)
    return out_filename
def deconstruct(self):
"""Gets the values to pass to :see:__init__ when
re-creating this object."""
name, path, args, kwargs = super(
HStoreField, self).deconstruct()
if self.uniqueness is not None:
kwargs['uniqueness'] = self.uniqueness
if self.required is not None:
kwargs['required'] = self.required
    return name, path, args, kwargs
def save(self, *args, **kwargs):
"""
**uid**: :code:`cycle:{year}`
"""
self.slug = slugify(self.name)
self.uid = 'cycle:{}'.format(self.slug)
    super(ElectionCycle, self).save(*args, **kwargs)
def _state_delete(self):
'''Try to delete the state.yml file and the folder .blockade'''
try:
os.remove(self._state_file)
except OSError as err:
if err.errno not in (errno.EPERM, errno.ENOENT):
raise
try:
os.rmdir(self._state_dir)
except OSError as err:
if err.errno not in (errno.ENOTEMPTY, errno.ENOENT):
            raise
def _check_response_status(self, response):
"""
    Checks the specified HTTP response from the requests package and
raises an exception if a non-200 HTTP code was returned by the
server.
"""
if response.status_code != requests.codes.ok:
self._logger.error("%s %s", response.status_code, response.text)
raise exceptions.RequestNonSuccessException(
"Url {0} had status_code {1}".format(
                response.url, response.status_code))
def corr_flat_und(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Only the upper triangular part is used to avoid double counting
undirected matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
undirected matrix 1
A2 : NxN np.ndarray
undirected matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
triu_ix = np.where(np.triu(np.ones((n, n)), 1))
    return np.corrcoef(a1[triu_ix].flat, a2[triu_ix].flat)[0][1]
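A minimal sketch of calling corr_flat_und with two tiny symmetric matrices (the values are invented for illustration); because the second matrix is a scaled copy of the first, the edgewise correlation comes out as 1:
a = np.array([[0., 1., 2.], [1., 0., 3.], [2., 3., 0.]])
b = 2 * a   # same structure, doubled weights
print(corr_flat_und(a, b))   # ~1.0 -- only the upper triangles [1, 2, 3] vs [2, 4, 6] are compared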
def time_range(self, start, end):
"""Add a request for a time range to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
start : datetime.datetime
The start of the requested time range
end : datetime.datetime
The end of the requested time range
Returns
-------
self : DataQuery
Returns self for chaining calls
"""
self._set_query(self.time_query, time_start=self._format_time(start),
time_end=self._format_time(end))
    return self
def _construct_derivatives(self, coefs, **kwargs):
"""Return a list of derivatives given a list of coefficients."""
    return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]
def single_html(epub_file_path, html_out=sys.stdout, mathjax_version=None,
numchapters=None, includes=None):
"""Generate complete book HTML."""
epub = cnxepub.EPUB.from_file(epub_file_path)
if len(epub) != 1:
raise Exception('Expecting an epub with one book')
package = epub[0]
binder = cnxepub.adapt_package(package)
partcount.update({}.fromkeys(parts, 0))
partcount['book'] += 1
html = cnxepub.SingleHTMLFormatter(binder, includes=includes)
# Truncate binder to the first N chapters where N = numchapters.
logger.debug('Full binder: {}'.format(cnxepub.model_to_tree(binder)))
if numchapters is not None:
apply_numchapters(html.get_node_type, binder, numchapters)
logger.debug('Truncated Binder: {}'.format(
cnxepub.model_to_tree(binder)))
# Add mathjax to the page.
if mathjax_version:
etree.SubElement(
html.head,
'script',
src=MATHJAX_URL.format(mathjax_version=mathjax_version))
print(str(html), file=html_out)
if hasattr(html_out, 'name'):
# html_out is a file, close after writing
        html_out.close()
def writeToCheckpoint(self, checkpointDir):
"""Serializes model using capnproto and writes data to ``checkpointDir``"""
proto = self.getSchema().new_message()
self.write(proto)
checkpointPath = self._getModelCheckpointFilePath(checkpointDir)
# Clean up old saved state, if any
if os.path.exists(checkpointDir):
if not os.path.isdir(checkpointDir):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete (not a directory)") \
% checkpointDir)
if not os.path.isfile(checkpointPath):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete"\
" (%s missing or not a file)") % \
(checkpointDir, checkpointPath))
shutil.rmtree(checkpointDir)
# Create a new directory for saving state
self.__makeDirectoryFromAbsolutePath(checkpointDir)
with open(checkpointPath, 'wb') as f:
        proto.write(f)
def notify(self):
"""
Calls the notification method
:return: True if the notification method has been called
"""
if self.__method is not None:
self.__method(self.__peer)
return True
    return False
def node_vectors(node_id):
"""Get the vectors of a node.
You must specify the node id in the url.
You can pass direction (incoming/outgoing/all) and failed
(True/False/all).
"""
exp = Experiment(session)
# get the parameters
direction = request_parameter(parameter="direction", default="all")
failed = request_parameter(parameter="failed", parameter_type="bool", default=False)
for x in [direction, failed]:
if type(x) == Response:
return x
# execute the request
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/node/vectors, node does not exist")
try:
vectors = node.vectors(direction=direction, failed=failed)
exp.vector_get_request(node=node, vectors=vectors)
session.commit()
except Exception:
return error_response(
error_type="/node/vectors GET server error",
status=403,
participant=node.participant,
)
# return the data
    return success_response(vectors=[v.__json__() for v in vectors])
def resolve_type(arg):
# type: (object) -> InternalType
"""
Resolve object to one of our internal collection types or generic built-in type.
Args:
arg: object to resolve
"""
arg_type = type(arg)
if arg_type == list:
assert isinstance(arg, list) # this line helps mypy figure out types
sample = arg[:min(4, len(arg))]
tentative_type = TentativeType()
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return ListType(tentative_type)
elif arg_type == set:
assert isinstance(arg, set) # this line helps mypy figure out types
sample = []
iterator = iter(arg)
for i in range(0, min(4, len(arg))):
sample.append(next(iterator))
tentative_type = TentativeType()
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return SetType(tentative_type)
elif arg_type == FakeIterator:
assert isinstance(arg, FakeIterator) # this line helps mypy figure out types
sample = []
iterator = iter(arg)
for i in range(0, min(4, len(arg))):
sample.append(next(iterator))
tentative_type = TentativeType()
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return IteratorType(tentative_type)
elif arg_type == tuple:
assert isinstance(arg, tuple) # this line helps mypy figure out types
sample = list(arg[:min(10, len(arg))])
return TupleType([resolve_type(sample_item) for sample_item in sample])
elif arg_type == dict:
assert isinstance(arg, dict) # this line helps mypy figure out types
key_tt = TentativeType()
val_tt = TentativeType()
for i, (k, v) in enumerate(iteritems(arg)):
if i > 4:
break
key_tt.add(resolve_type(k))
val_tt.add(resolve_type(v))
return DictType(key_tt, val_tt)
else:
        return type(arg)
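A few indicative calls (the ListType/DictType/TupleType wrappers come from the surrounding module, so only the general shape of each result is sketched in the comments):
resolve_type([1, 2, 3])        # ListType wrapping int
resolve_type({'a': 1.0})       # DictType mapping str to float
resolve_type((1, 'x'))         # TupleType of (int, str)
resolve_type(42)               # falls through to the builtin type: int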
def put(self, name, value):
"""Put a variable to MATLAB workspace.
"""
pm = ndarray_to_mxarray(self._libmx, value)
self._libeng.engPutVariable(self._ep, name, pm)
    self._libmx.mxDestroyArray(pm)
def notify(self, resource):
"""
Notifies the observers of a certain resource.
:param resource: the resource
"""
observers = self._observeLayer.notify(resource)
logger.debug("Notify")
for transaction in observers:
with transaction:
transaction.response = None
transaction = self._requestLayer.receive_request(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.response)
                self.send_datagram(transaction.response)
def getFailureMessage(failure):
"""
Return a short message based on L{twisted.python.failure.Failure}.
Tries to find where the exception was triggered.
"""
    exc = str(failure.type)
    msg = failure.getErrorMessage()
if len(failure.frames) == 0:
return "failure %(exc)s: %(msg)s" % locals()
(func, filename, line, some, other) = failure.frames[-1]
filename = scrubFilename(filename)
return "failure %(exc)s at %(filename)s:%(line)s: %(func)s(): %(msg)s" \
        % locals()
def to_html(data):
"""
Serializes a python object as HTML
This method uses the to_json method to turn the given data object into
    formatted JSON that is displayed in an HTML page. If pygments is installed,
syntax highlighting will also be applied to the JSON.
"""
base_html_template = Template('''
<html>
<head>
{% if style %}
<style type="text/css">
{{ style }}
</style>
{% endif %}
</head>
<body>
{% if style %}
{{ body|safe }}
{% else %}
    <pre><code>{{ body }}</code></pre>
{% endif %}
</body>
</html>
''')
code = to_json(data, indent=4)
if PYGMENTS_INSTALLED:
c = Context({
'body': highlight(code, JSONLexer(), HtmlFormatter()),
'style': HtmlFormatter().get_style_defs('.highlight')
})
html = base_html_template.render(c)
else:
c = Context({'body': code})
html = base_html_template.render(c)
    return html
def get_config_value(self, name, defaultValue):
"""
Parameters:
- name
- defaultValue
"""
self.send_get_config_value(name, defaultValue)
    return self.recv_get_config_value()
def random(self, cascadeFetch=False):
'''
Random - Returns a random record in current filterset.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
    @return - Instance of Model object, or None if no items match current filters
'''
matchedKeys = list(self.getPrimaryKeys())
obj = None
# Loop so we don't return None when there are items, if item is deleted between getting key and getting obj
while matchedKeys and not obj:
key = matchedKeys.pop(random.randint(0, len(matchedKeys)-1))
obj = self.get(key, cascadeFetch=cascadeFetch)
    return obj
def get_mac_addr(mac_addr):
"""converts bytes to mac addr format
:mac_addr: ctypes.structure
:return: str
mac addr in format
11:22:33:aa:bb:cc
"""
mac_addr = bytearray(mac_addr)
mac = b':'.join([('%02x' % o).encode('ascii') for o in mac_addr])
    return mac
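A small usage sketch with a handcrafted 6-byte value; note that, as written, the function returns bytes rather than str despite the docstring:
raw = bytes([0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc])
print(get_mac_addr(raw))   # b'11:22:33:aa:bb:cc'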
def partof(self, ns1, id1, ns2, id2):
"""Return True if one entity is "partof" another.
Parameters
----------
ns1 : str
Namespace code for an entity.
id1 : str
URI for an entity.
ns2 : str
Namespace code for an entity.
id2 : str
URI for an entity.
Returns
-------
bool
True if t1 has a "partof" relationship with t2, either directly or
through a series of intermediates; False otherwise.
"""
rel_fun = lambda node, graph: self.partof_objects(node)
return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
self.partof_closure,
                                               rel_fun)
def bck_from_spt (spt):
"""Calculate a bolometric correction constant for a J band magnitude based on
a spectral type, using the fits of Wilking+ (1999AJ....117..469W), Dahn+
(2002AJ....124.1170D), and Nakajima+ (2004ApJ...607..499N).
spt - Numerical spectral type. M0=0, M9=9, L0=10, ...
Returns: the correction `bck` such that `m_bol = k_abs + bck`, or NaN if
`spt` is out of range.
Valid values of `spt` are between 2 and 30.
"""
# NOTE: the way np.piecewise() is implemented, the last 'true' value in
# the condition list is the one that takes precedence. This motivates the
# construction of our condition list.
#
# XXX: I've restructured the implementation; this needs testing!
spt = np.asfarray (spt) # we crash with integer inputs for some reason.
return np.piecewise (spt,
[spt < 30,
spt < 19,
spt <= 14,
spt < 10,
(spt < 2) | (spt >= 30)],
[lambda s: 3.41 - 0.21 * (s - 20), # Nakajima
lambda s: 3.42 - 0.075 * (s - 14), # Dahn, Nakajima
lambda s: 3.42 + 0.075 * (s - 14), # Dahn, Nakajima
lambda s: 2.43 + 0.0895 * s, # Wilking; only ok for spt >= M2!
                              np.nan])
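Two quick numeric checks of the piecewise fit (values computed directly from the coefficients above, not re-derived from the cited papers):
print(bck_from_spt(5.))    # M5: 2.43 + 0.0895 * 5       = 2.8775
print(bck_from_spt(25.))   # T5: 3.41 - 0.21 * (25 - 20) = 2.36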
def liftover_to_genome(pass_pos, gtf):
"""Liftover from precursor to genome"""
fixed_pos = []
for pos in pass_pos:
if pos["chrom"] not in gtf:
continue
db_pos = gtf[pos["chrom"]][0]
mut = _parse_mut(pos["sv"])
print([db_pos, pos])
if db_pos[3] == "+":
pos['pre_pos'] = db_pos[1] + pos["pre_pos"] + 1
else:
pos['pre_pos'] = db_pos[2] - (pos["pre_pos"] - 1)
pos['chrom'] = db_pos[0]
pos['nt'] = list(mut[0])
fixed_pos.append(pos)
_print_header(fixed_pos)
for pos in fixed_pos:
        print_vcf(pos)
def get_config(self):
"""Returns the config of this layer.
This Layer's `make_distribution_fn` is serialized via a library built on
Python pickle. This serialization of Python functions is provided for
convenience, but:
1. The use of this format for long-term storage of models is discouraged.
In particular, it may not be possible to deserialize in a different
version of Python.
2. While serialization is generally supported for lambdas, local
functions, and static methods (and closures over these constructs),
complex functions may fail to serialize.
3. `Tensor` objects (and functions referencing `Tensor` objects) can only
be serialized when the tensor value is statically known. (Such Tensors
are serialized as numpy arrays.)
Instead of relying on `DistributionLambda.get_config`, consider subclassing
`DistributionLambda` and directly implementing Keras serialization via
`get_config` / `from_config`.
NOTE: At the moment, `DistributionLambda` can only be serialized if the
`convert_to_tensor_fn` is a serializable Keras object (i.e., implements
`get_config`) or one of the standard values:
- `Distribution.sample` (or `"sample"`)
- `Distribution.mean` (or `"mean"`)
- `Distribution.mode` (or `"mode"`)
- `Distribution.stddev` (or `"stddev"`)
- `Distribution.variance` (or `"variance"`)
"""
config = {
'make_distribution_fn': _serialize_function(self._make_distribution_fn),
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
}
base_config = super(DistributionLambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def maybeparens(lparen, item, rparen):
"""Wrap an item in optional parentheses, only applying them if necessary."""
    return item | lparen.suppress() + item + rparen.suppress()
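Assuming the pyparsing-style grammar objects this helper appears to operate on, a minimal sketch of the optional-parentheses behaviour:
from pyparsing import Literal, Word, nums

number = Word(nums)
expr = maybeparens(Literal("("), number, Literal(")"))
print(expr.parseString("42"))     # ['42']
print(expr.parseString("(42)"))   # ['42'] -- parentheses are matched but suppressed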
async def _get_security_token(self) -> None:
"""Request a security token."""
_LOGGER.debug('Requesting security token.')
if self._credentials is None:
return
# Make sure only 1 request can be sent at a time.
async with self._security_token_lock:
# Confirm there is still no security token.
if self._security_token is None:
login_resp = await self._request(
'post',
LOGIN_ENDPOINT,
json=self._credentials,
login_request=True,
)
return_code = int(login_resp.get('ReturnCode', 1))
if return_code != 0:
if return_code == 203:
# Invalid username or password.
_LOGGER.debug('Invalid username or password')
self._credentials = None
raise MyQError(login_resp['ErrorMessage'])
            self._security_token = login_resp['SecurityToken']
def process_node(self, node, parent, end, open_list, open_value=True):
'''
    we check if the given node is part of the path by calculating its
    cost and adding or removing it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
            heapq.heappush(open_list, node)
def update_bounds(self, bounds):
'''Update the bounds inplace'''
self.bounds = np.array(bounds, dtype='float32')
vertices, directions = self._gen_bounds(self.bounds)
self._verts_vbo.set_data(vertices)
self._directions_vbo.set_data(directions)
    self.widget.update()
def goto_step(self, inst: InstanceNode) -> InstanceNode:
"""Return member instance of `inst` addressed by the receiver.
Args:
inst: Current instance.
"""
    return inst.look_up(**self.parse_keys(inst.schema_node))
def is_valid(self, qstr=None):
"""Return True if string is valid"""
if qstr is None:
qstr = self.currentText()
    return is_module_or_package(to_text_string(qstr))
def compose(request, recipient=None, form_class=ComposeForm,
template_name='django_messages/compose.html', success_url=None, recipient_filter=None):
"""
Displays and handles the ``form_class`` form to compose new messages.
Required Arguments: None
Optional Arguments:
``recipient``: username of a `django.contrib.auth` User, who should
receive the message, optionally multiple usernames
could be separated by a '+'
``form_class``: the form-class to use
``template_name``: the template to use
    ``success_url``: where to redirect after successful submission
"""
if request.method == "POST":
sender = request.user
form = form_class(request.POST, recipient_filter=recipient_filter)
if form.is_valid():
form.save(sender=request.user)
messages.info(request, _(u"Message successfully sent."))
if success_url is None:
success_url = reverse('messages_inbox')
if 'next' in request.GET:
success_url = request.GET['next']
return HttpResponseRedirect(success_url)
else:
form = form_class()
if recipient is not None:
recipients = [u for u in User.objects.filter(**{'%s__in' % get_username_field(): [r.strip() for r in recipient.split('+')]})]
form.fields['recipient'].initial = recipients
return render(request, template_name, {
'form': form,
    })
def inference(self, kern_r, kern_c, Xr, Xc, Zr, Zc, likelihood, Y, qU_mean ,qU_var_r, qU_var_c, indexD, output_dim):
"""
The SVI-VarDTC inference
"""
N, D, Mr, Mc, Qr, Qc = Y.shape[0], output_dim,Zr.shape[0], Zc.shape[0], Zr.shape[1], Zc.shape[1]
uncertain_inputs_r = isinstance(Xr, VariationalPosterior)
uncertain_inputs_c = isinstance(Xc, VariationalPosterior)
uncertain_outputs = isinstance(Y, VariationalPosterior)
grad_dict = self._init_grad_dict(N,D,Mr,Mc)
beta = 1./likelihood.variance
if len(beta)==1:
beta = np.zeros(D)+beta
psi0_r, psi1_r, psi2_r = self.gatherPsiStat(kern_r, Xr, Zr, uncertain_inputs_r)
psi0_c, psi1_c, psi2_c = self.gatherPsiStat(kern_c, Xc, Zc, uncertain_inputs_c)
#======================================================================
# Compute Common Components
#======================================================================
Kuu_r = kern_r.K(Zr).copy()
diag.add(Kuu_r, self.const_jitter)
Lr = jitchol(Kuu_r)
Kuu_c = kern_c.K(Zc).copy()
diag.add(Kuu_c, self.const_jitter)
Lc = jitchol(Kuu_c)
mu, Sr, Sc = qU_mean, qU_var_r, qU_var_c
LSr = jitchol(Sr)
LSc = jitchol(Sc)
LcInvMLrInvT = dtrtrs(Lc,dtrtrs(Lr,mu.T)[0].T)[0]
LcInvLSc = dtrtrs(Lc, LSc)[0]
LrInvLSr = dtrtrs(Lr, LSr)[0]
LcInvScLcInvT = tdot(LcInvLSc)
LrInvSrLrInvT = tdot(LrInvLSr)
tr_LrInvSrLrInvT = np.square(LrInvLSr).sum()
tr_LcInvScLcInvT = np.square(LcInvLSc).sum()
mid_res = {
'psi0_r': psi0_r,
'psi1_r': psi1_r,
'psi2_r': psi2_r,
'psi0_c': psi0_c,
'psi1_c': psi1_c,
'psi2_c': psi2_c,
'Lr':Lr,
'Lc':Lc,
'LcInvMLrInvT': LcInvMLrInvT,
'LcInvScLcInvT': LcInvScLcInvT,
'LrInvSrLrInvT': LrInvSrLrInvT,
}
#======================================================================
# Compute log-likelihood
#======================================================================
logL = 0.
for d in range(D):
logL += self.inference_d(d, beta, Y, indexD, grad_dict, mid_res, uncertain_inputs_r, uncertain_inputs_c, Mr, Mc)
logL += -Mc * (np.log(np.diag(Lr)).sum()-np.log(np.diag(LSr)).sum()) -Mr * (np.log(np.diag(Lc)).sum()-np.log(np.diag(LSc)).sum()) \
- np.square(LcInvMLrInvT).sum()/2. - tr_LrInvSrLrInvT * tr_LcInvScLcInvT/2. + Mr*Mc/2.
#======================================================================
# Compute dL_dKuu
#======================================================================
tmp = tdot(LcInvMLrInvT)/2. + tr_LrInvSrLrInvT/2. * LcInvScLcInvT - Mr/2.*np.eye(Mc)
dL_dKuu_c = backsub_both_sides(Lc, tmp, 'left')
dL_dKuu_c += dL_dKuu_c.T
dL_dKuu_c *= 0.5
tmp = tdot(LcInvMLrInvT.T)/2. + tr_LcInvScLcInvT/2. * LrInvSrLrInvT - Mc/2.*np.eye(Mr)
dL_dKuu_r = backsub_both_sides(Lr, tmp, 'left')
dL_dKuu_r += dL_dKuu_r.T
dL_dKuu_r *= 0.5
#======================================================================
# Compute dL_dqU
#======================================================================
tmp = - LcInvMLrInvT
dL_dqU_mean = dtrtrs(Lc, dtrtrs(Lr, tmp.T, trans=1)[0].T, trans=1)[0]
LScInv = dtrtri(LSc)
tmp = -tr_LrInvSrLrInvT/2.*np.eye(Mc)
dL_dqU_var_c = backsub_both_sides(Lc, tmp, 'left') + tdot(LScInv.T) * Mr/2.
LSrInv = dtrtri(LSr)
tmp = -tr_LcInvScLcInvT/2.*np.eye(Mr)
dL_dqU_var_r = backsub_both_sides(Lr, tmp, 'left') + tdot(LSrInv.T) * Mc/2.
#======================================================================
# Compute the Posterior distribution of inducing points p(u|Y)
#======================================================================
post = PosteriorMultioutput(LcInvMLrInvT=LcInvMLrInvT, LcInvScLcInvT=LcInvScLcInvT,
LrInvSrLrInvT=LrInvSrLrInvT, Lr=Lr, Lc=Lc, kern_r=kern_r, Xr=Xr, Zr=Zr)
#======================================================================
# Compute dL_dpsi
#======================================================================
grad_dict['dL_dqU_mean'] += dL_dqU_mean
grad_dict['dL_dqU_var_c'] += dL_dqU_var_c
grad_dict['dL_dqU_var_r'] += dL_dqU_var_r
grad_dict['dL_dKuu_c'] += dL_dKuu_c
grad_dict['dL_dKuu_r'] += dL_dKuu_r
if not uncertain_inputs_c:
grad_dict['dL_dKdiag_c'] = grad_dict['dL_dpsi0_c']
grad_dict['dL_dKfu_c'] = grad_dict['dL_dpsi1_c']
if not uncertain_inputs_r:
grad_dict['dL_dKdiag_r'] = grad_dict['dL_dpsi0_r']
grad_dict['dL_dKfu_r'] = grad_dict['dL_dpsi1_r']
    return post, logL, grad_dict
def json_template(data, template_name, template_context):
"""Old style, use JSONTemplateResponse instead of this.
"""
html = render_to_string(template_name, template_context)
data = data or {}
data['html'] = html
    return HttpResponse(json_encode(data), content_type='application/json')
def _blast(bvname2vals, name_map):
"""Helper function to expand (blast) str -> int map into str ->
bool map. This is used to send word level inputs to aiger."""
if len(name_map) == 0:
return dict()
return fn.merge(*(dict(zip(names, bvname2vals[bvname]))
                      for bvname, names in name_map))
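A worked example of the blasting; the fn module used above looks like funcy, which is an assumption here, and the names and values are purely illustrative:
import funcy as fn   # assumed to be the `fn` referenced above

bvname2vals = {'x': [True, False, True]}
name_map = (('x', ('x[0]', 'x[1]', 'x[2]')),)
print(_blast(bvname2vals, name_map))
# {'x[0]': True, 'x[1]': False, 'x[2]': True}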
def search_accounts(self, **kwargs):
"""
Return a list of up to 5 matching account domains. Partial matches on
name and domain are supported.
:calls: `GET /api/v1/accounts/search \
<https://canvas.instructure.com/doc/api/account_domain_lookups.html#method.account_domain_lookups.search>`_
:rtype: dict
"""
response = self.__requester.request(
'GET',
'accounts/search',
_kwargs=combine_kwargs(**kwargs)
)
    return response.json()
def from_definition(self, table: Table, version: int):
"""Add all columns from the table added in the specified version"""
self.table(table)
self.add_columns(*table.columns.get_with_version(version))
    return self
def _estimate_gas(self, initializer: bytes, salt_nonce: int,
payment_token: str, payment_receiver: str) -> int:
"""
Gas estimation done using web3 and calling the node
Payment cannot be estimated, as no ether is in the address. So we add some gas later.
:param initializer: Data initializer to send to GnosisSafe setup method
:param salt_nonce: Nonce that will be used to generate the salt to calculate
the address of the new proxy contract.
:return: Total gas estimation
"""
# Estimate the contract deployment. We cannot estimate the refunding, as the safe address has not any fund
gas: int = self.proxy_factory_contract.functions.createProxyWithNonce(self.master_copy_address,
initializer, salt_nonce).estimateGas()
# It's not very relevant if is 1 or 9999
payment: int = 1
# We estimate the refund as a new tx
if payment_token == NULL_ADDRESS:
# Same cost to send 1 ether than 1000
gas += self.w3.eth.estimateGas({'to': payment_receiver, 'value': payment})
else:
# Top should be around 52000 when storage is needed (funder no previous owner of token),
# we use value 1 as we are simulating an internal call, and in that calls you don't pay for the data.
# If it was a new tx sending 5000 tokens would be more expensive than sending 1 because of data costs
gas += 55000
# try:
# gas += get_erc20_contract(self.w3,
# payment_token).functions.transfer(payment_receiver,
# payment).estimateGas({'from':
# payment_token})
# except ValueError as exc:
# raise InvalidERC20Token from exc
    return gas
def items_iter(self, limit):
'''Get an iterator of the 'items' in each page. Instead of a feature
collection from each page, the iterator yields the features.
:param int limit: The number of 'items' to limit to.
:return: iter of items in page
'''
pages = (page.get() for page in self._pages())
items = itertools.chain.from_iterable(
(p[self.ITEM_KEY] for p in pages)
)
if limit is not None:
items = itertools.islice(items, limit)
    return items
def preprocess_data(self, div=1, downsample=0, sum_norm=None,
include_genes=None, exclude_genes=None,
include_cells=None, exclude_cells=None,
norm='log', min_expression=1, thresh=0.01,
filter_genes=True):
"""Log-normalizes and filters the expression data.
Parameters
----------
div : float, optional, default 1
The factor by which the gene expression will be divided prior to
log normalization.
downsample : float, optional, default 0
The factor by which to randomly downsample the data. If 0, the
data will not be downsampled.
sum_norm : str or float, optional, default None
If a float, the total number of transcripts in each cell will be
normalized to this value prior to normalization and filtering.
Otherwise, nothing happens. If 'cell_median', each cell is
normalized to have the median total read count per cell. If
'gene_median', each gene is normalized to have the median total
read count per gene.
norm : str, optional, default 'log'
If 'log', log-normalizes the expression data. If 'ftt', applies the
Freeman-Tukey variance-stabilization transformation. If
'multinomial', applies the Pearson-residual transformation (this is
experimental and should only be used for raw, un-normalized UMI
datasets). If None, the data is not normalized.
include_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to keep.
All other genes will be filtered out. Gene names are case-
sensitive.
exclude_genes : array-like of string, optional, default None
A vector of gene names or indices that specifies the genes to
exclude. These genes will be filtered out. Gene names are case-
sensitive.
include_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to keep.
All other cells will be filtered out. Cell names are
case-sensitive.
exclude_cells : array-like of string, optional, default None
A vector of cell names that specifies the cells to exclude.
    These cells will be filtered out. Cell names are
case-sensitive.
min_expression : float, optional, default 1
The threshold above which a gene is considered
expressed. Gene expression values less than 'min_expression' are
set to zero.
    thresh : float, optional, default 0.01
Keep genes expressed in greater than 'thresh'*100 % of cells and
less than (1-'thresh')*100 % of cells, where a gene is considered
expressed if its expression value exceeds 'min_expression'.
filter_genes : bool, optional, default True
Setting this to False turns off filtering operations aside from
removing genes with zero expression across all cells. Genes passed
in exclude_genes or not passed in include_genes will still be
filtered.
"""
# load data
try:
D= self.adata_raw.X
self.adata = self.adata_raw.copy()
except AttributeError:
print('No data loaded')
# filter cells
cell_names = np.array(list(self.adata_raw.obs_names))
idx_cells = np.arange(D.shape[0])
if(include_cells is not None):
include_cells = np.array(list(include_cells))
idx2 = np.where(np.in1d(cell_names, include_cells))[0]
idx_cells = np.array(list(set(idx2) & set(idx_cells)))
if(exclude_cells is not None):
exclude_cells = np.array(list(exclude_cells))
idx4 = np.where(np.in1d(cell_names, exclude_cells,
invert=True))[0]
idx_cells = np.array(list(set(idx4) & set(idx_cells)))
if downsample > 0:
numcells = int(D.shape[0] / downsample)
rand_ind = np.random.choice(np.arange(D.shape[0]),
size=numcells, replace=False)
idx_cells = np.array(list(set(rand_ind) & set(idx_cells)))
else:
numcells = D.shape[0]
mask_cells = np.zeros(D.shape[0], dtype='bool')
mask_cells[idx_cells] = True
self.adata = self.adata_raw[mask_cells,:].copy()
D = self.adata.X
if isinstance(D,np.ndarray):
D=sp.csr_matrix(D,dtype='float32')
else:
D=D.astype('float32')
D.sort_indices()
if(D.getformat() == 'csc'):
D=D.tocsr();
# sum-normalize
if (sum_norm == 'cell_median' and norm != 'multinomial'):
s = D.sum(1).A.flatten()
sum_norm = np.median(s)
D = D.multiply(1 / s[:,None] * sum_norm).tocsr()
elif (sum_norm == 'gene_median' and norm != 'multinomial'):
s = D.sum(0).A.flatten()
sum_norm = np.median(s)
s[s==0]=1
D = D.multiply(1 / s[None,:] * sum_norm).tocsr()
elif sum_norm is not None and norm != 'multinomial':
D = D.multiply(1 / D.sum(1).A.flatten()[:,
None] * sum_norm).tocsr()
# normalize
self.adata.X = D
if norm is None:
D.data[:] = (D.data / div)
elif(norm.lower() == 'log'):
D.data[:] = np.log2(D.data / div + 1)
elif(norm.lower() == 'ftt'):
D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1)
elif norm.lower() == 'multinomial':
ni = D.sum(1).A.flatten() #cells
pj = (D.sum(0) / D.sum()).A.flatten() #genes
col = D.indices
row=[]
for i in range(D.shape[0]):
row.append(i*np.ones(D.indptr[i+1]-D.indptr[i]))
row = np.concatenate(row).astype('int32')
mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr()
mu2 = mu.copy()
mu2.data[:]=mu2.data**2
mu2 = mu2.multiply(1/ni[:,None])
mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data)
self.adata.X = mu
if sum_norm is None:
sum_norm = np.median(ni)
D = D.multiply(1 / ni[:,None] * sum_norm).tocsr()
D.data[:] = np.log2(D.data / div + 1)
else:
D.data[:] = (D.data / div)
# zero-out low-expressed genes
idx = np.where(D.data <= min_expression)[0]
D.data[idx] = 0
# filter genes
gene_names = np.array(list(self.adata.var_names))
idx_genes = np.arange(D.shape[1])
if(include_genes is not None):
include_genes = np.array(list(include_genes))
idx = np.where(np.in1d(gene_names, include_genes))[0]
idx_genes = np.array(list(set(idx) & set(idx_genes)))
if(exclude_genes is not None):
exclude_genes = np.array(list(exclude_genes))
idx3 = np.where(np.in1d(gene_names, exclude_genes,
invert=True))[0]
idx_genes = np.array(list(set(idx3) & set(idx_genes)))
if(filter_genes):
a, ct = np.unique(D.indices, return_counts=True)
c = np.zeros(D.shape[1])
c[a] = ct
keep = np.where(np.logical_and(c / D.shape[0] > thresh,
c / D.shape[0] <= 1 - thresh))[0]
idx_genes = np.array(list(set(keep) & set(idx_genes)))
mask_genes = np.zeros(D.shape[1], dtype='bool')
mask_genes[idx_genes] = True
self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr()
self.adata.X.eliminate_zeros()
self.adata.var['mask_genes']=mask_genes
if norm == 'multinomial':
self.adata.layers['X_disp'] = D.multiply(mask_genes[None, :]).tocsr()
self.adata.layers['X_disp'].eliminate_zeros()
else:
        self.adata.layers['X_disp'] = self.adata.X
def imresize(img, size, interpolate="bilinear", channel_first=False):
"""
Resize image by pil module.
Args:
img (numpy.ndarray): Image array to save.
Image shape is considered as (height, width, channel) for RGB or (height, width) for gray-scale by default.
    size (tuple of int): (width, height).
channel_first (bool):
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
    Default value is False, which means the img shape is (height, width, channels).
interpolate (str):
must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]
Returns:
numpy.ndarray whose shape is ('size'[1], 'size'[0], channel) or (size[1], size[0])
"""
img = _imresize_before(img, size, channel_first,
interpolate, list(interpolations_map.keys()))
expand_flag = False
if len(img.shape) == 3 and img.shape[-1] == 1:
# (h, w, 1) can not be handled by pil.Image, temporally reshape to (h, w)
img = img.reshape(img.shape[0], img.shape[1])
expand_flag = True
resample = interpolations_map[interpolate]
if img.dtype == np.uint8:
resized = pil_resize_from_ndarray(img, size, resample)
else:
dtype = img.dtype
img_float32 = np.asarray(img, np.float32)
if len(img.shape) == 3:
resized = np.stack([pil_resize_from_ndarray(img_float32[..., i], size, resample)
for i in range(img.shape[-1])], axis=2)
else:
resized = pil_resize_from_ndarray(img_float32, size, resample)
resized = np.asarray(resized, dtype)
if expand_flag:
resized = resized[..., np.newaxis]
    return _imresize_after(resized, channel_first)
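A minimal usage sketch (random uint8 image, illustrative sizes) showing that size is given as (width, height) while the returned array is shaped (height, width, channels):
import numpy as np

img = np.random.randint(0, 256, size=(32, 48, 3), dtype=np.uint8)
small = imresize(img, (24, 16))   # request width=24, height=16
print(small.shape)                # (16, 24, 3)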
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
    return tf_to_pt_map
def createphysicalnetwork(type, create_processor = partial(default_processor, excluding=('id', 'type')),
reorder_dict = default_iterate_dict):
"""
:param type: physical network type
:param create_processor: create_processor(physicalnetwork, walk, write, \*, parameters)
"""
# create an new physical network
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
new_network = create_new(PhysicalNetwork, value, id_)
new_network.type = type
create_processor(new_network, walk, write, parameters=parameters)
write(key, new_network)
new_networkmap = PhysicalNetworkMap.create_instance(id_)
new_networkmap.network = new_network.create_weakreference()
write(new_networkmap.getkey(), new_networkmap)
# Save into network set
try:
physet = walk(PhysicalNetworkSet.default_key())
except KeyError:
pass
else:
physet.set.dataset().add(new_network.create_weakreference())
write(physet.getkey(), physet)
    return walker
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
e_density = np.sum(self.calculate_stress(strain)*strain) / self.order
if convert_GPa_to_eV:
e_density *= self.GPa_to_eV_A3 # Conversion factor for GPa to eV/A^3
    return e_density
def _folder_item_instrument(self, analysis_brain, item):
"""Fills the analysis' instrument to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item['Instrument'] = ''
if not analysis_brain.getInstrumentEntryOfResults:
# Manual entry of results, instrument is not allowed
item['Instrument'] = _('Manual')
item['replace']['Instrument'] = \
'<a href="#">{}</a>'.format(t(_('Manual')))
return
# Instrument can be assigned to this analysis
is_editable = self.is_analysis_edition_allowed(analysis_brain)
self.show_methodinstr_columns = True
instrument = self.get_instrument(analysis_brain)
if is_editable:
# Edition allowed
voc = self.get_instruments_vocabulary(analysis_brain)
if voc:
# The service has at least one instrument available
item['Instrument'] = instrument.UID() if instrument else ''
item['choices']['Instrument'] = voc
item['allow_edit'].append('Instrument')
return
if instrument:
# Edition not allowed
instrument_title = instrument and instrument.Title() or ''
instrument_link = get_link(instrument.absolute_url(),
instrument_title)
item['Instrument'] = instrument_title
item['replace']['Instrument'] = instrument_link
        return
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
include_blank = (self.blank
or not (self.has_default()
or 'initial' in kwargs))
choices = [BLANK_CHOICE_DASH, ] if include_blank else []
choices.extend([
(
x.name,
getattr(x, 'verbose_name', x.name) or x.name,
getattr(x, 'help_text', None) or None
)
for x in self.choices_class.constants()
])
defaults['choices'] = choices
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
form_class = choices_form_class or ChoicesFormField
    return form_class(**defaults)
def extendedboldqc(auth, label, scan_ids=None, project=None, aid=None):
'''
Get ExtendedBOLDQC data as a sequence of dictionaries.
Example:
>>> import yaxil
>>> import json
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> for eqc in yaxil.extendedboldqc2(auth, 'AB1234C'):
... print(json.dumps(eqc, indent=2))
:param auth: XNAT authentication object
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT MR Session label
:type label: str
:param scan_ids: Scan numbers to return
:type scan_ids: list
:param project: XNAT MR Session project
:type project: str
:param aid: XNAT Accession ID
:type aid: str
:returns: Generator of scan data dictionaries
:rtype: :mod:`dict`
'''
if not aid:
aid = accession(auth, label, project)
path = '/data/experiments'
params = {
'xsiType': 'neuroinfo:extendedboldqc',
'columns': ','.join(extendedboldqc.columns.keys())
}
if project:
params['project'] = project
params['xnat:mrSessionData/ID'] = aid
_,result = _get(auth, path, 'json', autobox=True, params=params)
for result in result['ResultSet']['Result']:
        if scan_ids is None or result['neuroinfo:extendedboldqc/scan/scan_id'] in scan_ids:
data = dict()
for k,v in iter(extendedboldqc.columns.items()):
data[v] = result[k]
yield data | Get ExtendedBOLDQC data as a sequence of dictionaries.
Example:
>>> import yaxil
>>> import json
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> for eqc in yaxil.extendedboldqc2(auth, 'AB1234C'):
... print(json.dumps(eqc, indent=2))
:param auth: XNAT authentication object
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT MR Session label
:type label: str
:param scan_ids: Scan numbers to return
:type scan_ids: list
:param project: XNAT MR Session project
:type project: str
:param aid: XNAT Accession ID
:type aid: str
:returns: Generator of scan data dictionaries
:rtype: :mod:`dict` |
def autosave_all(self):
"""Autosave all opened files."""
for index in range(self.stack.get_stack_count()):
self.autosave(index) | Autosave all opened files. |
def comment_form(context, object):
"""
Usage:
{% comment_form obj as comment_form %}
    Will read the `user` var out of the context to know if the form should be
    for an auth'd user or not.
"""
user = context.get("user")
form_class = context.get("form", CommentForm)
form = form_class(obj=object, user=user)
return form | Usage:
{% comment_form obj as comment_form %}
Will read the `user` var out of the context to know if the form should be
for an auth'd user or not. |
def render_image(**kwargs):
"""
Unstrict template block for rendering an image:
<img alt="{alt_text}" title="{title}" src="{url}">
"""
html = ''
url = kwargs.get('url', None)
if url:
html = '<img'
alt_text = kwargs.get('alt_text', None)
if alt_text:
html += ' alt="{}"'.format(alt_text)
title = kwargs.get('title', None)
if title:
html += ' title="{}"'.format(title)
html += ' src="{}">'.format(url)
return html | Unstrict template block for rendering an image:
<img alt="{alt_text}" title="{title}" src="{url}"> |
def path_order (x, y):
""" Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name.
"""
if x == y:
return 0
xg = get_grist (x)
yg = get_grist (y)
if yg and not xg:
return -1
elif xg and not yg:
return 1
else:
if not xg:
x = feature.expand_subfeatures([x])
y = feature.expand_subfeatures([y])
if x < y:
return -1
elif x > y:
return 1
else:
return 0 | Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name. |
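A hedged sketch of how a cmp-style comparator such as path_order is typically consumed; it only runs inside the module that defines get_grist and feature, and the property strings below are made-up examples.
import functools

# path_order follows the old cmp() protocol, so adapt it with cmp_to_key
# before handing it to sorted(); the properties are illustrative only.
properties = ['<toolset>gcc', '<variant>release', 'include']
ordered = sorted(properties, key=functools.cmp_to_key(path_order))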
def _make_cloud_datastore_context(app_id, external_app_ids=()):
"""Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the external_apps
list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context.
"""
from . import model # Late import to deal with circular imports.
# Late import since it might not exist.
if not datastore_pbs._CLOUD_DATASTORE_ENABLED:
raise datastore_errors.BadArgumentError(
datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
import googledatastore
try:
from google.appengine.datastore import cloud_datastore_v1_remote_stub
except ImportError:
from google3.apphosting.datastore import cloud_datastore_v1_remote_stub
current_app_id = os.environ.get('APPLICATION_ID', None)
if current_app_id and current_app_id != app_id:
# TODO(pcostello): We should support this so users can connect to different
# applications.
raise ValueError('Cannot create a Cloud Datastore context that connects '
'to an application (%s) that differs from the application '
'already connected to (%s).' % (app_id, current_app_id))
os.environ['APPLICATION_ID'] = app_id
id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids))
project_id = id_resolver.resolve_project_id(app_id)
endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)
datastore = googledatastore.Datastore(
project_endpoint=endpoint,
credentials=googledatastore.helper.get_credentials_from_env())
conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1,
_id_resolver=id_resolver)
# If necessary, install the stubs
try:
stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)
apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1,
stub)
except:
pass # The stub is already installed.
# TODO(pcostello): Ensure the current stub is connected to the right project.
# Install a memcache and taskqueue stub which throws on everything.
try:
apiproxy_stub_map.apiproxy.RegisterStub('memcache', _ThrowingStub())
except:
pass # The stub is already installed.
try:
apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', _ThrowingStub())
except:
pass # The stub is already installed.
return make_context(conn=conn) | Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the external_apps
list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context. |
def cyber_observable_check(original_function):
"""Decorator for functions that require cyber observable data.
"""
def new_function(*args, **kwargs):
if not has_cyber_observable_data(args[0]):
return
func = original_function(*args, **kwargs)
if isinstance(func, Iterable):
for x in original_function(*args, **kwargs):
yield x
new_function.__name__ = original_function.__name__
return new_function | Decorator for functions that require cyber observable data. |
def get_warmer(self, doc_types=None, indices=None, name=None, querystring_args=None):
"""
Retrieve warmer
:param doc_types: list of document types
:param warmer: anything with ``serialize`` method or a dictionary
:param name: warmer name. If not provided, all warmers will be returned
:param querystring_args: additional arguments passed as GET params to ES
"""
name = name or ''
if not querystring_args:
querystring_args = {}
doc_types_str = ''
if doc_types:
doc_types_str = '/' + ','.join(doc_types)
path = '/{0}{1}/_warmer/{2}'.format(','.join(indices), doc_types_str, name)
return self._send_request(method='GET', path=path, params=querystring_args) | Retrieve warmer
:param doc_types: list of document types
:param warmer: anything with ``serialize`` method or a dictionary
:param name: warmer name. If not provided, all warmers will be returned
:param querystring_args: additional arguments passed as GET params to ES |
def _next_pattern(self):
"""Parses the next pattern by matching each in turn."""
current_state = self.state_stack[-1]
position = self._position
for pattern in self.patterns:
if current_state not in pattern.states:
continue
m = pattern.regex.match(self.source, position)
if not m:
continue
position = m.end()
token = None
if pattern.next_state:
self.state_stack.append(pattern.next_state)
if pattern.action:
callback = getattr(self, pattern.action, None)
if callback is None:
raise RuntimeError(
"No method defined for pattern action %s!" %
pattern.action)
if "token" in m.groups():
value = m.group("token")
else:
value = m.group(0)
token = callback(string=value, match=m,
pattern=pattern)
self._position = position
return token
self._error("Don't know how to match next. Did you forget quotes?",
start=self._position, end=self._position + 1) | Parses the next pattern by matching each in turn. |
def _start_loop(self, websocket, event_handler):
"""
        We will listen for websocket events, sending a heartbeat/pong every time
        we receive a TimeoutError. If we didn't, the webserver would close the idle
        connection, forcing us to reconnect.
"""
log.debug('Starting websocket loop')
while True:
try:
yield from asyncio.wait_for(
self._wait_for_message(websocket, event_handler),
timeout=self.options['timeout']
)
except asyncio.TimeoutError:
yield from websocket.pong()
log.debug("Sending heartbeat...")
                continue | We will listen for websocket events, sending a heartbeat/pong every time
we receive a TimeoutError. If we didn't, the webserver would close the idle connection,
forcing us to reconnect. |
def to_file(self, filename, format='shtools', header=None, errors=False,
**kwargs):
"""
Save spherical harmonic coefficients to a file.
Usage
-----
x.to_file(filename, [format='shtools', header, errors])
x.to_file(filename, [format='npy', **kwargs])
Parameters
----------
filename : str
Name of the output file.
format : str, optional, default = 'shtools'
'shtools' or 'npy'. See method from_file() for more information.
header : str, optional, default = None
A header string written to an 'shtools'-formatted file directly
before the spherical harmonic coefficients.
errors : bool, optional, default = False
If True, save the errors in the file (for 'shtools' formatted
files only).
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.save().
Description
-----------
If format='shtools', the coefficients and meta-data will be written to
an ascii formatted file. The first line is an optional user provided
header line, and the following line provides the attributes r0, gm,
omega, and lmax. The spherical harmonic coefficients are then listed,
with increasing degree and order, with the format
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively. If the errors are to be saved, the format of each line
will be
l, m, coeffs[0, l, m], coeffs[1, l, m], error[0, l, m], error[1, l, m]
If format='npy', the spherical harmonic coefficients (but not the
meta-data nor errors) will be saved to a binary numpy 'npy' file using
numpy.save().
"""
        if format == 'shtools':
            if errors is True and self.errors is None:
                raise ValueError('Cannot save errors when they have not been '
                                 'initialized.')
if self.omega is None:
omega = 0.
else:
omega = self.omega
with open(filename, mode='w') as file:
if header is not None:
file.write(header + '\n')
file.write('{:.16e}, {:.16e}, {:.16e}, {:d}\n'.format(
self.r0, self.gm, omega, self.lmax))
for l in range(self.lmax+1):
for m in range(l+1):
if errors is True:
file.write('{:d}, {:d}, {:.16e}, {:.16e}, '
'{:.16e}, {:.16e}\n'
.format(l, m, self.coeffs[0, l, m],
self.coeffs[1, l, m],
self.errors[0, l, m],
self.errors[1, l, m]))
else:
file.write('{:d}, {:d}, {:.16e}, {:.16e}\n'
.format(l, m, self.coeffs[0, l, m],
self.coeffs[1, l, m]))
        elif format == 'npy':
_np.save(filename, self.coeffs, **kwargs)
else:
raise NotImplementedError(
'format={:s} not implemented'.format(repr(format))) | Save spherical harmonic coefficients to a file.
Usage
-----
x.to_file(filename, [format='shtools', header, errors])
x.to_file(filename, [format='npy', **kwargs])
Parameters
----------
filename : str
Name of the output file.
format : str, optional, default = 'shtools'
'shtools' or 'npy'. See method from_file() for more information.
header : str, optional, default = None
A header string written to an 'shtools'-formatted file directly
before the spherical harmonic coefficients.
errors : bool, optional, default = False
If True, save the errors in the file (for 'shtools' formatted
files only).
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.save().
Description
-----------
If format='shtools', the coefficients and meta-data will be written to
an ascii formatted file. The first line is an optional user provided
header line, and the following line provides the attributes r0, gm,
omega, and lmax. The spherical harmonic coefficients are then listed,
with increasing degree and order, with the format
l, m, coeffs[0, l, m], coeffs[1, l, m]
where l and m are the spherical harmonic degree and order,
respectively. If the errors are to be saved, the format of each line
will be
l, m, coeffs[0, l, m], coeffs[1, l, m], error[0, l, m], error[1, l, m]
If format='npy', the spherical harmonic coefficients (but not the
meta-data nor errors) will be saved to a binary numpy 'npy' file using
numpy.save(). |
def clear(self):
"""Clears the Merkle Tree by releasing the Merkle root and each leaf's references, the rest
should be garbage collected. This may be useful for situations where you want to take an existing
tree, make changes to the leaves, but leave it uncalculated for some time, without node
references that are no longer correct still hanging around. Usually it is better just to make
a new tree.
"""
self.root = None
for leaf in self.leaves:
leaf.p, leaf.sib, leaf.side = (None, ) * 3 | Clears the Merkle Tree by releasing the Merkle root and each leaf's references, the rest
should be garbage collected. This may be useful for situations where you want to take an existing
tree, make changes to the leaves, but leave it uncalculated for some time, without node
references that are no longer correct still hanging around. Usually it is better just to make
a new tree. |
def uniqueify_all(init_reqs, *other_reqs):
"""Find the union of all the given requirements."""
union = set(init_reqs)
for reqs in other_reqs:
union.update(reqs)
return list(union) | Find the union of all the given requirements. |
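A tiny usage example, assuming the uniqueify_all defined above is in scope.
# Order of the result is not guaranteed because it round-trips through a set.
reqs = uniqueify_all(['numpy', 'scipy'], ['scipy', 'pandas'], ['numpy'])
print(sorted(reqs))  # ['numpy', 'pandas', 'scipy']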
def msgDict(d,matching=None,sep1="=",sep2="\n",sort=True,cantEndWith=None):
"""convert a dictionary to a pretty formatted string."""
msg=""
if "record" in str(type(d)):
keys=d.dtype.names
else:
keys=d.keys()
if sort:
keys=sorted(keys)
for key in keys:
if key[0]=="_":
continue
if matching:
if not key in matching:
continue
        if cantEndWith and key[-len(cantEndWith):]==cantEndWith:
continue
if 'float' in str(type(d[key])):
s="%.02f"%d[key]
else:
s=str(d[key])
if "object" in s:
s='<object>'
msg+=key+sep1+s+sep2
return msg.strip() | convert a dictionary to a pretty formatted string. |
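A short usage sketch for msgDict, assuming the function above is in scope; note that keys starting with an underscore are skipped and floats are shown with two decimals.
record = {'rate': 3.14159, '_hidden': 1, 'label': 'alpha'}
print(msgDict(record))
# label=alpha
# rate=3.14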
def create_scoped_session(self, options=None):
"""Create a :class:`~sqlalchemy.orm.scoping.scoped_session`
on the factory from :meth:`create_session`.
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function. If it's not provided, Flask's app
context stack identity is used. This will ensure that sessions are
created and removed with the request/response cycle, and should be fine
in most cases.
:param options: dict of keyword arguments passed to session class in
``create_session``
"""
if options is None:
options = {}
scopefunc = options.pop('scopefunc', _app_ctx_stack.__ident_func__)
options.setdefault('query_cls', self.Query)
return orm.scoped_session(
self.create_session(options), scopefunc=scopefunc
) | Create a :class:`~sqlalchemy.orm.scoping.scoped_session`
on the factory from :meth:`create_session`.
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function. If it's not provided, Flask's app
context stack identity is used. This will ensure that sessions are
created and removed with the request/response cycle, and should be fine
in most cases.
:param options: dict of keyword arguments passed to session class in
``create_session`` |
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named {key} in the file'.format(key=key))
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result() | Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object |
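A minimal round-trip sketch using the public pandas HDFStore API (requires the optional PyTables dependency); the file name and column values are placeholders.
import pandas as pd

df = pd.DataFrame({'A': range(10), 'B': list('abcdefghij')})
with pd.HDFStore('demo.h5') as store:
    # 'table' format is required for where-based selection.
    store.put('df', df, format='table')
    subset = store.select('df', where='index > 5', columns=['A'])
    chunks = store.select('df', chunksize=4)  # iterator yielding DataFrame chunks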
def save_to_local(self, callback_etat=print):
"""
        Saves the current in-memory base to a local file.
        It's a backup, not a convenient way to update data.
:param callback_etat: state callback, taking str,int,int as args
"""
callback_etat("Aquisition...", 0, 3)
d = self.dumps()
s = json.dumps(d, indent=4, cls=formats.JsonEncoder)
callback_etat("Chiffrement...", 1, 3)
s = security.protege_data(s, True)
callback_etat("Enregistrement...", 2, 3)
try:
with open(self.LOCAL_DB_PATH, 'wb') as f:
f.write(s)
except (FileNotFoundError):
logging.exception(self.__class__.__name__)
raise StructureError("Chemin de sauvegarde introuvable !") | Saved current in memory base to local file.
It's a backup, not a convenient way to update datas
:param callback_etat: state callback, taking str,int,int as args |
def addchild(self, startip, endip, name, description):
"""
        Method takes input of str startip, str endip, name, and description and adds a child scope.
The startip and endip MUST be in the IP address range of the parent scope.
:param startip: str of ipv4 address of the first address in the child scope
:param endip: str of ipv4 address of the last address in the child scope
        :param name: name of the owner of the child scope
:param description: description of the child scope
:return:
"""
        add_child_ip_scope(self.auth, self.url, startip, endip, name, description, self.id) | Method takes input of str startip, str endip, name, and description and adds a child scope.
The startip and endip MUST be in the IP address range of the parent scope.
:param startip: str of ipv4 address of the first address in the child scope
:param endip: str of ipv4 address of the last address in the child scope
:param name: name of the owner of the child scope
:param description: description of the child scope
:return: |
def generic_find_fk_constraint_names(table, columns, referenced, insp):
"""Utility to find foreign-key constraint names in alembic migrations"""
names = set()
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
names.add(fk['name'])
return names | Utility to find foreign-key constraint names in alembic migrations |
def parse(url):
"""
Parses out the information for this url, returning its components
expanded out to Python objects.
:param url | <str>
:return (<str> path, <dict> query, <str> fragment)
"""
result = urlparse.urlparse(nstr(url))
path = result.scheme + '://' + result.netloc
if result.path:
path += result.path
query = {}
# extract the python information from the query
if result.query:
url_query = urlparse.parse_qs(result.query)
for key, value in url_query.items():
if type(value) == list and len(value) == 1:
value = value[0]
query[key] = value
return path, query, result.fragment | Parses out the information for this url, returning its components
expanded out to Python objects.
:param url | <str>
:return (<str> path, <dict> query, <str> fragment) |
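For reference, a standard-library sketch that performs the same decomposition as parse() above; this is an approximation, not the original implementation.
from urllib.parse import urlparse, parse_qs

url = 'https://example.com/api?ids=1&ids=2&q=test#section'
parts = urlparse(url)
path = parts.scheme + '://' + parts.netloc + parts.path
# Single-item query values are unwrapped, mirroring the behaviour above.
query = {k: v[0] if len(v) == 1 else v for k, v in parse_qs(parts.query).items()}
# path == 'https://example.com/api'
# query == {'ids': ['1', '2'], 'q': 'test'}
# parts.fragment == 'section'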
def augment_audio_with_sox(path, sample_rate, tempo, gain):
"""
Changes tempo and gain of the recording with sox and loads it.
"""
with NamedTemporaryFile(suffix=".wav") as augmented_file:
augmented_filename = augmented_file.name
sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)]
sox_params = "sox \"{}\" -r {} -c 1 -b 16 {} {} >/dev/null 2>&1".format(path, sample_rate,
augmented_filename,
" ".join(sox_augment_params))
os.system(sox_params)
y = load_audio(augmented_filename)
return y | Changes tempo and gain of the recording with sox and loads it. |
def get_title():
''' Returns console title string.
https://docs.microsoft.com/en-us/windows/console/getconsoletitle
'''
MAX_LEN = 256
buffer_ = create_unicode_buffer(MAX_LEN)
kernel32.GetConsoleTitleW(buffer_, MAX_LEN)
log.debug('%s', buffer_.value)
return buffer_.value | Returns console title string.
https://docs.microsoft.com/en-us/windows/console/getconsoletitle |
def check(self, order, sids):
''' Check if a message is available '''
payload = "{}"
#TODO store hashes {'intuition': {'id1': value, 'id2': value}}
# Block self.timeout seconds on self.channel for a message
raw_msg = self.client.blpop(self.channel, timeout=self.timeout)
if raw_msg:
_, payload = raw_msg
msg = json.loads(payload.replace("'", '"'), encoding='utf-8')
for sid in msg.keys():
#TODO Harmonize lower() and upper() symbols
if sid.lower() in map(str.lower, sids):
print('ordering {} of {}'.format(msg[sid], sid))
#order(sid.upper(), msg[sid])
order(sid, msg[sid])
else:
print('skipping unknown symbol {}'.format(sid)) | Check if a message is available |
def value(val, transform=None):
''' Convenience function to explicitly return a "value" specification for
a Bokeh :class:`~bokeh.core.properties.DataSpec` property.
Args:
val (any) : a fixed value to specify for a ``DataSpec`` property.
transform (Transform, optional) : a transform to apply (default: None)
Returns:
dict : ``{ "value": name }``
.. note::
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
if transform:
return dict(value=val, transform=transform)
return dict(value=val) | Convenience function to explicitly return a "value" specification for
a Bokeh :class:`~bokeh.core.properties.DataSpec` property.
Args:
val (any) : a fixed value to specify for a ``DataSpec`` property.
transform (Transform, optional) : a transform to apply (default: None)
Returns:
dict : ``{ "value": name }``
.. note::
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source) |
def create_geom_filter(request, mapped_class, geom_attr):
"""Create MapFish geometry filter based on the request params. Either
a box or within or geometry filter, depending on the request params.
Additional named arguments are passed to the spatial filter.
Arguments:
request
the request.
mapped_class
the SQLAlchemy mapped class.
geom_attr
the key of the geometry property as defined in the SQLAlchemy
mapper. If you use ``declarative_base`` this is the name of
the geometry attribute as defined in the mapped class.
"""
tolerance = float(request.params.get('tolerance', 0.0))
epsg = None
if 'epsg' in request.params:
epsg = int(request.params['epsg'])
box = request.params.get('bbox')
shape = None
if box is not None:
box = [float(x) for x in box.split(',')]
shape = Polygon(((box[0], box[1]), (box[0], box[3]),
(box[2], box[3]), (box[2], box[1]),
(box[0], box[1])))
elif 'lon' in request.params and 'lat' in request.params:
shape = Point(float(request.params['lon']),
float(request.params['lat']))
elif 'geometry' in request.params:
shape = loads(request.params['geometry'],
object_hook=GeoJSON.to_instance)
shape = asShape(shape)
if shape is None:
return None
column_epsg = _get_col_epsg(mapped_class, geom_attr)
geom_attr = getattr(mapped_class, geom_attr)
epsg = column_epsg if epsg is None else epsg
if epsg != column_epsg:
geom_attr = func.ST_Transform(geom_attr, epsg)
geometry = from_shape(shape, srid=epsg)
return func.ST_DWITHIN(geom_attr, geometry, tolerance) | Create MapFish geometry filter based on the request params. Either
a box or within or geometry filter, depending on the request params.
Additional named arguments are passed to the spatial filter.
Arguments:
request
the request.
mapped_class
the SQLAlchemy mapped class.
geom_attr
the key of the geometry property as defined in the SQLAlchemy
mapper. If you use ``declarative_base`` this is the name of
the geometry attribute as defined in the mapped class. |
def getref():
"""Current default values for graph and component tables, primary area,
and wavelength set.
.. note::
Also see :func:`setref`.
Returns
-------
ans : dict
Mapping of parameter names to their current values.
"""
ans=dict(graphtable=GRAPHTABLE,
comptable=COMPTABLE,
thermtable=THERMTABLE,
area=PRIMARY_AREA,
waveset=_default_waveset_str)
return ans | Current default values for graph and component tables, primary area,
and wavelength set.
.. note::
Also see :func:`setref`.
Returns
-------
ans : dict
Mapping of parameter names to their current values. |
def build_board_checkers():
""" builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
"""
grd = Grid(8,8, ["B","W"])
for c in range(4):
grd.set_tile(0,(c*2) - 1, "B")
grd.set_tile(1,(c*2) - 0, "B")
grd.set_tile(6,(c*2) + 1, "W")
grd.set_tile(7,(c*2) - 0, "W")
print(grd)
return grd | builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0 |
def write_config(config_data: Dict[str, Path],
path: Path = None):
""" Save the config file.
:param config_data: The index to save
    :param path: The place to save the file. If ``None``,
:py:meth:`infer_config_base_dir()` will be used
Only keys that are in the config elements will be saved.
"""
path = Path(path) if path else infer_config_base_dir()
valid_names = [ce.name for ce in CONFIG_ELEMENTS]
try:
os.makedirs(path, exist_ok=True)
with (path/_CONFIG_FILENAME).open('w') as base_f:
json.dump({k: str(v) for k, v in config_data.items()
if k in valid_names},
base_f, indent=2)
except OSError as e:
sys.stderr.write("Config index write to {} failed: {}\n"
.format(path/_CONFIG_FILENAME, e)) | Save the config file.
:param config_data: The index to save
:param path: The place to save the file. If ``None``,
:py:meth:`infer_config_base_dir()` will be used
Only keys that are in the config elements will be saved. |
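A hedged usage sketch; it assumes the surrounding config module (CONFIG_ELEMENTS, infer_config_base_dir) is importable, and the element name and paths are illustrative only.
from pathlib import Path

# Hypothetical element name and paths; only keys matching CONFIG_ELEMENTS
# are actually written out.
config_data = {'labware_database_file': Path('/data/labware.sqlite')}
write_config(config_data, path=Path('/tmp/opentrons-config'))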
def copy_fields(layer, fields_to_copy):
"""Copy fields inside an attribute table.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields_to_copy: Dictionary of fields to copy.
:type fields_to_copy: dict
"""
for field in fields_to_copy:
index = layer.fields().lookupField(field)
if index != -1:
layer.startEditing()
source_field = layer.fields().at(index)
new_field = QgsField(source_field)
new_field.setName(fields_to_copy[field])
layer.addAttribute(new_field)
new_index = layer.fields().lookupField(fields_to_copy[field])
for feature in layer.getFeatures():
attributes = feature.attributes()
source_value = attributes[index]
layer.changeAttributeValue(
feature.id(), new_index, source_value)
layer.commitChanges()
layer.updateFields() | Copy fields inside an attribute table.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields_to_copy: Dictionary of fields to copy.
:type fields_to_copy: dict |
def main(sample_id, fastq_pair, gsize, minimum_coverage, opts):
""" Main executor of the integrity_coverage template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
gsize : float or int
Estimate of genome size in Mb.
minimum_coverage : float or int
Minimum coverage required for a sample to pass the coverage check
opts : list
List of arbitrary options. See `Expected input`_.
"""
logger.info("Starting integrity coverage main")
# Check for runtime options
if "-e" in opts:
skip_encoding = True
else:
skip_encoding = False
# Information for encoding guess
gmin, gmax = 99, 0
encoding = []
phred = None
# Information for coverage estimation
chars = 0
nreads = 0
# Information on maximum read length
max_read_length = 0
# Get compression of each FastQ pair file
file_objects = []
for fastq in fastq_pair:
logger.info("Processing file {}".format(fastq))
logger.info("[{}] Guessing file compression".format(fastq))
ftype = guess_file_compression(fastq)
# This can guess the compression of gz, bz2 and zip. If it cannot
# find the compression type, it tries to open a regular file
if ftype:
logger.info("[{}] Found file compression: {}".format(
fastq, ftype))
file_objects.append(COPEN[ftype](fastq, "rt"))
else:
logger.info("[{}] File compression not found. Assuming an "
"uncompressed file".format(fastq))
file_objects.append(open(fastq))
logger.info("Starting FastQ file parsing")
# The '*_encoding' file stores a string with the encoding ('Sanger')
# If no encoding is guessed, 'None' should be stored
# The '*_phred' file stores a string with the phred score ('33')
# If no phred is guessed, 'None' should be stored
# The '*_coverage' file stores the estimated coverage ('88')
# The '*_report' file stores a csv report of the file
# The '*_max_len' file stores a string with the maximum contig len ('155')
with open("{}_encoding".format(sample_id), "w") as enc_fh, \
open("{}_phred".format(sample_id), "w") as phred_fh, \
open("{}_coverage".format(sample_id), "w") as cov_fh, \
open("{}_report".format(sample_id), "w") as cov_rep, \
open("{}_max_len".format(sample_id), "w") as len_fh, \
open(".report.json", "w") as json_report, \
open(".status", "w") as status_fh, \
open(".fail", "w") as fail_fh:
try:
# Iterate over both pair files sequentially using itertools.chain
for i, line in enumerate(chain(*file_objects)):
# Parse only every 4th line of the file for the encoding
# e.g.: AAAA/EEEEEEEEEEE<EEEEEEEEEEEEEEEEEEEEEEEEE (...)
if (i + 1) % 4 == 0 and not skip_encoding:
# It is important to strip() the line so that any newline
# character is removed and not accounted for in the
# encoding guess
lmin, lmax = get_qual_range(line.strip())
# Guess new encoding if the range expands the previously
# set boundaries of gmin and gmax
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
encoding, phred = get_encodings_in_range(gmin, gmax)
logger.debug(
"Updating estimates at line {} with range {} to"
" '{}' (encoding) and '{}' (phred)".format(
i, [lmin, lmax], encoding, phred))
# Parse only every 2nd line of the file for the coverage
# e.g.: GGATAATCTACCTTGACGATTTGTACTGGCGTTGGTTTCTTA (...)
if (i + 3) % 4 == 0:
read_len = len(line.strip())
chars += read_len
nreads += 1
# Evaluate maximum read length for sample
if read_len > max_read_length:
logger.debug("Updating maximum read length at line "
"{} to {}".format(i, read_len))
max_read_length = read_len
# End of FastQ parsing
logger.info("Finished FastQ file parsing")
# The minimum expected coverage for a sample to pass
exp_coverage = round(chars / (gsize * 1e6), 2)
# Set json report
if "-e" not in opts:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Raw BP",
"value": chars,
"table": "qc",
"columnBar": True},
{"header": "Reads",
"value": nreads,
"table": "qc",
"columnBar": True},
{"header": "Coverage",
"value": exp_coverage,
"table": "qc",
"columnBar": True,
"failThreshold": minimum_coverage
}
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"sparkline": chars
}
}],
}
else:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Coverage",
"value": exp_coverage,
"table": "qc",
"columnBar": True,
"failThreshold": minimum_coverage
}
],
}],
}
# Get encoding
if len(encoding) > 0:
encoding = set(encoding)
phred = set(phred)
# Get encoding and phred as strings
# e.g. enc: Sanger, Illumina-1.8
# e.g. phred: 64
enc = "{}".format(",".join([x for x in encoding]))
phred = "{}".format(",".join(str(x) for x in phred))
logger.info("Encoding set to {}".format(enc))
logger.info("Phred set to {}".format(enc))
enc_fh.write(enc)
phred_fh.write(phred)
# Encoding not found
else:
if not skip_encoding:
encoding_msg = "Could not guess encoding and phred from " \
"FastQ"
logger.warning(encoding_msg)
json_dic["warnings"] = [{
"sample": sample_id,
"table": "qc",
"value": [encoding_msg]
}]
enc_fh.write("None")
phred_fh.write("None")
# Estimate coverage
logger.info("Estimating coverage based on a genome size of "
"{}".format(gsize))
logger.info("Expected coverage is {}".format(exp_coverage))
if exp_coverage >= minimum_coverage:
cov_rep.write("{},{},{}\\n".format(
sample_id, str(exp_coverage), "PASS"))
cov_fh.write(str(exp_coverage))
status_fh.write("pass")
# Estimated coverage does not pass minimum threshold
else:
fail_msg = "Sample with low coverage ({}), below the {} " \
"threshold".format(exp_coverage, minimum_coverage)
logger.error(fail_msg)
fail_fh.write(fail_msg)
cov_fh.write("fail")
status_fh.write("fail")
cov_rep.write("{},{},{}\\n".format(
sample_id, str(exp_coverage), "FAIL"))
json_dic["fail"] = [{
"sample": sample_id,
"table": "qc",
"value": [fail_msg]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
# Maximum read length
len_fh.write("{}".format(max_read_length))
# This exception is raised when the input FastQ files are corrupted
except EOFError:
logger.error("The FastQ files could not be correctly "
"parsed. They may be corrupt")
for fh in [enc_fh, phred_fh, cov_fh, cov_rep, len_fh]:
fh.write("corrupt")
status_fh.write("fail")
fail_fh.write("Could not read/parse FastQ. "
"Possibly corrupt file") | Main executor of the integrity_coverage template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
gsize : float or int
Estimate of genome size in Mb.
minimum_coverage : float or int
Minimum coverage required for a sample to pass the coverage check
opts : list
List of arbitrary options. See `Expected input`_. |
def countries(self):
"""
Access the countries
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryList
"""
if self._countries is None:
self._countries = CountryList(self._version, )
return self._countries | Access the countries
:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryList |
def download(
self,
file: Union[IO[bytes], asyncio.StreamWriter, None]=None,
raw: bool=False, rewind: bool=True,
duration_timeout: Optional[float]=None):
'''Read the response content into file.
Args:
file: A file object or asyncio stream.
raw: Whether chunked transfer encoding should be included.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Be sure to call :meth:`start` first.
Coroutine.
'''
if self._session_state != SessionState.request_sent:
raise RuntimeError('Request not sent')
if rewind and file and hasattr(file, 'seek'):
original_offset = file.tell()
else:
original_offset = None
if not hasattr(file, 'drain'):
self._response.body = file
if not isinstance(file, Body):
self._response.body = Body(file)
read_future = self._stream.read_body(self._request, self._response, file=file, raw=raw)
try:
yield from asyncio.wait_for(read_future, timeout=duration_timeout)
except asyncio.TimeoutError as error:
raise DurationTimeout(
'Did not finish reading after {} seconds.'
.format(duration_timeout)
) from error
self._session_state = SessionState.response_received
if original_offset is not None:
file.seek(original_offset)
self.event_dispatcher.notify(self.Event.end_response, self._response)
self.recycle() | Read the response content into file.
Args:
file: A file object or asyncio stream.
raw: Whether chunked transfer encoding should be included.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Be sure to call :meth:`start` first.
Coroutine. |
def check(text):
"""Check the text."""
err = "MSC104"
msg = u"Don't fail to capitalize roman numeral abbreviations."
pwd_regex = " (I(i*)|i*)"
password = [
"World War{}".format(pwd_regex),
]
return blacklist(text, password, err, msg) | Check the text. |
def apply_noise(self, noise_weights=None, uniform_amount=0.1):
"""
Add noise to every link in the network.
        Can use either a ``uniform_amount`` or a ``noise_weights`` weight
        profile. If ``noise_weights`` is set, ``uniform_amount`` will be
ignored.
Args:
noise_weights (list): a list of weight tuples
of form ``(float, float)`` corresponding to
``(amount, weight)`` describing the noise to be
added to each link in the graph
uniform_amount (float): the maximum amount of uniform noise
to be applied if ``noise_weights`` is not set
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_1, 3)
>>> node_1.add_link(node_2, 5)
>>> node_2.add_link(node_1, 1)
>>> graph = Graph([node_1, node_2])
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
One 3
Two 5
>>> graph.apply_noise()
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(
... link.target.value, link.weight)) # doctest: +SKIP
One 3.154
Two 5.321
"""
# Main node loop
for node in self.node_list:
for link in node.link_list:
if noise_weights is not None:
noise_amount = round(weighted_rand(noise_weights), 3)
else:
noise_amount = round(random.uniform(
0, link.weight * uniform_amount), 3)
link.weight += noise_amount | Add noise to every link in the network.
Can use either a ``uniform_amount`` or a ``noise_weights`` weight
profile. If ``noise_weights`` is set, ``uniform_amount`` will be
ignored.
Args:
noise_weights (list): a list of weight tuples
of form ``(float, float)`` corresponding to
``(amount, weight)`` describing the noise to be
added to each link in the graph
uniform_amount (float): the maximum amount of uniform noise
to be applied if ``noise_weights`` is not set
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_link(node_1, 3)
>>> node_1.add_link(node_2, 5)
>>> node_2.add_link(node_1, 1)
>>> graph = Graph([node_1, node_2])
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(link.target.value, link.weight))
One 3
Two 5
>>> graph.apply_noise()
>>> for link in graph.node_list[0].link_list:
... print('{} {}'.format(
... link.target.value, link.weight)) # doctest: +SKIP
One 3.154
Two 5.321 |
def raise_for_status(self):
"""Raises HTTPError if the request got an error."""
if 400 <= self.status_code < 600:
message = 'Error %s for %s' % (self.status_code, self.url)
raise HTTPError(message) | Raises HTTPError if the request got an error. |
def ensure_crops(self, *required_crops):
"""
Make sure a crop exists for each crop in required_crops.
Existing crops will not be changed.
If settings.ASSET_CELERY is specified then
the task will be run async
"""
if self._can_crop():
if settings.CELERY or settings.USE_CELERY_DECORATOR:
# this means that we are using celery
args = [self.pk]+list(required_crops)
tasks.ensure_crops.apply_async(args=args, countdown=5)
else:
tasks.ensure_crops(None, *required_crops, asset=self) | Make sure a crop exists for each crop in required_crops.
Existing crops will not be changed.
If settings.ASSET_CELERY is specified then
the task will be run async |
def set_umr_namelist(self):
"""Set UMR excluded modules name list"""
arguments, valid = QInputDialog.getText(self, _('UMR'),
_("Set the list of excluded modules as "
"this: <i>numpy, scipy</i>"),
QLineEdit.Normal,
", ".join(self.get_option('umr/namelist')))
if valid:
arguments = to_text_string(arguments)
if arguments:
namelist = arguments.replace(' ', '').split(',')
fixed_namelist = []
non_ascii_namelist = []
for module_name in namelist:
if PY2:
if all(ord(c) < 128 for c in module_name):
if programs.is_module_installed(module_name):
fixed_namelist.append(module_name)
else:
QMessageBox.warning(self, _('Warning'),
_("You are working with Python 2, this means that "
"you can not import a module that contains non-"
"ascii characters."), QMessageBox.Ok)
non_ascii_namelist.append(module_name)
elif programs.is_module_installed(module_name):
fixed_namelist.append(module_name)
invalid = ", ".join(set(namelist)-set(fixed_namelist)-
set(non_ascii_namelist))
if invalid:
QMessageBox.warning(self, _('UMR'),
_("The following modules are not "
"installed on your machine:\n%s"
) % invalid, QMessageBox.Ok)
QMessageBox.information(self, _('UMR'),
_("Please note that these changes will "
"be applied only to new Python/IPython "
"consoles"), QMessageBox.Ok)
else:
fixed_namelist = []
self.set_option('umr/namelist', fixed_namelist) | Set UMR excluded modules name list |
def connect(self):
"""
Starts up an authentication session for the client using cookie
authentication if necessary.
"""
if self.r_session:
self.session_logout()
if self.admin_party:
self._use_iam = False
self.r_session = ClientSession(
timeout=self._timeout
)
elif self._use_basic_auth:
self._use_iam = False
self.r_session = BasicSession(
self._user,
self._auth_token,
self.server_url,
timeout=self._timeout
)
elif self._use_iam:
self.r_session = IAMSession(
self._auth_token,
self.server_url,
auto_renew=self._auto_renew,
client_id=self._iam_client_id,
client_secret=self._iam_client_secret,
timeout=self._timeout
)
else:
self.r_session = CookieSession(
self._user,
self._auth_token,
self.server_url,
auto_renew=self._auto_renew,
timeout=self._timeout
)
# If a Transport Adapter was supplied add it to the session
if self.adapter is not None:
self.r_session.mount(self.server_url, self.adapter)
if self._client_user_header is not None:
self.r_session.headers.update(self._client_user_header)
self.session_login()
# Utilize an event hook to append to the response message
# using :func:`~cloudant.common_util.append_response_error_content`
self.r_session.hooks['response'].append(append_response_error_content) | Starts up an authentication session for the client using cookie
authentication if necessary. |
def GenerateLabels(self, hash_information):
"""Generates a list of strings that will be used in the event tag.
Args:
hash_information (dict[str, object]): JSON decoded contents of the result
of a Viper lookup, as produced by the ViperAnalyzer.
Returns:
list[str]: list of labels to apply to events.
"""
if not hash_information:
return ['viper_not_present']
projects = []
tags = []
for project, entries in iter(hash_information.items()):
if not entries:
continue
projects.append(project)
for entry in entries:
if entry['tags']:
tags.extend(entry['tags'])
if not projects:
return ['viper_not_present']
strings = ['viper_present']
for project_name in projects:
label = events.EventTag.CopyTextToLabel(
project_name, prefix='viper_project_')
strings.append(label)
for tag_name in tags:
label = events.EventTag.CopyTextToLabel(tag_name, prefix='viper_tag_')
strings.append(label)
return strings | Generates a list of strings that will be used in the event tag.
Args:
hash_information (dict[str, object]): JSON decoded contents of the result
of a Viper lookup, as produced by the ViperAnalyzer.
Returns:
list[str]: list of labels to apply to events. |
def convert_to_int(x: Any, default: int = None) -> int:
"""
Transforms its input into an integer, or returns ``default``.
"""
try:
return int(x)
except (TypeError, ValueError):
return default | Transforms its input into an integer, or returns ``default``. |
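A couple of illustrative calls, assuming the function above is in scope.
assert convert_to_int('42') == 42
assert convert_to_int('not a number', default=0) == 0
assert convert_to_int(None) is None          # falls back to the default of None
assert convert_to_int(3.9) == 3              # int() truncates floats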
def getBucketInfo(self, buckets):
""" See the function description in base.py
"""
if self.ncategories==0:
return 0
topDownMappingM = self._getTopDownMapping()
categoryIndex = buckets[0]
category = self.categories[categoryIndex]
encoding = topDownMappingM.getRow(categoryIndex)
return [EncoderResult(value=category, scalar=categoryIndex,
encoding=encoding)] | See the function description in base.py |
def search_device_by_id(self, deviceID) -> Device:
""" searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device
"""
for d in self.devices:
if d.id == deviceID:
return d
return None | searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device |
def mkdir(self, paths, create_parent=False, mode=0o755):
        ''' Create a directory
:param paths: Paths to create
:type paths: list of strings
:param create_parent: Also create the parent directories
:type create_parent: boolean
:param mode: Mode the directory should be created with
:type mode: int
:returns: a generator that yields dictionaries
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("mkdirs: no path given")
for path in paths:
if not path.startswith("/"):
path = self._join_user_path(path)
fileinfo = self._get_file_info(path)
if not fileinfo:
try:
request = client_proto.MkdirsRequestProto()
request.src = path
request.masked.perm = mode
request.createParent = create_parent
response = self.service.mkdirs(request)
yield {"path": path, "result": response.result}
except RequestError as e:
yield {"path": path, "result": False, "error": str(e)}
else:
yield {"path": path, "result": False, "error": "mkdir: `%s': File exists" % path} | Create a directoryCount
:param paths: Paths to create
:type paths: list of strings
:param create_parent: Also create the parent directories
:type create_parent: boolean
:param mode: Mode the directory should be created with
:type mode: int
:returns: a generator that yields dictionaries |
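A hedged usage sketch against a snakebite Client; the namenode host/port and paths are placeholders, and the loop simply drains the generator that mkdir returns.
from snakebite.client import Client

client = Client('namenode.example.com', 8020)  # placeholder host/port
for outcome in client.mkdir(['/tmp/demo/a', '/tmp/demo/b'], create_parent=True):
    print(outcome)  # e.g. {'path': '/tmp/demo/a', 'result': True}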
def remote_pdb_handler(signum, frame):
""" Handler to drop us into a remote debugger upon receiving SIGUSR1 """
try:
from remote_pdb import RemotePdb
rdb = RemotePdb(host="127.0.0.1", port=0)
rdb.set_trace(frame=frame)
except ImportError:
log.warning(
"remote_pdb unavailable. Please install remote_pdb to "
"allow remote debugging."
)
# Restore signal handler for later
signal.signal(signum, remote_pdb_handler) | Handler to drop us into a remote debugger upon receiving SIGUSR1 |
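A short sketch of wiring the handler up, assuming the function above is in scope (SIGUSR1 is not available on Windows).
import signal

# After this, `kill -USR1 <pid>` drops the process into a RemotePdb session
# listening on 127.0.0.1 (remote_pdb must be installed).
signal.signal(signal.SIGUSR1, remote_pdb_handler)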
def _open_file(self, filename):
"""Open a file to be tailed"""
if not self._os_is_windows:
self._fh = open(filename, "rb")
self.filename = filename
self._fh.seek(0, os.SEEK_SET)
self.oldsize = 0
return
# if we're in Windows, we need to use the WIN32 API to open the
# file without locking it
import win32file
import msvcrt
handle = win32file.CreateFile(filename,
win32file.GENERIC_READ,
win32file.FILE_SHARE_DELETE |
win32file.FILE_SHARE_READ |
win32file.FILE_SHARE_WRITE,
None,
win32file.OPEN_EXISTING,
0,
None)
detached_handle = handle.Detach()
file_descriptor = msvcrt.open_osfhandle(
detached_handle, os.O_RDONLY)
self._fh = open(file_descriptor, "rb")
self.filename = filename
self._fh.seek(0, os.SEEK_SET)
self.oldsize = 0 | Open a file to be tailed |
def read_until(self, marker):
"""
Reads data from the socket until a marker is found. Data read includes
the marker.
:param marker:
A byte string or regex object from re.compile(). Used to determine
when to stop reading. Regex objects are more inefficient since
they must scan the entire byte string of read data each time data
is read off the socket.
:return:
A byte string of the data read, including the marker
"""
if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):
raise TypeError(pretty_message(
'''
marker must be a byte string or compiled regex object, not %s
''',
type_name(marker)
))
output = b''
is_regex = isinstance(marker, Pattern)
while True:
if len(self._decrypted_bytes) > 0:
chunk = self._decrypted_bytes
self._decrypted_bytes = b''
else:
to_read = self._os_buffered_size() or 8192
chunk = self.read(to_read)
offset = len(output)
output += chunk
if is_regex:
match = marker.search(output)
if match is not None:
end = match.end()
break
else:
# If the marker was not found last time, we have to start
# at a position where the marker would have its final char
# in the newly read chunk
start = max(0, offset - len(marker) - 1)
match = output.find(marker, start)
if match != -1:
end = match + len(marker)
break
self._decrypted_bytes = output[end:] + self._decrypted_bytes
return output[0:end] | Reads data from the socket until a marker is found. Data read includes
the marker.
:param marker:
A byte string or regex object from re.compile(). Used to determine
when to stop reading. Regex objects are more inefficient since
they must scan the entire byte string of read data each time data
is read off the socket.
:return:
A byte string of the data read, including the marker |
async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
"""
So far, let's just accept URL media. We'll see in the future how it
goes.
"""
if not isinstance(media, UrlMedia):
raise ValueError('Facebook platform only accepts URL media')
return media | So far, let's just accept URL media. We'll see in the future how it
goes. |
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisatgh_pbudget(self.minisat, budget) | Set limit on the number of propagations. |