def unzip_file(source_file, dest_dir=None, mkdir=False):
"""Unzip a compressed file.
Args:
source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)
dest_dir: Target folder to extract to (e.g. c:/ladybug).
Default is set to the same directory as the source file.
        mkdir: Set to True to create the directory if it doesn't exist (Default: False)
"""
# set default dest_dir and create it if need be.
if dest_dir is None:
dest_dir, fname = os.path.split(source_file)
elif not os.path.isdir(dest_dir):
if mkdir:
preparedir(dest_dir)
else:
created = preparedir(dest_dir, False)
if not created:
raise ValueError("Failed to find %s." % dest_dir)
# extract files to destination
with zipfile.ZipFile(source_file) as zf:
for member in zf.infolist():
words = member.filename.split('\\')
for word in words[:-1]:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir, ''):
continue
dest_dir = os.path.join(dest_dir, word)
            zf.extract(member, dest_dir)
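# Usage sketch (hedged): assumes unzip_file and its preparedir/os/zipfile dependencies
# are importable from the module above; the paths are made-up examples.
source = 'c:/ladybug/testPts.zip'
unzip_file(source)                                        # extract next to the archive
unzip_file(source, dest_dir='c:/extracted', mkdir=True)   # create the target folder first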
def collect(self, target):
"""Recursively collect all potential triggers/targets in this node and its children.
Define targets and triggers of this particular callable in :meth:`_give_triggers`
and :meth:`_give_targets`.
:param str target: valid values: ``'targets'`` and ``'triggers'``
"""
statusobjects = set()
callables = set()
objs_from_this_obj = getattr(self, '_give_%s' % target)()
if not is_iterable(objs_from_this_obj):
objs_from_this_obj = [objs_from_this_obj]
if is_iterable(objs_from_this_obj):
for i in (self.name_to_system_object(j) for j in objs_from_this_obj):
if isinstance(i, AbstractStatusObject):
statusobjects.add(i)
elif isinstance(i, AbstractCallable):
callables.add(i)
for i in (self.name_to_system_object(j) for j in deep_iterate(callables)):
if isinstance(i, AbstractCallable):
statusobjects.update(getattr(i, target))
    return statusobjects
def run(self):
"""Main entrypoint method.
Returns
-------
new_nodes : `list`
Nodes to add to the doctree.
"""
logger = getLogger(__name__)
try:
config_class_name = self.arguments[0]
except IndexError:
raise SphinxError(
'{} directive requires a Config class '
'name as an argument'.format(self.directive_name))
logger.debug('%s using Config class %s', self.directive_name,
config_class_name)
config_class = get_type(config_class_name)
config_fields = get_task_config_fields(config_class)
all_nodes = []
for field_name, field in config_fields.items():
field_id = format_configfield_id(
'.'.join((config_class.__module__,
config_class.__name__)),
field_name)
try:
format_field_nodes = get_field_formatter(field)
except ValueError:
logger.debug('Skipping unknown config field type, '
'{0!r}'.format(field))
continue
all_nodes.append(
format_field_nodes(field_name, field, field_id, self.state,
self.lineno)
)
# Fallback if no configuration items are present
if len(all_nodes) == 0:
message = 'No configuration fields.'
return [nodes.paragraph(text=message)]
    return all_nodes
def visit_extslice(self, node, parent):
"""visit an ExtSlice node by returning a fresh instance of it"""
newnode = nodes.ExtSlice(parent=parent)
newnode.postinit([self.visit(dim, newnode) for dim in node.dims])
    return newnode
def add_f95_to_env(env):
"""Add Builders and construction variables for f95 to an Environment."""
try:
F95Suffixes = env['F95FILESUFFIXES']
except KeyError:
F95Suffixes = ['.f95']
#print("Adding %s to f95 suffixes" % F95Suffixes)
try:
F95PPSuffixes = env['F95PPFILESUFFIXES']
except KeyError:
F95PPSuffixes = []
DialectAddToEnv(env, "F95", F95Suffixes, F95PPSuffixes,
                    support_module = 1)
def check_cluster(
cluster_config,
data_path,
java_home,
check_replicas,
batch_size,
minutes,
start_time,
end_time,
):
"""Check the integrity of the Kafka log files in a cluster.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log folder on the broker
:type data_path: str
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param check_replicas: also checks the replica files
:type check_replicas: bool
:param batch_size: the size of the batch
:type batch_size: int
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
"""
brokers = get_broker_list(cluster_config)
broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
if not check_replicas: # remove replicas
broker_files = filter_leader_files(cluster_config, broker_files)
processes = []
print("Starting {n} parallel processes".format(n=len(broker_files)))
try:
for broker, host, files in broker_files:
print(
" Broker: {host}, {n} files to check".format(
host=host,
n=len(files)),
)
p = Process(
name="dump_process_" + host,
target=check_files_on_host,
args=(java_home, host, files, batch_size),
)
p.start()
processes.append(p)
print("Processes running:")
for process in processes:
process.join()
except KeyboardInterrupt:
print("Terminating all processes")
for process in processes:
process.terminate()
process.join()
print("All processes terminated")
        sys.exit(1)
def read_config(config_path=default_config_path):
"""
Read configuration file and produce a dictionary of the following structure:
{'<instance1>': {'username': '<user>', 'password': '<pass>',
'verify': <True/False>, 'cert': '<path-to-cert>'}
'<instance2>': {...},
...}
Format of the file:
[https://artifactory-instance.local/artifactory]
username = foo
password = @dmin
verify = false
cert = ~/path-to-cert
    config_path - specifies where to read the config from
"""
config_path = os.path.expanduser(config_path)
if not os.path.isfile(config_path):
raise OSError(errno.ENOENT,
"Artifactory configuration file not found: '%s'" %
config_path)
p = configparser.ConfigParser()
p.read(config_path)
result = {}
for section in p.sections():
username = p.get(section, 'username') if p.has_option(section, 'username') else None
password = p.get(section, 'password') if p.has_option(section, 'password') else None
verify = p.getboolean(section, 'verify') if p.has_option(section, 'verify') else True
cert = p.get(section, 'cert') if p.has_option(section, 'cert') else None
result[section] = {'username': username,
'password': password,
'verify': verify,
'cert': cert}
# certificate path may contain '~', and we'd better expand it properly
if result[section]['cert']:
result[section]['cert'] = \
os.path.expanduser(result[section]['cert'])
    return result
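# Usage sketch (hedged): writes a throwaway file in the documented INI format and reads
# it back with read_config; the instance URL and credentials are made-up examples.
import tempfile
import textwrap

with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as cfg:
    cfg.write(textwrap.dedent("""\
        [https://artifactory-instance.local/artifactory]
        username = foo
        password = @dmin
        verify = false
        cert = ~/path-to-cert
    """))
settings = read_config(cfg.name)
# settings['https://artifactory-instance.local/artifactory']['verify'] is False and
# the 'cert' entry comes back with '~' expanded by os.path.expanduser.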
def plot(self, ax=None, **kwargs):
"""
Plot the histogram with matplotlib, returns `matplotlib` figure.
"""
ax, fig, plt = get_ax_fig_plt(ax)
yy = [len(v) for v in self.values]
ax.plot(self.binvals, yy, **kwargs)
    return fig
def _set_blob_properties(self, ud):
# type: (Uploader, blobxfer.models.upload.Descriptor) -> None
"""Set blob properties (md5, cache control)
:param Uploader self: this
:param blobxfer.models.upload.Descriptor ud: upload descriptor
"""
if ud.requires_non_encrypted_md5_put:
digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
else:
digest = None
blobxfer.operations.azure.blob.set_blob_properties(ud.entity, digest)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
            blobxfer.operations.azure.blob.set_blob_properties(ase, digest)
def fetch(url, binary, outfile, noprint, rendered):
'''
Fetch a specified URL's content, and output it to the console.
'''
with chrome_context.ChromeContext(binary=binary) as cr:
resp = cr.blocking_navigate_and_get_source(url)
if rendered:
resp['content'] = cr.get_rendered_page_source()
resp['binary'] = False
resp['mimie'] = 'text/html'
if not noprint:
if resp['binary'] is False:
print(resp['content'])
else:
print("Response is a binary file")
print("Cannot print!")
if outfile:
with open(outfile, "wb") as fp:
if resp['binary']:
fp.write(resp['content'])
else:
                    fp.write(resp['content'].encode("UTF-8"))
def _prepare_ws(self, w0, mmap, n_steps):
"""
    Decide how to make the return array. If mmap is None, this returns a
    full array of zeros with the correct output shape. If mmap is a
    memory-mapped array, it is validated and used as the output. The latter is
particularly useful for integrating a large number of orbits or
integrating a large number of time steps.
"""
from ..dynamics import PhaseSpacePosition
if not isinstance(w0, PhaseSpacePosition):
w0 = PhaseSpacePosition.from_w(w0)
arr_w0 = w0.w(self._func_units)
self.ndim, self.norbits = arr_w0.shape
self.ndim = self.ndim//2
return_shape = (2*self.ndim, n_steps+1, self.norbits)
if mmap is None:
# create the return arrays
ws = np.zeros(return_shape, dtype=float)
else:
if mmap.shape != return_shape:
raise ValueError("Shape of memory-mapped array doesn't match "
"expected shape of return array ({} vs {})"
.format(mmap.shape, return_shape))
if not mmap.flags.writeable:
raise TypeError("Memory-mapped array must be a writable mode, "
" not '{}'".format(mmap.mode))
ws = mmap
    return w0, arr_w0, ws
def find_boundary_types(model, boundary_type, external_compartment=None):
"""Find specific boundary reactions.
Arguments
---------
model : cobra.Model
A cobra model.
boundary_type : str
What boundary type to check for. Must be one of
"exchange", "demand", or "sink".
external_compartment : str or None
The id for the external compartment. If None it will be detected
automatically.
Returns
-------
list of cobra.reaction
A list of likely boundary reactions of a user defined type.
"""
if not model.boundary:
LOGGER.warning("There are no boundary reactions in this model. "
"Therefore specific types of boundary reactions such "
"as 'exchanges', 'demands' or 'sinks' cannot be "
"identified.")
return []
if external_compartment is None:
external_compartment = find_external_compartment(model)
return model.reactions.query(
        lambda r: is_boundary_type(r, boundary_type, external_compartment))
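# Usage sketch (hedged): `model` stands for any cobra.Model already loaded elsewhere;
# find_boundary_types then filters its boundary reactions by the requested type.
# exchanges = find_boundary_types(model, "exchange")
# sinks = find_boundary_types(model, "sink", external_compartment="e")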
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None):
"""Attention to the source and a neighborhood to the left within a block.
The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position in the corresponding block.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="within_local_attention_1d", values=[q, k, v]):
batch, heads, length, depth_k = common_layers.shape_list(q)
depth_v = common_layers.shape_list(v)[-1]
if isinstance(block_length, tf.Tensor):
const = tf.contrib.util.constant_value(block_length)
if const is not None:
block_length = int(const)
# Pad query, key, value to ensure multiple of block length.
original_length = length
padding_size = tf.mod(-length, block_length)
length += padding_size
padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
q = tf.pad(q, padding)
k = tf.pad(k, padding)
v = tf.pad(v, padding)
# Compute attention for all subsequent query blocks.
num_blocks = tf.div(length, block_length)
q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
# [batch, heads, num_blocks, block_length, block_length]
attention = tf.matmul(q, k, transpose_b=True)
attention += tf.reshape(attention_bias_lower_triangle(block_length),
[1, 1, 1, block_length, block_length])
attention = tf.nn.softmax(attention)
# [batch, heads, num_blocks, block_length, depth_v]
output = tf.matmul(attention, v)
output = tf.reshape(output, [batch, heads, -1, depth_v])
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in
(batch, heads, length, depth_v)])
        return output
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
"""
Put a local GPG key into a blockchain ID's global account.
If the URL is not given, the key will be replicated to the default PGP key server and to either immutable (if @immutable) or mutable data.
Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
Return {'error': ...} on error
"""
if key_name is not None:
assert is_valid_keyname(key_name)
if key_server is None:
key_server = DEFAULT_KEY_SERVER
if gpghome is None:
gpghome = get_default_gpg_home()
put_res = {}
extra_fields = {}
key_data = None
if key_name is not None:
extra_fields = {'keyName': key_name}
if key_url is None:
gpg = gnupg.GPG( homedir=gpghome )
if use_key_server:
# replicate key data to default server first
res = gpg.send_keys( key_server, key_id )
if len(res.data) > 0:
# error
log.error("GPG failed to upload key '%s'" % key_id)
log.error("GPG error:\n%s" % res.stderr)
                return {'error': 'Failed to replicate GPG key to default keyserver'}
key_data = gpg.export_keys( [key_id] )
if immutable:
# replicate to immutable storage
immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
if 'error' in immutable_result:
return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
else:
put_res['transaction_hash'] = immutable_result['transaction_hash']
put_res['zonefile_hash'] = immutable_result['zonefile_hash']
key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )
else:
# replicate to mutable storage
mutable_name = key_name
if key_name is None:
mutable_name = key_id
mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in mutable_result:
return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}
key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )
put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
if 'error' in put_account_res:
return put_account_res
else:
put_account_res.update( put_res )
put_account_res['key_url'] = key_url
put_account_res['key_id'] = key_id
    return put_account_res
def bucket(cls, bucket_name, connection=None):
"""Gives the bucket from couchbase server.
:param bucket_name: Bucket name to fetch.
:type bucket_name: str
:returns: couchbase driver's Bucket object.
:rtype: :class:`couchbase.client.Bucket`
    :raises: :exc:`RuntimeError` If the credentials weren't set.
"""
    connection = cls.connection if connection is None else connection
if bucket_name not in cls._buckets:
connection = "{connection}/{bucket_name}".format(connection=connection, bucket_name=bucket_name)
if cls.password:
cls._buckets[connection] = Bucket(connection, password=cls.password)
else:
cls._buckets[connection] = Bucket(connection)
    return cls._buckets[connection]
def from_group(cls, group):
"""
Construct tags from the regex group
"""
if not group:
return
tag_items = group.split(";")
    return list(map(cls.parse, tag_items))
def from_extension(extension):
"""
    Look up the BioPython file type corresponding to the input extension.
    Lookup is case-insensitive.
"""
if not extension.startswith('.'):
raise ValueError("Extensions must begin with a period.")
try:
return EXTENSION_TO_TYPE[extension.lower()]
except KeyError:
        raise UnknownExtensionError(
            "seqmagick does not know how to handle " +
            "files with extensions like this: " + extension)
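# Behaviour sketch (hedged): actual return values depend on the module's
# EXTENSION_TO_TYPE mapping, which is not shown here.
# from_extension('.fasta')   # -> 'fasta' in the usual seqmagick mapping
# from_extension('fasta')    # raises ValueError (no leading period)
# from_extension('.xyz')     # raises UnknownExtensionError (unmapped extension)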
def IsSocket(self):
"""Determines if the file entry is a socket.
Returns:
bool: True if the file entry is a socket.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_SOCKET
def hash_data(data, hashlen=None, alphabet=None):
r"""
Get a unique hash depending on the state of the data.
Args:
data (object): any sort of loosely organized data
hashlen (None): (default = None)
alphabet (None): (default = None)
Returns:
str: text - hash string
CommandLine:
python -m utool.util_hash hash_data
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> counter = [0]
>>> failed = []
>>> def check_hash(input_, want=None):
>>> count = counter[0] = counter[0] + 1
>>> got = ut.hash_data(input_)
>>> print('({}) {}'.format(count, got))
>>> if want is not None and not got.startswith(want):
>>> failed.append((got, input_, count, want))
>>> check_hash('1', 'wuvrng')
>>> check_hash(['1'], 'dekbfpby')
>>> check_hash(tuple(['1']), 'dekbfpby')
>>> check_hash(b'12', 'marreflbv')
>>> check_hash([b'1', b'2'], 'nwfs')
>>> check_hash(['1', '2', '3'], 'arfrp')
>>> check_hash(['1', np.array([1,2,3]), '3'], 'uyqwcq')
>>> check_hash('123', 'ehkgxk')
>>> check_hash(zip([1, 2, 3], [4, 5, 6]), 'mjcpwa')
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> check_hash(rng.rand(100000), 'bdwosuey')
>>> for got, input_, count, want in failed:
>>> print('failed {} on {}'.format(count, input_))
>>> print('got={}, want={}'.format(got, want))
>>> assert not failed
"""
if alphabet is None:
alphabet = ALPHABET_27
if hashlen is None:
hashlen = HASH_LEN2
if isinstance(data, stringlike) and len(data) == 0:
# Make a special hash for empty data
text = (alphabet[0] * hashlen)
else:
hasher = hashlib.sha512()
_update_hasher(hasher, data)
# Get a 128 character hex string
text = hasher.hexdigest()
# Shorten length of string (by increasing base)
hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet))
# Truncate
text = hashstr2[:hashlen]
    return text
def generate_project(self):
"""
Generate the whole project. Returns True if at least one
file has been generated, False otherwise."""
# checks needed properties
if not self.name or not self.destdir or \
not os.path.isdir(self.destdir):
raise ValueError("Empty or invalid property values: run with 'help' command")
_log("Generating project '%s'" % self.name)
_log("Destination directory is: '%s'" % self.destdir)
top = os.path.join(self.destdir, self.name)
src = os.path.join(top, self.src_name)
resources = os.path.join(top, self.res_name)
utils = os.path.join(src, "utils")
if self.complex:
models = os.path.join(src, "models")
ctrls = os.path.join(src, "ctrls")
views = os.path.join(src, "views")
else: models = ctrls = views = src
res = self.__generate_tree(top, src, resources, models, ctrls, views, utils)
res = self.__generate_classes(models, ctrls, views) or res
res = self.__mksrc(os.path.join(utils, "globals.py"), templates.glob) or res
if self.complex: self.templ.update({'model_import' : "from models.application import ApplModel",
'ctrl_import' : "from ctrls.application import ApplCtrl",
'view_import' : "from views.application import ApplView"})
else: self.templ.update({'model_import' : "from ApplModel import ApplModel",
'ctrl_import' : "from ApplCtrl import ApplCtrl",
'view_import' : "from ApplView import ApplView"})
res = self.__mksrc(os.path.join(top, "%s.py" % self.name), templates.main) or res
# builder file
if self.builder:
res = self.__generate_builder(resources) or res
if self.dist_gtkmvc3: res = self.__copy_framework(os.path.join(resources, "external")) or res
if not res: _log("No actions were taken")
else: _log("Done")
    return res
def ext_pillar(minion_id, pillar, *args, **kwargs):
'''
Query NetBox API for minion data
'''
if minion_id == '*':
log.info('There\'s no data to collect from NetBox for the Master')
return {}
# Pull settings from kwargs
api_url = kwargs['api_url'].rstrip('/')
api_token = kwargs.get('api_token')
site_details = kwargs.get('site_details', True)
site_prefixes = kwargs.get('site_prefixes', True)
proxy_username = kwargs.get('proxy_username', None)
proxy_return = kwargs.get('proxy_return', True)
ret = {}
# Fetch device from API
headers = {}
if api_token:
headers = {
'Authorization': 'Token {}'.format(api_token)
}
device_url = '{api_url}/{app}/{endpoint}'.format(api_url=api_url,
app='dcim',
endpoint='devices')
device_results = salt.utils.http.query(device_url,
params={'name': minion_id},
header_dict=headers,
decode=True)
# Check status code for API call
if 'error' in device_results:
log.error('API query failed for "%s", status code: %d',
minion_id, device_results['status'])
log.error(device_results['error'])
return ret
# Assign results from API call to "netbox" key
devices = device_results['dict']['results']
if len(devices) == 1:
ret['netbox'] = devices[0]
elif len(devices) > 1:
log.error('More than one device found for "%s"', minion_id)
return ret
else:
log.error('Unable to pull NetBox data for "%s"', minion_id)
return ret
site_id = ret['netbox']['site']['id']
site_name = ret['netbox']['site']['name']
if site_details:
log.debug('Retrieving site details for "%s" - site %s (ID %d)',
minion_id, site_name, site_id)
site_url = '{api_url}/{app}/{endpoint}/{site_id}/'.format(api_url=api_url,
app='dcim',
endpoint='sites',
site_id=site_id)
site_details_ret = salt.utils.http.query(site_url,
header_dict=headers,
decode=True)
if 'error' in site_details_ret:
log.error('Unable to retrieve site details for %s (ID %d)',
site_name, site_id)
log.error('Status code: %d, error: %s',
site_details_ret['status'],
site_details_ret['error'])
else:
ret['netbox']['site'] = site_details_ret['dict']
if site_prefixes:
log.debug('Retrieving site prefixes for "%s" - site %s (ID %d)',
minion_id, site_name, site_id)
prefixes_url = '{api_url}/{app}/{endpoint}'.format(api_url=api_url,
app='ipam',
endpoint='prefixes')
site_prefixes_ret = salt.utils.http.query(prefixes_url,
params={'site_id': site_id},
header_dict=headers,
decode=True)
if 'error' in site_prefixes_ret:
log.error('Unable to retrieve site prefixes for %s (ID %d)',
site_name, site_id)
log.error('Status code: %d, error: %s',
site_prefixes_ret['status'],
site_prefixes_ret['error'])
else:
ret['netbox']['site']['prefixes'] = site_prefixes_ret['dict']['results']
if proxy_return:
# Attempt to add "proxy" key, based on platform API call
try:
# Fetch device from API
platform_results = salt.utils.http.query(ret['netbox']['platform']['url'],
header_dict=headers,
decode=True)
# Check status code for API call
if 'error' in platform_results:
log.info('API query failed for "%s": %s',
minion_id, platform_results['error'])
# Assign results from API call to "proxy" key if the platform has a
# napalm_driver defined.
napalm_driver = platform_results['dict'].get('napalm_driver')
if napalm_driver:
ret['proxy'] = {
'host': str(ipaddress.IPv4Interface(
ret['netbox']['primary_ip4']['address']).ip),
'driver': napalm_driver,
'proxytype': 'napalm',
}
if proxy_username:
ret['proxy']['username'] = proxy_username
except Exception:
log.debug(
'Could not create proxy config data for "%s"', minion_id)
    return ret
def deserialize(self, data):
'''
Deserialize the data based on request content type headers
'''
ct_in_map = {
'application/x-www-form-urlencoded': self._form_loader,
'application/json': salt.utils.json.loads,
'application/x-yaml': salt.utils.yaml.safe_load,
'text/yaml': salt.utils.yaml.safe_load,
# because people are terrible and don't mean what they say
'text/plain': salt.utils.json.loads
}
try:
# Use cgi.parse_header to correctly separate parameters from value
value, parameters = cgi.parse_header(self.request.headers['Content-Type'])
return ct_in_map[value](tornado.escape.native_str(data))
except KeyError:
self.send_error(406)
except ValueError:
        self.send_error(400)
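# Sketch of the header split deserialize relies on: cgi.parse_header separates the media
# type from its parameters, so a charset suffix never breaks the content-type lookup.
# Note: cgi is deprecated and removed in Python 3.13; this mirrors the code above as-is.
import cgi

value, params = cgi.parse_header('application/json; charset=utf-8')
assert value == 'application/json'
assert params == {'charset': 'utf-8'}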
def createHeaderMenu(self, index):
"""
Creates a new header menu to be displayed.
:return <QtGui.QMenu>
"""
menu = QtGui.QMenu(self)
act = menu.addAction("Hide '%s'" % self.columnOf(index))
act.triggered.connect( self.headerHideColumn )
menu.addSeparator()
act = menu.addAction('Sort Ascending')
act.setIcon(QtGui.QIcon(resources.find('img/sort_ascending.png')))
act.triggered.connect( self.headerSortAscending )
act = menu.addAction('Sort Descending')
act.setIcon(QtGui.QIcon(resources.find('img/sort_descending.png')))
act.triggered.connect( self.headerSortDescending )
act = menu.addAction('Resize to Contents')
act.setIcon(QtGui.QIcon(resources.find('img/treeview/fit.png')))
act.triggered.connect( self.resizeToContents )
menu.addSeparator()
colmenu = menu.addMenu( 'Show/Hide Columns' )
colmenu.setIcon(QtGui.QIcon(resources.find('img/columns.png')))
colmenu.addAction('Show All')
colmenu.addAction('Hide All')
colmenu.addSeparator()
hitem = self.headerItem()
columns = self.columns()
for column in sorted(columns):
col = self.column(column)
action = colmenu.addAction(column)
action.setCheckable(True)
action.setChecked(not self.isColumnHidden(col))
colmenu.triggered.connect( self.toggleColumnByAction )
menu.addSeparator()
exporters = self.exporters()
if exporters:
submenu = menu.addMenu('Export as')
submenu.setIcon(QtGui.QIcon(resources.find('img/export.png')))
for exporter in exporters:
act = submenu.addAction(exporter.name())
act.setData(wrapVariant(exporter.filetype()))
submenu.triggered.connect(self.exportAs)
    return menu
def l2traceroute_result_input_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute_result = ET.Element("l2traceroute_result")
config = l2traceroute_result
input = ET.SubElement(l2traceroute_result, "input")
session_id = ET.SubElement(input, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
    return callback(config)
def parse(self, filename=None, file=None, debuglevel=0):
""" Parse file.
kwargs:
filename (str): File to parse
debuglevel (int): Parser debuglevel
"""
self.scope.push()
if not file:
# We use a path.
file = filename
else:
# We use a stream and try to extract the name from the stream.
if hasattr(file, 'name'):
if filename is not None:
raise AssertionError(
'names of file and filename are in conflict')
filename = file.name
else:
filename = '(stream)'
self.target = filename
if self.verbose and not self.fail_with_exc:
print('Compiling target: %s' % filename, file=sys.stderr)
self.result = self.parser.parse(file, lexer=self.lex, debug=debuglevel)
self.post_parse()
    self.register.close()
def undo(self):
''' Undo the last action. '''
if self.canundo():
undoable = self._undos.pop()
with self._pausereceiver():
try:
undoable.undo()
except:
self.clear()
raise
else:
self._redos.append(undoable)
        self.undocallback()
def _qteRunQueuedMacro(self, macroName: str,
widgetObj: QtGui.QWidget=None,
keysequence: QtmacsKeysequence=None):
"""
Execute the next macro in the macro queue.
This method is triggered by the ``timerEvent`` in conjunction
with the focus manager to ensure the event loop updates the
GUI in between any two macros.
.. warning:: Never call this method directly.
|Args|
* ``macroName`` (**str**): name of macro
* ``widgetObj`` (**QWidget**): widget (if any) for which the
macro applies
    * ``keysequence`` (**QtmacsKeysequence**): key sequence that
triggered the macro.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Fetch the applet holding the widget (this may be None).
app = qteGetAppletFromWidget(widgetObj)
# Double check that the applet still exists, unless there is
# no applet (can happen when the windows are empty).
if app is not None:
if sip.isdeleted(app):
            msg = 'Ignored macro <b>{}</b> because it targeted a'.format(macroName)
            msg += ' nonexistent applet.'
self.qteLogger.warning(msg)
return
# Fetch a signature compatible macro object.
macroObj = self.qteGetMacroObject(macroName, widgetObj)
# Log an error if no compatible macro was found.
if macroObj is None:
msg = 'No <b>{}</b>-macro compatible with {}:{}-type applet'
msg = msg.format(macroName, app.qteAppletSignature(),
widgetObj._qteAdmin.widgetSignature)
self.qteLogger.warning(msg)
return
# Update the 'last_key_sequence' variable in case the macros,
# or slots triggered by that macro, have access to it.
self.qteDefVar('last_key_sequence', keysequence,
doc="Last valid key sequence that triggered a macro.")
# Set some variables in the macro object for convenient access
# from inside the macro.
if app is None:
macroObj.qteApplet = macroObj.qteWidget = None
else:
macroObj.qteApplet = app
macroObj.qteWidget = widgetObj
# Run the macro and trigger the focus manager.
    macroObj.qtePrepareToRun()
def _parse(fileobj):
"""Parse fileobj for a shebang."""
fileobj.seek(0)
try:
part = fileobj.read(2)
except UnicodeDecodeError:
part = ""
if part == "#!":
shebang = shlex.split(fileobj.readline().strip())
if (platform.system() == "Windows" and
len(shebang) and
os.path.basename(shebang[0]) == "env"):
return shebang[1:]
return shebang
    return []
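# Usage sketch (hedged): _parse only needs a seekable file-like object, so io.StringIO
# stands in for a real script file; module-level imports (shlex, platform, os) assumed.
import io

script = io.StringIO("#!/usr/bin/env python\nprint('hi')\n")
print(_parse(script))   # ['/usr/bin/env', 'python'] on POSIX; ['python'] on Windows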
def get_face_mask(self, subdomain):
"""Get faces which are fully in subdomain.
"""
if subdomain is None:
# https://stackoverflow.com/a/42392791/353337
return numpy.s_[:]
if subdomain not in self.subdomains:
self._mark_vertices(subdomain)
# A face is inside if all its edges are in.
# An edge is inside if all its nodes are in.
is_in = self.subdomains[subdomain]["vertices"][self.idx_hierarchy]
# Take `all()` over all axes except the last two (face_ids, cell_ids).
n = len(is_in.shape)
is_inside = numpy.all(is_in, axis=tuple(range(n - 2)))
if subdomain.is_boundary_only:
# Filter for boundary
is_inside = is_inside & self.is_boundary_facet
    return is_inside
def circlescan(x0, y0, r1, r2):
"""Scan pixels in a circle pattern around a center point
:param x0: Center x-coordinate
:type x0: float
:param y0: Center y-coordinate
:type y0: float
:param r1: Initial radius
:type r1: float
:param r2: Final radius
:type r2: float
:returns: Coordinate generator
:rtype: function
"""
# Validate inputs
if r1 < 0: raise ValueError("Initial radius must be non-negative")
if r2 < 0: raise ValueError("Final radius must be non-negative")
# List of pixels visited in previous diameter
previous = []
# Scan distances outward (1) or inward (-1)
rstep = 1 if r2 >= r1 else -1
for distance in range(r1, r2 + rstep, rstep):
if distance == 0:
yield x0, y0
else:
            # Compute points for the first octant, then rotate by multiples of
            # 45 degrees to obtain the other octants
a = 0.707107
rotations = {0: [[ 1, 0], [ 0, 1]],
1: [[ a, a], [-a, a]],
2: [[ 0, 1], [-1, 0]],
3: [[-a, a], [-a,-a]],
4: [[-1, 0], [ 0,-1]],
5: [[-a,-a], [ a,-a]],
6: [[ 0,-1], [ 1, 0]],
7: [[ a,-a], [ a, a]]}
nangles = len(rotations)
# List of pixels visited in current diameter
current = []
for angle in range(nangles):
x = 0
y = distance
d = 1 - distance
while x < y:
xr = rotations[angle][0][0]*x + rotations[angle][0][1]*y
yr = rotations[angle][1][0]*x + rotations[angle][1][1]*y
xr = x0 + xr
yr = y0 + yr
# First check if point was in previous diameter
# since our scan pattern can lead to duplicates in
# neighboring diameters
point = (int(round(xr)), int(round(yr)))
if point not in previous:
yield xr, yr
current.append(point)
# Move pixel according to circle constraint
if (d < 0):
d += 3 + 2 * x
else:
d += 5 - 2 * (y-x)
y -= 1
x += 1
            previous = current
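# Usage sketch (hedged): circlescan is a generator, so the pixel coordinates around
# (10, 10) from radius 0 out to radius 2 come out lazily.
for x, y in circlescan(10, 10, 0, 2):
    print(int(round(x)), int(round(y)))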
def parseDockerAppliance(appliance):
"""
Takes string describing a docker image and returns the parsed
registry, image reference, and tag for that image.
Example: "quay.io/ucsc_cgl/toil:latest"
Should return: "quay.io", "ucsc_cgl/toil", "latest"
If a registry is not defined, the default is: "docker.io"
If a tag is not defined, the default is: "latest"
:param appliance: The full url of the docker image originally
specified by the user (or the default).
e.g. "quay.io/ucsc_cgl/toil:latest"
:return: registryName, imageName, tag
"""
appliance = appliance.lower()
# get the tag
if ':' in appliance:
tag = appliance.split(':')[-1]
appliance = appliance[:-(len(':' + tag))] # remove only the tag
else:
# default to 'latest' if no tag is specified
tag = 'latest'
# get the registry and image
registryName = 'docker.io' # default if not specified
imageName = appliance # will be true if not specified
if '/' in appliance and '.' in appliance.split('/')[0]:
registryName = appliance.split('/')[0]
imageName = appliance[len(registryName):]
registryName = registryName.strip('/')
imageName = imageName.strip('/')
    return registryName, imageName, tag
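# Quick checks (hedged) that follow directly from the parsing rules above
# (default registry 'docker.io', default tag 'latest'); the image names are examples.
assert parseDockerAppliance('quay.io/ucsc_cgl/toil:latest') == ('quay.io', 'ucsc_cgl/toil', 'latest')
assert parseDockerAppliance('ubuntu') == ('docker.io', 'ubuntu', 'latest')
assert parseDockerAppliance('broadinstitute/gatk:4.1.0.0') == ('docker.io', 'broadinstitute/gatk', '4.1.0.0')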
def prefetch_docker_image_on_private_agents(
image,
        timeout=timedelta(minutes=5).total_seconds()):
    """Given a docker image, an app with the image is scaled across the private
    agents to ensure that the image is prefetched to all nodes.
:param image: docker image name
:type image: str
:param timeout: timeout for deployment wait in secs (default: 5m)
    :type timeout: int
"""
agents = len(shakedown.get_private_agents())
app = {
"id": "/prefetch",
"instances": agents,
"container": {
"type": "DOCKER",
"docker": {"image": image}
},
"cpus": 0.1,
"mem": 128
}
client = marathon.create_client()
client.add_app(app)
shakedown.deployment_wait(timeout)
shakedown.delete_all_apps()
    shakedown.deployment_wait(timeout)
def get_mode(device):
'''
Report whether the quota system for this device is on or off
CLI Example:
.. code-block:: bash
salt '*' quota.get_mode
'''
ret = {}
cmd = 'quotaon -p {0}'.format(device)
out = __salt__['cmd.run'](cmd, python_shell=False)
for line in out.splitlines():
comps = line.strip().split()
if comps[3] not in ret:
if comps[0].startswith('quotaon'):
if comps[1].startswith('Mountpoint'):
ret[comps[4]] = 'disabled'
continue
elif comps[1].startswith('Cannot'):
ret[device] = 'Not found'
return ret
continue
ret[comps[3]] = {
'device': comps[4].replace('(', '').replace(')', ''),
}
ret[comps[3]][comps[0]] = comps[6]
    return ret
def clearkml(self):
'''Clear the kmls from the map'''
#go through all the current layers and remove them
for layer in self.curlayers:
self.mpstate.map.remove_object(layer)
for layer in self.curtextlayers:
self.mpstate.map.remove_object(layer)
self.allayers = []
self.curlayers = []
self.alltextlayers = []
self.curtextlayers = []
    self.menu_needs_refreshing = True
def get(self, key, default=None):
""" Get key value, return default if key doesn't exist """
if self.in_memory:
return self._memory_db.get(key, default)
else:
db = self._read_file()
        return db.get(key, default)
def ds_discrete(self, d_min=None, d_max=None, pts=20, limit=1e-9,
method='logarithmic'):
r'''Create a particle spacing mesh to perform calculations with,
according to one of several ways. The allowable meshes are
'linear', 'logarithmic', a geometric series specified by a Renard
number such as 'R10', or the meshes available in one of several sieve
standards.
Parameters
----------
d_min : float, optional
The minimum diameter at which the mesh starts, [m]
d_max : float, optional
The maximum diameter at which the mesh ends, [m]
pts : int, optional
The number of points to return for the mesh (note this is not
respected by sieve meshes), [-]
limit : float
If `d_min` or `d_max` is not specified, it will be calculated as the
`dn` at which this limit or 1-limit exists (this is ignored for
Renard numbers), [-]
method : str, optional
Either 'linear', 'logarithmic', a Renard number like 'R10' or 'R5'
        or 'R2.5', or one of the sieve standards 'ISO 3310-1 R40/3',
'ISO 3310-1 R20', 'ISO 3310-1 R20/3', 'ISO 3310-1',
'ISO 3310-1 R10', 'ASTM E11', [-]
Returns
-------
ds : list[float]
The generated mesh diameters, [m]
Notes
-----
Note that when specifying a Renard series, only one of `d_min` or `d_max` can
be respected! Provide only one of those numbers.
Note that when specifying a sieve standard the number of points is not
respected!
References
----------
.. [1] ASTM E11 - 17 - Standard Specification for Woven Wire Test Sieve
Cloth and Test Sieves.
.. [2] ISO 3310-1:2016 - Test Sieves -- Technical Requirements and Testing
-- Part 1: Test Sieves of Metal Wire Cloth.
'''
if method[0] not in ('R', 'r'):
if d_min is None:
d_min = self.dn(limit)
if d_max is None:
d_max = self.dn(1.0 - limit)
    return psd_spacing(d_min=d_min, d_max=d_max, pts=pts, method=method)
def create_png(cls_name, meth_name, graph, dir_name='graphs2'):
"""
Creates a PNG from a given :class:`~androguard.decompiler.dad.graph.Graph`.
:param str cls_name: name of the class
:param str meth_name: name of the method
:param androguard.decompiler.dad.graph.Graph graph:
:param str dir_name: output directory
"""
m_name = ''.join(x for x in meth_name if x.isalnum())
name = ''.join((cls_name.split('/')[-1][:-1], '#', m_name))
    graph.draw(name, dir_name)
def heartbeat_encode(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=2):
'''
The heartbeat message shows that a system is present and responding.
The type of the MAV and Autopilot hardware allow the
receiving system to treat further messages from this
system appropriate (e.g. by laying out the user
interface based on the autopilot).
type : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
autopilot : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
base_mode : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
custom_mode : A bitfield for use for autopilot-specific flags. (uint32_t)
system_status : System status flag, see MAV_STATE ENUM (uint8_t)
mavlink_version : MAVLink version (uint8_t)
'''
    return MAVLink_heartbeat_message(type, autopilot, base_mode, custom_mode, system_status, mavlink_version)
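# Usage sketch (hedged): with pymavlink, the generated MAVLink object exposes this
# encoder; the enum constants are standard MAVLink values and the endpoint is made-up.
# from pymavlink import mavutil
# conn = mavutil.mavlink_connection('udpout:127.0.0.1:14550')
# msg = conn.mav.heartbeat_encode(mavutil.mavlink.MAV_TYPE_GCS,
#                                 mavutil.mavlink.MAV_AUTOPILOT_INVALID,
#                                 0, 0, mavutil.mavlink.MAV_STATE_ACTIVE)
# conn.mav.send(msg)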
def place_items_in_square(items, t):
"""
Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
"""
# A minheap (because that's all that heapq supports :/)
# of the length of each row. Why this is important is because
# we'll be popping the largest rows when figuring out row displacements.
# Each item is a tuple of (t - |row|, y, [(xpos_1, item_1), ...]).
# Until the call to heapq.heapify(), the rows are ordered in
# increasing row number (y).
rows = [(t, y, []) for y in range(t)]
for item in items:
# Calculate the cell the item should fall in.
x = item % t
y = item // t
# Push the item to its corresponding row...
inverse_length, _, row_contents = rows[y]
heapq.heappush(row_contents, (x, item))
# Ensure the heap key is kept intact.
rows[y] = inverse_length - 1, y, row_contents
assert all(inv_len == t - len(rows) for inv_len, _, rows in rows)
heapq.heapify(rows)
# Return only rows that are populated.
    return [row for row in rows if row[2]]
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value = value
        return oldval
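# Sketch of the same atomic swap (hedged): the class above wraps a multiprocessing.Value,
# so the pattern reduces to holding the Value's lock across the read and the write.
import multiprocessing

ref = multiprocessing.Value('i', 1)
with ref.get_lock():
    old, ref.value = ref.value, 7
print(old, ref.value)   # 1 7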
def build_arch(self, arch):
"""simple shared compile"""
env = self.get_recipe_env(arch, with_flags_in_cc=False)
for path in (
self.get_build_dir(arch.arch),
join(self.ctx.python_recipe.get_build_dir(arch.arch), 'Lib'),
join(self.ctx.python_recipe.get_build_dir(arch.arch), 'Include')):
if not exists(path):
info("creating {}".format(path))
shprint(sh.mkdir, '-p', path)
cli = env['CC'].split()[0]
# makes sure first CC command is the compiler rather than ccache, refs:
# https://github.com/kivy/python-for-android/issues/1398
if 'ccache' in cli:
cli = env['CC'].split()[1]
cc = sh.Command(cli)
with current_directory(self.get_build_dir(arch.arch)):
cflags = env['CFLAGS'].split()
cflags.extend(['-I.', '-c', '-l.', 'ifaddrs.c', '-I.'])
shprint(cc, *cflags, _env=env)
cflags = env['CFLAGS'].split()
cflags.extend(['-shared', '-I.', 'ifaddrs.o', '-o', 'libifaddrs.so'])
cflags.extend(env['LDFLAGS'].split())
shprint(cc, *cflags, _env=env)
        shprint(sh.cp, 'libifaddrs.so', self.ctx.get_libs_dir(arch.arch))
def attributes_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability",
ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300,
title="Attributes Diagram", legend_params=None, inset_params=None,
inset_position=(0.12, 0.72, 0.25, 0.25), bootstrap_sets=None, ci=(2.5, 97.5)):
"""
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their
observed relative frequency. Also adds gray areas to show where the climatological probabilities lie and what
areas result in a positive Brier Skill Score.
Args:
rel_objs (list): List of DistributedReliability objects.
obj_labels (list): List of labels describing the forecast model associated with each curve.
colors (list): List of colors for each line
markers (list): List of line markers
filename (str): Where to save the figure.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): X-axis label
ylabel (str): Y-axis label
ticks (array): Tick value labels for the x and y axes.
dpi (int): resolution of the saved figure in dots per inch.
title (str): Title of figure
legend_params (dict): Keyword arguments for the plot legend.
inset_params (dict): Keyword arguments for the inset axis.
inset_position (tuple): Position of the inset axis in normalized axes coordinates (left, bottom, width, height)
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
if inset_params is None:
inset_params = dict(width="25%", height="25%", loc=2, axes_kwargs=dict(axisbg='white'))
fig, ax = plt.subplots(figsize=figsize)
plt.plot(ticks, ticks, "k--")
inset_hist = inset_axes(ax, **inset_params)
ip = InsetPosition(ax, inset_position)
inset_hist.set_axes_locator(ip)
climo = rel_objs[0].climatology()
no_skill = 0.5 * ticks + 0.5 * climo
skill_x = [climo, climo, 1, 1, climo, climo, 0, 0, climo]
skill_y = [climo, 1, 1, no_skill[-1], climo, 0, 0, no_skill[0], climo]
f = ax.fill(skill_x, skill_y, "0.8")
f[0].set_zorder(1)
ax.plot(ticks, np.ones(ticks.shape) * climo, "k--")
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
brel_curves = np.vstack([b_rel.reliability_curve()["Positive_Relative_Freq"].values for b_rel in b_set])
rel_range = np.nanpercentile(brel_curves, ci, axis=0)
fb = ax.fill_between(b_rel.thresholds[:-1], rel_range[1], rel_range[0], alpha=0.5, color=colors[b])
fb.set_zorder(2)
for r, rel_obj in enumerate(rel_objs):
rel_curve = rel_obj.reliability_curve()
ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r],
marker=markers[r], label=obj_labels[r])
inset_hist.semilogy(rel_curve["Bin_Start"] * 100, rel_obj.frequencies["Total_Freq"][:-1], color=colors[r],
marker=markers[r])
inset_hist.set_xlabel("Forecast Probability")
inset_hist.set_ylabel("Frequency")
ax.annotate("No Skill", (0.6, no_skill[12]), rotation=22.5)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticks(ticks)
ax.set_xticklabels((ticks * 100).astype(int))
ax.set_yticks(ticks)
ax.set_yticklabels((ticks * 100).astype(int))
ax.legend(**legend_params)
ax.set_title(title)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
    plt.close()
def rolling_count(self, window_start, window_end):
"""
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is executed on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
"""
agg_op = '__builtin__nonnull__count__'
    return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0))
def derive_and_set_name_fields_and_slug(
self, set_name_sort=True, set_slug=True
):
"""
Override this method from `CreatorBase` to handle additional name
fields for Person creators.
This method is called during `save()`
"""
super(PersonCreator, self).derive_and_set_name_fields_and_slug(
set_name_sort=False, set_slug=False)
# Collect person name fields, but only if they are not empty
person_names = [
name for name in [self.name_family, self.name_given]
if not is_empty(name)
]
# if empty, set `name_sort` = '{name_family}, {name_given}' if these
# person name values are available otherwise `name_full`
if set_name_sort and is_empty(self.name_sort):
if person_names:
self.name_sort = ', '.join(person_names)
else:
self.name_sort = self.name_full
# if empty, set `slug` to slugified '{name_family} {name_given}' if
# these person name values are available otherwise slugified
# `name_full`
if set_slug and is_empty(self.slug):
if person_names:
self.slug = slugify(' '.join(person_names))
else:
self.slug = slugify(self.name_full) | Override this method from `CreatorBase` to handle additional name
fields for Person creators.
This method is called during `save()` |
def _inverse_i(self, y, i):
"""return inverse of y in component i"""
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
if 1 < 3:
if not lb <= y <= ub:
raise ValueError('argument of inverse must be within the given bounds')
if y < lb + al:
return (lb - al) + 2 * (al * (y - lb))**0.5
elif y < ub - au:
return y
else:
return (ub + au) - 2 * (au * (ub - y))**0.5 | return inverse of y in component i |
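A minimal standalone sketch of the same piecewise boundary inverse, assuming scalar bounds `lb`, `ub` and margin widths `al`, `au` in place of the per-component lookups above; handy for checking the quadratic-to-linear transition numerically.
def inverse_boundary(y, lb, ub, al, au):
    # hypothetical scalar version of the inverse above, for illustration only
    if not lb <= y <= ub:
        raise ValueError('argument of inverse must be within the given bounds')
    if y < lb + al:
        # quadratic branch near the lower bound
        return (lb - al) + 2 * (al * (y - lb)) ** 0.5
    if y < ub - au:
        # identity branch in the interior
        return y
    # quadratic branch near the upper bound
    return (ub + au) - 2 * (au * (ub - y)) ** 0.5

assert inverse_boundary(0.5, 0.0, 1.0, 0.1, 0.1) == 0.5  # interior maps to itself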
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname | Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel. |
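A hedged usage sketch, assuming this is distlib's `Wheel` class; the package name, version, and `purelib` path below are placeholders, and the build call is left commented because it requires a real installed-package layout (including a `.dist-info` directory) on disk.
from distlib.wheel import Wheel

w = Wheel('mypkg-1.0')             # hypothetical name-version spec
w.dirname = 'dist'                 # directory where the .whl should be written
paths = {'purelib': 'build/lib'}   # hypothetical pure-Python tree containing a .dist-info dir
# wheel_path = w.build(paths)      # would return the full path of the generated wheel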
def fetch_html(self, msg_nums):
"""
Given a message number that we found with imap_search,
get the text/html content.
@Params
msg_nums - message number to get html message for
@Returns
HTML content of message matched by message number
"""
if not msg_nums:
raise Exception("Invalid Message Number!")
return self.__imap_fetch_content_type(msg_nums, self.HTML) | Given a message number that we found with imap_search,
get the text/html content.
@Params
msg_nums - message number to get html message for
@Returns
HTML content of message matched by message number |
def main():
"""d"""
config = Common.open_file(F_CONFIG)
# Delete the complete .build folder
Common.clean_build(config['p_build'])
# Create main folders
Common.make_dir(config['p_build'])
for language in config['languages']:
Common.make_dir(config['p_build'] + language) | Clean and recreate the build folder, then create one sub-folder per configured language.
def splitread(args):
"""
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
"""
p = OptionParser(splitread.__doc__)
p.add_option("-n", dest="n", default=76, type="int",
help="Split at N-th base position [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement second read [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
base = op.basename(pairsfastq).split(".")[0]
fq1 = base + ".1.fastq"
fq2 = base + ".2.fastq"
fw1 = must_open(fq1, "w")
fw2 = must_open(fq2, "w")
fp = must_open(pairsfastq)
n = opts.n
minsize = n * 8 / 5
for name, seq, qual in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
continue
name = "@" + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if opts.rc:
rec2.rc()
print(rec1, file=fw1)
print(rec2, file=fw2)
logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
fw1.close()
fw2.close() | %prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle. |
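A hedged usage sketch of the command above; the FASTQ filename is hypothetical and the option mirrors the parser defined in the function.
# split reads of a hypothetical pairs.fastq at the default base 76 and
# reverse-complement the second mate
splitread(["pairs.fastq", "--rc"])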
def mmap(func, iterable):
"""Wrapper to make map() behave the same on Py2 and Py3."""
if sys.version_info[0] > 2:
return [i for i in map(func, iterable)]
else:
return map(func, iterable) | Wrapper to make map() behave the same on Py2 and Py3. |
def nn_getsockopt(socket, level, option, value):
"""retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - number of bytes copied or, on error, a number < 0
"""
if memoryview(value).readonly:
raise TypeError('Writable buffer is required')
size_t_size = ctypes.c_size_t(len(value))
rtn = _nn_getsockopt(socket, level, option, ctypes.addressof(value),
ctypes.byref(size_t_size))
return (rtn, size_t_size.value) | retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - number of bytes copied or, on error, a number < 0
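A minimal sketch of the writable-buffer requirement enforced above: immutable `bytes` is rejected by the `memoryview` check, while a `bytearray` passes.
assert memoryview(b'abcd').readonly is True        # would raise TypeError above
assert memoryview(bytearray(4)).readonly is False  # acceptable option buffer
# typical call shape (socket/level/option values come from the nanomsg bindings):
# value = bytearray(4)
# rc, size = nn_getsockopt(sock, level, option, value)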
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server | Start a TCP server to receive accumulator updates in a daemon thread, and returns it |
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str) | Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask |
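For comparison, the standard-library `ipaddress` module performs the same netmask/hostmask-to-prefix conversion; a quick sketch of the equivalent behaviour:
import ipaddress

assert ipaddress.ip_network('0.0.0.0/255.255.255.0').prefixlen == 24  # netmask form
assert ipaddress.ip_network('0.0.0.0/0.0.0.255').prefixlen == 24      # hostmask form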
def _normalize_stmt_idx(self, block_addr, stmt_idx):
"""
For each statement ID, convert 'default' to (last_stmt_idx+1)
:param block_addr: The block address.
:param stmt_idx: Statement ID.
:returns: New statement ID.
"""
if type(stmt_idx) is int:
return stmt_idx
if stmt_idx == DEFAULT_STATEMENT:
vex_block = self.project.factory.block(block_addr).vex
return len(vex_block.statements)
raise AngrBackwardSlicingError('Unsupported statement ID "%s"' % stmt_idx) | For each statement ID, convert 'default' to (last_stmt_idx+1)
:param block_addr: The block address.
:param stmt_idx: Statement ID.
:returns: New statement ID. |
def build_data(self):
"""
get build data.
:return: build data or None if not found
"""
# pylint: disable=len-as-condition
if len(self.dutinformation) > 0 and (self.dutinformation.get(0).build is not None):
return self.dutinformation.get(0).build.get_data()
return None | get build data.
:return: build data or None if not found |
def _set_batch(self, batch, fg, bg, bgblend=1, nullChar=False):
"""
Try to perform a batch operation otherwise fall back to _set_char.
If fg and bg are defined then this is faster but not by very
much.
if any character is None then nullChar is True
batch is an iterable of [(x, y), ch] items
"""
for (x, y), char in batch:
self._set_char(x, y, char, fg, bg, bgblend) | Try to perform a batch operation otherwise fall back to _set_char.
If fg and bg are defined then this is faster but not by very
much.
if any character is None then nullChar is True
batch is an iterable of [(x, y), ch] items
def approve(
self,
allowed_address: Address,
allowance: TokenAmount,
):
""" Aprove `allowed_address` to transfer up to `deposit` amount of token.
Note:
For channel deposit please use the channel proxy, since it does
additional validations.
"""
# Note that given_block_identifier is not used here as there
# are no preconditions to check before sending the transaction
log_details = {
'node': pex(self.node_address),
'contract': pex(self.address),
'allowed_address': pex(allowed_address),
'allowance': allowance,
}
checking_block = self.client.get_checking_block()
error_prefix = 'Call to approve will fail'
gas_limit = self.proxy.estimate_gas(
checking_block,
'approve',
to_checksum_address(allowed_address),
allowance,
)
if gas_limit:
error_prefix = 'Call to approve failed'
log.debug('approve called', **log_details)
transaction_hash = self.proxy.transact(
'approve',
safe_gas_limit(gas_limit),
to_checksum_address(allowed_address),
allowance,
)
self.client.poll(transaction_hash)
receipt_or_none = check_transaction_threw(self.client, transaction_hash)
transaction_executed = gas_limit is not None
if not transaction_executed or receipt_or_none:
if transaction_executed:
block = receipt_or_none['blockNumber']
else:
block = checking_block
self.proxy.jsonrpc_client.check_for_insufficient_eth(
transaction_name='approve',
transaction_executed=transaction_executed,
required_gas=GAS_REQUIRED_FOR_APPROVE,
block_identifier=block,
)
msg = self._check_why_approved_failed(allowance, block)
error_msg = f'{error_prefix}. {msg}'
log.critical(error_msg, **log_details)
raise RaidenUnrecoverableError(error_msg)
log.info('approve successful', **log_details) | Approve `allowed_address` to transfer up to `allowance` amount of tokens.
Note:
For channel deposit please use the channel proxy, since it does
additional validations. |
def verify_signature(message_path: str,
sigfile_path: str,
cert_path: str) -> None:
"""
Verify the signature (assumed, of the hash file)
It is assumed that the public key for the signature is in the keyring
:param message_path: The path to the message file to check
:param sigfile_path: The path to the signature to check
:param cert_path: The path to the certificate to check the signature with
:returns True: If the signature verifies
:raises SignatureMismatch: If the signature does not verify
"""
with tempfile.TemporaryDirectory() as pubkey_dir:
pubkey_contents = subprocess.check_output(
['openssl', 'x509', '-in', cert_path,
'-pubkey', '-noout'])
pubkey_file = os.path.join(pubkey_dir, 'pubkey')
open(pubkey_file, 'wb').write(pubkey_contents)
try:
verification = subprocess.check_output(
['openssl', 'dgst', '-sha256', '-verify', pubkey_file,
'-signature', sigfile_path, message_path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as cpe:
verification = cpe.output
if verification.strip() == b'Verified OK':
LOG.info(f"Verification passed from cert {cert_path}")
else:
LOG.error(
f"Verification failed with cert {cert_path}: {verification}")
raise SignatureMismatch('Signature check failed')
It is assumed that the public key for the signature is in the keyring
:param message_path: The path to the message file to check
:param sigfile_path: The path to the signature to check
:param cert_path: The path to the certificate to check the signature with
:returns True: If the signature verifies
:raises SignatureMismatch: If the signature does not verify |
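A hedged usage sketch with hypothetical file paths; it assumes the `openssl` command-line tool is on PATH and relies on `SignatureMismatch` being raised when verification fails.
try:
    verify_signature(message_path='dist/update.hash',      # hypothetical hash file
                     sigfile_path='dist/update.hash.sig',  # hypothetical signature
                     cert_path='certs/signing.crt')        # hypothetical certificate
except SignatureMismatch:
    print('refusing update: signature did not verify')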
def get_context(pid_file, daemon=False):
"""Get context of running notebook.
A context file is created when notebook starts.
:param daemon: Whether we are fetching the context from inside the daemon. If False, also check that the process is still alive.
:return: dict, or None if the process is dead or was never launched
"""
port_file = get_context_file_name(pid_file)
if not os.path.exists(port_file):
return None
with open(port_file, "rt") as f:
json_data = f.read()
try:
data = json.loads(json_data)
except ValueError as e:
logger.error("Damaged context json data %s", json_data)
return None
if not daemon:
pid = data.get("pid")
if pid and not check_pid(int(pid)):
# The Notebook daemon has exited uncleanly, as the PID does not point to any valid process
return None
return data | Get context of running notebook.
A context file is created when notebook starts.
:param daemon: Whether we are fetching the context from inside the daemon. If False, also check that the process is still alive.
:return: dict, or None if the process is dead or was never launched
def plot_spectra(self, nmax, convention='power', unit='per_l', base=10.,
maxcolumns=3, xscale='lin', yscale='log', grid=True,
xlim=(None, None), ylim=(None, None), show=True,
title=True, axes_labelsize=None, tick_labelsize=None,
title_labelsize=None, ax=None, fname=None):
"""
Plot the spectra of the best-concentrated Slepian functions.
Usage
-----
x.plot_spectra(nmax, [convention, unit, base, maxcolumns, xscale,
yscale, grid, xlim, ylim, show, title,
axes_labelsize, tick_labelsize, title_labelsize,
ax, fname])
Parameters
----------
nmax : int
The number of Slepian functions to plot.
convention : str, optional, default = 'power'
The type of spectra to plot: 'power' for power spectrum, and
'energy' for energy spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
maxcolumns : int, optional, default = 3
The maximum number of columns to use when plotting the spectra
of multiple localization windows.
xscale : str, optional, default = 'lin'
Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'log'
Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
xlim : tuple, optional, default = (None, None)
The upper and lower limits used for the x axis.
ylim : tuple, optional, default = (None, None)
The lower and upper limits used for the y axis.
show : bool, optional, default = True
If True, plot the image to the screen.
title : bool, optional, default = True
If True, plot a legend on top of each subplot providing the taper
number and 1 minus the concentration factor.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
title_labelsize : int, optional, default = None
The font size for the subplot titles.
ax : matplotlib axes object, optional, default = None
An array of matplotlib axes objects where the plots will appear.
fname : str, optional, default = None
If present, save the image to the file.
"""
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams['axes.labelsize']
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams['xtick.labelsize']
if title_labelsize is None:
title_labelsize = _mpl.rcParams['axes.titlesize']
degrees = self.degrees()
spectrum = self.spectra(nmax=nmax, convention=convention, unit=unit,
base=base)
ncolumns = min(maxcolumns, nmax)
nrows = _np.ceil(nmax / ncolumns).astype(int)
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0]
* 0.7 * nrows / ncolumns + 0.41)
if ax is None:
fig, axes = _plt.subplots(nrows, ncolumns, figsize=figsize,
sharex='all', sharey='all')
else:
if hasattr(ax, 'flatten') and ax.size < nmax:
raise ValueError('ax.size must be greater or equal to nmax. ' +
'nmax = {:s}'.format(repr(nmax)) +
' and ax.size = {:s}.'.format(repr(ax.size)))
axes = ax
if ax is None:
if nrows > 1:
for axtemp in axes[:-1, :].flatten():
for xlabel_i in axtemp.get_xticklabels():
xlabel_i.set_visible(False)
axtemp.set_xlabel('', visible=False)
for axtemp in axes[:, 1:].flatten():
for ylabel_i in axtemp.get_yticklabels():
ylabel_i.set_visible(False)
axtemp.set_ylabel('', visible=False)
elif nmax > 1:
for axtemp in axes[1:].flatten():
for ylabel_i in axtemp.get_yticklabels():
ylabel_i.set_visible(False)
axtemp.set_ylabel('', visible=False)
if ylim == (None, None):
upper = spectrum[:, :min(self.nmax, nmax)].max()
lower = upper * 1.e-6
ylim = (lower, 5 * upper)
if xlim == (None, None):
if xscale == 'lin':
xlim = (degrees[0], degrees[-1])
for alpha in range(min(self.nmax, nmax)):
evalue = self.eigenvalues[alpha]
if min(self.nmax, nmax) == 1 and ax is None:
axtemp = axes
elif hasattr(axes, 'flatten'):
axtemp = axes.flatten()[alpha]
else:
axtemp = axes[alpha]
if (convention == 'power'):
axtemp.set_ylabel('Power', fontsize=axes_labelsize)
else:
axtemp.set_ylabel('Energy', fontsize=axes_labelsize)
if yscale == 'log':
axtemp.set_yscale('log', basey=base)
if xscale == 'log':
axtemp.set_xscale('log', basex=base)
axtemp.plot(degrees[1:], spectrum[1:, alpha],
label='#{:d} [loss={:2.2g}]'
.format(alpha, 1-evalue))
else:
axtemp.plot(degrees[0:], spectrum[0:, alpha],
label='#{:d} [loss={:2.2g}]'
.format(alpha, 1-evalue))
axtemp.set_xlabel('Spherical harmonic degree',
fontsize=axes_labelsize)
axtemp.set(xlim=xlim, ylim=ylim)
axtemp.minorticks_on()
axtemp.grid(grid, which='major')
axtemp.tick_params(labelsize=tick_labelsize)
if title is True:
axtemp.set_title('#{:d} [loss={:2.2g}]'
.format(alpha, 1-evalue),
fontsize=title_labelsize)
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes | Plot the spectra of the best-concentrated Slepian functions.
Usage
-----
x.plot_spectra(nmax, [convention, unit, base, maxcolumns, xscale,
yscale, grid, xlim, ylim, show, title,
axes_labelsize, tick_labelsize, title_labelsize,
ax, fname])
Parameters
----------
nmax : int
The number of Slepian functions to plot.
convention : str, optional, default = 'power'
The type of spectra to plot: 'power' for power spectrum, and
'energy' for energy spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
maxcolumns : int, optional, default = 3
The maximum number of columns to use when plotting the spectra
of multiple localization windows.
xscale : str, optional, default = 'lin'
Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'log'
Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
xlim : tuple, optional, default = (None, None)
The upper and lower limits used for the x axis.
ylim : tuple, optional, default = (None, None)
The lower and upper limits used for the y axis.
show : bool, optional, default = True
If True, plot the image to the screen.
title : bool, optional, default = True
If True, plot a legend on top of each subplot providing the taper
number and 1 minus the concentration factor.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
title_labelsize : int, optional, default = None
The font size for the subplot titles.
ax : matplotlib axes object, optional, default = None
An array of matplotlib axes objects where the plots will appear.
fname : str, optional, default = None
If present, save the image to the file. |
def _get_ssh_public_key(self):
"""Generate SSH public key from private key."""
key = ipa_utils.generate_public_ssh_key(self.ssh_private_key_file)
return '{user}:{key} {user}'.format(
user=self.ssh_user,
key=key.decode()
) | Generate SSH public key from private key. |
def allocate(self, dut_configuration_list, args=None):
"""
Allocates resources from available local devices.
:param dut_configuration_list: List of ResourceRequirements objects
:param args: Not used
:return: AllocationContextList with allocated resources
"""
dut_config_list = dut_configuration_list.get_dut_configuration()
# if we need one or more local hardware duts let's search attached
# devices using DutDetection
if not isinstance(dut_config_list, list):
raise AllocationError("Invalid dut configuration format!")
if next((item for item in dut_config_list if item.get("type") == "hardware"), False):
self._available_devices = DutDetection().get_available_devices()
if len(self._available_devices) < len(dut_config_list):
raise AllocationError("Required amount of devices not available.")
# Enumerate all required DUT's
try:
for dut_config in dut_config_list:
if not self.can_allocate(dut_config.get_requirements()):
raise AllocationError("Resource type is not supported")
self._allocate(dut_config)
except AllocationError:
# Locally allocated don't need to be released any way for
# now, so just re-raise the error
raise
alloc_list = AllocationContextList()
res_id = None
for conf in dut_config_list:
if conf.get("type") == "mbed":
res_id = conf.get("allocated").get("target_id")
context = AllocationContext(resource_id=res_id, alloc_data=conf)
alloc_list.append(context)
alloc_list.set_dut_init_function("serial", init_generic_serial_dut)
alloc_list.set_dut_init_function("process", init_process_dut)
alloc_list.set_dut_init_function("mbed", init_mbed_dut)
return alloc_list | Allocates resources from available local devices.
:param dut_configuration_list: List of ResourceRequirements objects
:param args: Not used
:return: AllocationContextList with allocated resources |
def serve(path=None, host=None, port=None, user_content=False, context=None,
username=None, password=None, render_offline=False,
render_wide=False, render_inline=False, api_url=None, title=None,
autorefresh=True, browser=False, quiet=None, grip_class=None):
"""
Starts a server to render the specified file or directory containing
a README.
"""
app = create_app(path, user_content, context, username, password,
render_offline, render_wide, render_inline, api_url,
title, None, autorefresh, quiet, grip_class)
app.run(host, port, open_browser=browser) | Starts a server to render the specified file or directory containing
a README. |
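A hedged usage sketch: render a local README on a chosen host and port and open a browser tab; the path and port below are placeholders.
serve(path='README.md', host='localhost', port=6419, browser=True)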
def quick_plot(cmap, fname=None, fig=None, ax=None, N=10):
'''Show quick test of a colormap.
'''
x = np.linspace(0, 10, N)
X, _ = np.meshgrid(x, x)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
mappable = ax.pcolor(X, cmap=cmap)
ax.set_title(cmap.name, fontsize=14)
ax.set_xticks([])
ax.set_yticks([])
plt.colorbar(mappable)
plt.show()
if fname is not None:
plt.savefig(fname + '.png', bbox_inches='tight') | Show quick test of a colormap. |
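A minimal usage sketch: preview a built-in matplotlib colormap with the helper above (it reads `cmap.name` for the title).
import matplotlib.pyplot as plt

quick_plot(plt.get_cmap('viridis'), N=10)                 # show on screen
quick_plot(plt.get_cmap('magma'), fname='magma_preview')  # also save magma_preview.png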
def get_realms_by_explosion(self, realms):
"""Get all members of this realm including members of sub-realms on multi-levels
:param realms: realms list, used to look for a specific one
:type realms: alignak.objects.realm.Realms
:return: list of members and add realm to realm_members attribute
:rtype: list
"""
# If rec_tag is already set, then we detected a loop in the realms hierarchy!
if getattr(self, 'rec_tag', False):
self.add_error("Error: there is a loop in the realm definition %s" % self.get_name())
return None
# Ok, not in a loop, we tag the realm and parse its members
self.rec_tag = True
# Order realm members list by name
self.realm_members = sorted(self.realm_members)
for member in self.realm_members:
realm = realms.find_by_name(member)
if not realm:
self.add_unknown_members(member)
continue
children = realm.get_realms_by_explosion(realms)
if children is None:
# We got a loop in our children definition
self.all_sub_members = []
self.realm_members = []
return None
# Return the list of all unique members
return self.all_sub_members | Get all members of this realm including members of sub-realms on multi-levels
:param realms: realms list, used to look for a specific one
:type realms: alignak.objects.realm.Realms
:return: list of members and add realm to realm_members attribute
:rtype: list |
def reset(self):
"Close the current failed connection and prepare for a new one"
log.info("resetting client")
rpc_client = self._rpc_client
self._addrs.append(self._peer.addr)
self.__init__(self._addrs)
self._rpc_client = rpc_client
self._dispatcher.rpc_client = rpc_client
rpc_client._client = weakref.ref(self) | Close the current failed connection and prepare for a new one |
def atlas_zonefile_push_dequeue( zonefile_queue=None ):
"""
Dequeue a zonefile's information to replicate
Return None if there are none queued
"""
ret = None
with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
if len(zfq) > 0:
ret = zfq.pop(0)
return ret | Dequeue a zonefile's information to replicate
Return None if there are none queued |
def generate_datafile_old(number_items=1000):
"""
Create the samples.py file
"""
from utils import get_names, generate_dataset
from pprint import pprint
filename = "samples.py"
dataset = generate_dataset(number_items)
fo = open(filename, "wb")
fo.write("#!/usr/bin/env python\n")
fo.write("# -*- coding: utf-8 -*-\n")
fo.write("#Brainaetic: http://www.thenetplanet.com\n\n")
fo.write("SAMPLES = ")
pprint(dataset, fo)
fo.close()
print "%s generated with %d samples" % (filename, number_items) | Create the samples.py file |
def are_dicts_equivalent(*args, **kwargs):
"""Indicate if :ref:`dicts <python:dict>` passed to this function have identical
keys and values.
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` have identical keys/values, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
# pylint: disable=too-many-return-statements
if not args:
return False
if len(args) == 1:
return True
if not all(is_dict(x) for x in args):
return False
first_item = args[0]
for item in args[1:]:
if len(item) != len(first_item):
return False
for key in item:
if key not in first_item:
return False
if not are_equivalent(item[key], first_item[key]):
return False
for key in first_item:
if key not in item:
return False
if not are_equivalent(first_item[key], item[key]):
return False
return True | Indicate if :ref:`dicts <python:dict>` passed to this function have identical
keys and values.
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` have identical keys/values, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator |
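A quick sketch of the symmetric key/value comparison performed above; key order is irrelevant, while extra or missing keys make the dicts non-equivalent.
a = {'x': 1, 'y': 2}
b = {'y': 2, 'x': 1}
c = {'x': 1}

assert are_dicts_equivalent(a, b) is True   # same keys and values, order ignored
assert are_dicts_equivalent(a, c) is False  # differing key sets
assert are_dicts_equivalent(a) is True      # a single argument is trivially equivalent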
def get_full_recirc_content(self, published=True):
"""performs es search and gets all content objects
"""
q = self.get_query()
search = custom_search_model(Content, q, published=published, field_map={
"feature_type": "feature_type.slug",
"tag": "tags.slug",
"content-type": "_type"
})
return search | performs es search and gets all content objects |
def main(self):
"""Parse package trees and report on any discrepancies."""
args = self.args
parsed_pytree, pypackages = self.parse_py_tree(pytree=args.pytree)
parsed_doctree = self.parse_doc_tree(doctree=args.doctree, pypackages=pypackages)
return self.compare_trees(parsed_pytree=parsed_pytree, parsed_doctree=parsed_doctree) | Parse package trees and report on any discrepancies. |
def gradient_black(
self, text=None, fore=None, back=None, style=None,
start=None, step=1, reverse=False,
linemode=True, movefactor=2, rgb_mode=False):
""" Return a black and white gradient.
Arguments:
text : String to colorize.
This will always be greater than 0.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting 256-color number.
The `start` will be adjusted if it is not within
bounds.
This will always be > 15.
This will be adjusted to fit within a 6-length
gradient, or the 24-length black/white gradient.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
linemode : Colorize each line in the input.
Default: True
movefactor : Factor for offset increase on each line when
using linemode.
Minimum value: 0
Default: 2
rgb_mode : Use true color (rgb) method and codes.
"""
gradargs = {
'step': step,
'fore': fore,
'back': back,
'style': style,
'reverse': reverse,
'rgb_mode': rgb_mode,
}
if linemode:
gradargs['movefactor'] = 2 if movefactor is None else movefactor
method = self._gradient_black_lines
else:
method = self._gradient_black_line
if text:
return self.__class__(
''.join((
self.data or '',
method(
text,
start or (255 if reverse else 232),
**gradargs)
))
)
# Operating on self.data.
return self.__class__(
method(
self.stripped(),
start or (255 if reverse else 232),
**gradargs)
) | Return a black and white gradient.
Arguments:
text : String to colorize.
This will always be greater than 0.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting 256-color number.
The `start` will be adjusted if it is not within
bounds.
This will always be > 15.
This will be adjusted to fit within a 6-length
gradient, or the 24-length black/white gradient.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
linemode : Colorize each line in the input.
Default: True
movefactor : Factor for offset increase on each line when
using linemode.
Minimum value: 0
Default: 2
rgb_mode : Use true color (rgb) method and codes. |
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
"""
Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle.
"""
def PotentialFunc(xdata, Radius):
"""
calculates the potential given the position (in volts)
and the radius of the particle.
Parameters
----------
xdata : ndarray
Positon data (in volts)
Radius : float
Radius in units of nm
Returns
-------
Potential : ndarray
Dynamical Spring Potential at positions given by xdata
"""
mass = ((4/3)*np.pi*((Radius*10**-9)**3))*density
yfit=(kBT_Gamma/mass)
Y = yfit*SpringPotnlFunc(xdata)
return Y
return PotentialFunc | Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle. |
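A hedged usage sketch of the factory pattern above: a made-up quadratic spring potential and placeholder physical constants are closed over, and the returned function is evaluated for one radius (in nm, as documented).
import numpy as np

def spring(x_volts):
    # hypothetical spring potential in position-detector volts
    return 0.5 * x_volts ** 2

potential = MakeDynamicPotentialFunc(kBT_Gamma=1e-13,  # hypothetical kB*T/Gamma value
                                     density=2200,     # silica-like density, kg/m^3
                                     SpringPotnlFunc=spring)
x = np.linspace(-1.0, 1.0, 5)
print(potential(x, Radius=50))  # potential at each position for a 50 nm particle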
def build_damage_dt(dstore, mean_std=True):
"""
:param dstore: a datastore instance
:param mean_std: a flag (default True)
:returns:
a composite dtype loss_type -> (mean_ds1, stdv_ds1, ...) or
loss_type -> (ds1, ds2, ...) depending on the flag mean_std
"""
oq = dstore['oqparam']
damage_states = ['no_damage'] + list(
dstore.get_attr('risk_model', 'limit_states'))
dt_list = []
for ds in damage_states:
ds = str(ds)
if mean_std:
dt_list.append(('%s_mean' % ds, F32))
dt_list.append(('%s_stdv' % ds, F32))
else:
dt_list.append((ds, F32))
damage_dt = numpy.dtype(dt_list)
loss_types = oq.loss_dt().names
return numpy.dtype([(lt, damage_dt) for lt in loss_types]) | :param dstore: a datastore instance
:param mean_std: a flag (default True)
:returns:
a composite dtype loss_type -> (mean_ds1, stdv_ds1, ...) or
loss_type -> (ds1, ds2, ...) depending on the flag mean_std |
def _get_previous_mz(self, mzs):
'''given an mz array, return the mz_data (disk location)
if the mz array was not previously written, write to disk first'''
mzs = tuple(mzs) # must be hashable
if mzs in self.lru_cache:
return self.lru_cache[mzs]
# mz not recognized ... check hash
mz_hash = "%s-%s-%s" % (hash(mzs), sum(mzs), len(mzs))
if mz_hash in self.hashes:
for mz_data in self.hashes[mz_hash]:
test_mz = self._read_mz(*mz_data)
if mzs == test_mz:
self.lru_cache[test_mz] = mz_data
return mz_data
# hash not recognized
# must be a new mz array ... write it, add it to lru_cache and hashes
mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
self.hashes[mz_hash].append(mz_data)
self.lru_cache[mzs] = mz_data
return mz_data | given an mz array, return the mz_data (disk location)
if the mz array was not previously written, write to disk first |
def nonzero_monies(self):
"""Get a list of the underlying ``Money`` instances that are not zero
Returns:
([Money]): A list of zero or more money instances. Currencies will be unique.
"""
return [copy.copy(m) for m in self._money_obs if m.amount != 0] | Get a list of the underlying ``Money`` instances that are not zero
Returns:
([Money]): A list of zero or more money instances. Currencies will be unique. |
def get_db_uri(config, output_dir):
"""Process results_database parameters in config to format them for
set database function
:param dict config: project configuration dict
:param str output_dir: output directory for results
:return: string for db uri
"""
db_config = config.get("results_database", {"db_uri": "default"})
if db_config['db_uri'] == 'default':
return os.path.join(output_dir, "results.sqlite")
return db_config['db_uri'] | Process results_database parameters in config to format them for
set database function
:param dict config: project configuration dict
:param str output_dir: output directory for results
:return: string for db uri |
def year_origin_filter(year_predicate=None, origin_predicate=None):
"""\
Returns a predicate for cable identifiers where `year_predicate` and
`origin_predicate` must hold true.
If both `year_predicate` and `origin_predicate` are ``None``, the returned
predicate always holds true.
`year_predicate`
A predicate which returns ``True`` or ``False`` for a cable
year.
`origin_predicate`
A predicate which returns ``True`` or ``False`` for a given
cable origin
"""
def accept(cable_id, predicate):
year, origin = _YEAR_ORIGIN_PATTERN.match(
canonicalize_id(cable_id)).groups()
return predicate(year, origin)
if year_predicate and origin_predicate:
return partial(accept, predicate=lambda y, o: year_predicate(y) \
and origin_predicate(o))
elif year_predicate:
return partial(accept, predicate=lambda y, o: year_predicate(y))
elif origin_predicate:
return partial(accept, predicate=lambda y, o: origin_predicate(o))
return lambda cable_id: True | \
Returns a predicate for cable identifiers where `year_predicate` and
`origin_predicate` must hold true.
If both `year_predicate` and `origin_predicate` are ``None``, the returned
predicate always holds true.
`year_predicate`
A predicate which returns ``True`` or ``False`` for a cable
year.
`origin_predicate`
A predicate which returns ``True`` or ``False`` for a given
cable origin |
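A hedged usage sketch: the cable identifiers below are hypothetical and assume the usual two-digit-year prefix and origin that the module's `_YEAR_ORIGIN_PATTERN` extracts.
pred = year_origin_filter(year_predicate=lambda y: y == '10',
                          origin_predicate=lambda o: o == 'BERLIN')
cable_ids = ['10BERLIN10', '09MADRID440']           # hypothetical identifiers
selected = [cid for cid in cable_ids if pred(cid)]  # keeps only the 2010 Berlin cable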
def collect_fragment(event, agora_host):
"""
Execute a search plan for the declared graph pattern and sends all obtained triples to the corresponding
collector functions (config
"""
agora = Agora(agora_host)
graph_pattern = ""
for tp in __triple_patterns:
graph_pattern += '{} . '.format(tp)
fragment, _, graph = agora.get_fragment_generator('{%s}' % graph_pattern, stop_event=event, workers=4)
__extract_pattern_nodes(graph)
log.info('querying { %s}' % graph_pattern)
for (t, s, p, o) in fragment:
collectors = __triple_patterns[str(__plan_patterns[t])]
for c, args in collectors:
log.debug('Sending triple {} {} {} to {}'.format(s.n3(graph.namespace_manager), graph.qname(p),
o.n3(graph.namespace_manager), c))
c((s, p, o))
if event.isSet():
raise Exception('Abort collecting fragment')
yield (c.func_name, (t, s, p, o)) | Execute a search plan for the declared graph pattern and sends all obtained triples to the corresponding
collector functions (config |
def parse_data_shape(data_shape_str):
"""Parse string to tuple or int"""
ds = data_shape_str.strip().split(',')
if len(ds) == 1:
data_shape = (int(ds[0]), int(ds[0]))
elif len(ds) == 2:
data_shape = (int(ds[0]), int(ds[1]))
else:
raise ValueError("Unexpected data_shape: %s", data_shape_str)
return data_shape | Parse string to tuple or int |
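A quick sketch of the two accepted input forms:
assert parse_data_shape('224') == (224, 224)      # single value -> square shape
assert parse_data_shape('224,320') == (224, 320)  # explicit pair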
def combine_first(self, other):
"""Combine two Datasets, default to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
other : DataArray
Used to fill all matching missing values in this array.
Returns
-------
DataArray
"""
out = ops.fillna(self, other, join="outer", dataset_join="outer")
return out | Combine two Datasets, default to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
other : DataArray
Used to fill all matching missing values in this array.
Returns
-------
DataArray |
def delete_all(self, filter, timeout=-1, force=False):
"""
Deletes all Server Profile objects from the appliance that match the provided filter.
Filters are supported only for the following profile attributes: name, description, serialnumber, uuid,
mactype, wwntype, serialnumbertype, status, and state.
Examples:
>>> server_profile_client.delete_all(filter="name='Exchange Server'")
# Remove all profiles that match the name "Exchange Server"
>>> server_profile_client.delete_all(filter="name matches'%25Database%25'")
# Remove all profiles that have the word "Database" in its name
The filter function here operates similarly to the function defined for GET Server Profiles. It allows
for both actual and partial matches of data in the profile. Any requests that use a wildcard match
must include a %25 as illustrated in the previous example. This is how you encode that character for
transmission to the appliance.
Args:
filter (dict): Object to delete.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
bool: Indicates whether the server profile was successfully deleted.
"""
return self._helper.delete_all(filter=filter, force=force, timeout=timeout) | Deletes all Server Profile objects from the appliance that match the provided filter.
Filters are supported only for the following profile attributes: name, description, serialnumber, uuid,
mactype, wwntype, serialnumbertype, status, and state.
Examples:
>>> server_profile_client.delete_all(filter="name='Exchange Server'")
# Remove all profiles that match the name "Exchange Server"
>>> server_profile_client.delete_all(filter="name matches'%25Database%25'")
# Remove all profiles that have the word "Database" in its name
The filter function here operates similarly to the function defined for GET Server Profiles. It allows
for both actual and partial matches of data in the profile. Any requests that use a wildcard match
must include a %25 as illustrated in the previous example. This is how you encode that character for
transmission to the appliance.
Args:
filter (dict): Object to delete.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
bool: Indicates whether the server profile was successfully deleted. |
def guggenheim_katayama(target, K2, n, temperature='pore.temperature',
critical_temperature='pore.critical_temperature',
critical_pressure='pore.critical_pressure'):
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
The dictionary key containing the critical pressure values (K)
"""
T = target[temperature]
Pc = target[critical_pressure]
Tc = target[critical_temperature]
sigma_o = K2*Tc**(1/3)*Pc**(2/3)
value = sigma_o*(1-T/Tc)**n
return value | r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
The dictionary key containing the critical pressure values (K) |
def _auth(profile=None, api_version=1, **connection_args):
'''
Set up heat credentials, returns
`heatclient.client.Client`. Optional parameter
"api_version" defaults to 1.
Only intended to be used within heat-enabled modules
'''
if profile:
prefix = profile + ':keystone.'
else:
prefix = 'keystone.'
def get(key, default=None):
'''
Checks connection_args, then salt-minion config,
falls back to specified default value.
'''
return connection_args.get('connection_' + key,
__salt__['config.get'](prefix + key, default))
user = get('user', 'admin')
password = get('password', None)
tenant = get('tenant', 'admin')
tenant_id = get('tenant_id')
auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')
insecure = get('insecure', False)
admin_token = get('token')
region_name = get('region_name', None)
if admin_token and api_version != 1 and not password:
# If we had a password we could just
# ignore the admin-token and move on...
raise SaltInvocationError('Only can use keystone admin token ' +
'with Heat API v1')
elif password:
# Can't use the admin-token anyway
kwargs = {'username': user,
'password': password,
'tenant_id': tenant_id,
'auth_url': auth_url,
'region_name': region_name,
'tenant_name': tenant}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:
kwargs['insecure'] = True
elif api_version == 1 and admin_token:
kwargs = {'token': admin_token,
'auth_url': auth_url}
else:
raise SaltInvocationError('No credentials to authenticate with.')
token = __salt__['keystone.token_get'](profile)
kwargs['token'] = token['id']
# This doesn't really prevent the password from showing up
# in the minion log as keystoneclient.session is
# logging it anyway when in debug-mode
kwargs.pop('password')
try:
heat_endpoint = __salt__['keystone.endpoint_get']('heat', profile)['url']
except KeyError:
heat_endpoint = __salt__['keystone.endpoint_get']('heat', profile)['publicurl']
heat_endpoint = heat_endpoint % token
log.debug('Calling heatclient.client.Client(%s, %s, **%s)',
api_version, heat_endpoint, kwargs)
# may raise exc.HTTPUnauthorized, exc.HTTPNotFound
# but we deal with those elsewhere
return heatclient.client.Client(api_version, endpoint=heat_endpoint, **kwargs) | Set up heat credentials, returns
`heatclient.client.Client`. Optional parameter
"api_version" defaults to 1.
Only intended to be used within heat-enabled modules |
def generate_routes(config):
"""Generate a list of urls that map to generated proxy views.
generate_routes({
'test_proxy': {
'base_url': 'https://google.com/',
'prefix': '/test_prefix/',
'verify_ssl': False,
'csrf_exempt': False,
'middleware': ['djproxy.proxy_middleware.AddXFF'],
'append_middleware': ['djproxy.proxy_middleware.AddXFF'],
'timeout': 3.0,
'cert': None
}
})
Required configuration keys:
* `base_url`
* `prefix`
Optional configuration keys:
* `verify_ssl`: defaults to `True`.
* `csrf_exempt`: defaults to `True`.
* `cert`: defaults to `None`.
* `timeout`: defaults to `None`.
* `middleware`: Defaults to `None`. Specifying `None` causes djproxy to use
the default middleware set. If a list is passed, the default middleware
list specified by the HttpProxy definition will be replaced with the
provided list.
* `append_middleware`: Defaults to `None`. `None` results in no changes to
the default middleware set. If a list is specified, the list will be
appended to the default middleware list specified in the HttpProxy
definition or, if provided, the middleware key specified in the config
dict.
Returns:
[
url(r'^test_prefix/', GeneratedProxy.as_view(), name='test_proxy')),
]
"""
routes = []
for name, config in iteritems(config):
pattern = r'^%s(?P<url>.*)$' % re.escape(config['prefix'].lstrip('/'))
proxy = generate_proxy(
prefix=config['prefix'], base_url=config['base_url'],
verify_ssl=config.get('verify_ssl', True),
middleware=config.get('middleware'),
append_middleware=config.get('append_middleware'),
cert=config.get('cert'),
timeout=config.get('timeout'))
proxy_view_function = proxy.as_view()
proxy_view_function.csrf_exempt = config.get('csrf_exempt', True)
routes.append(url(pattern, proxy_view_function, name=name))
return routes | Generate a list of urls that map to generated proxy views.
generate_routes({
'test_proxy': {
'base_url': 'https://google.com/',
'prefix': '/test_prefix/',
'verify_ssl': False,
'csrf_exempt': False,
'middleware': ['djproxy.proxy_middleware.AddXFF'],
'append_middleware': ['djproxy.proxy_middleware.AddXFF'],
'timeout': 3.0,
'cert': None
}
})
Required configuration keys:
* `base_url`
* `prefix`
Optional configuration keys:
* `verify_ssl`: defaults to `True`.
* `csrf_exempt`: defaults to `True`.
* `cert`: defaults to `None`.
* `timeout`: defaults to `None`.
* `middleware`: Defaults to `None`. Specifying `None` causes djproxy to use
the default middleware set. If a list is passed, the default middleware
list specified by the HttpProxy definition will be replaced with the
provided list.
* `append_middleware`: Defaults to `None`. `None` results in no changes to
the default middleware set. If a list is specified, the list will be
appended to the default middleware list specified in the HttpProxy
definition or, if provided, the middleware key specified in the config
dict.
Returns:
[
url(r'^test_prefix/', GeneratedProxy.as_view(), name='test_proxy')),
] |
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
"""Gets user presence information.
Args:
user (str): User to get presence info on. Defaults to the authed user.
e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("users.getPresence", http_verb="GET", params=kwargs) | Gets user presence information.
Args:
user (str): User to get presence info on. Defaults to the authed user.
e.g. 'W1234567890' |
def lnprior(self, X):
"""
Use a uniform, bounded prior.
"""
if np.any(X < self._lower_left) or np.any(X > self._upper_right):
return -np.inf
else:
return 0.0 | Use a uniform, bounded prior. |
def persist(arg, depth=Ellipsis, on_mutable=None):
'''
persist(x) yields a persistent version of x if possible, or yields x itself.
The transformations performed by persist(x) are as follows:
* If x is an immutable object, yields x.persist()
* If x is a set, yield a frozenset of persist(u) for all u in x.
* If x is a numpy array, yield imm_array(x).
* If x is a map, yields a persistent version of x with all keys and values replaced with their
persist()'ed form; note that this respects laziness and itables.
* If x is a list/tuple type, yields a tuple of persist()'ed contents.
* Otherwise, if the type of x is not recognized, yields x.
The depth to which persist() searches the argument's elements is controlled by the depth option;
the default behavior is to persist objects down to the point that a persistent object is found,
at which point its elements are not checked for persistence.
Note that persist() is not guaranteed to recognize a particular object; it is intended as a
utility function for basic functional-style and immutable data code in Python. In particular,
it is useful for pimms's immutable objects; dicts and mappings; pimms's lazy-maps and itables;
pyrsistent's sets, vectors, and maps; sets and frozensets (which are both commuted to
frozensets); and anything implementing __iter__ (which are commuted to tuples). Objects that are
not numbers or strings are considered potentially-mutable and will trigger the on_mutable case.
The optional arguments may be passed to persist:
* depth (default: Ellipsis) specifies the depth to which the persist() function should search
when persisting objects. The given argument is considered depth 0, so persist(arg, 0) will
persist only arg and not its contents, if it is a collection. If None is given, then goes to
any depth; if Ellipsis is given, then searches until a persistent object is found, but does
not attempt to persist the elements of already-persistent containers (this is the default).
* on_mutable (default: None) specifies what to do when a non-persistable object is encountered
in the search. If None, then the object is left; if 'error', then an error is raised;
otherwise, this must be a function that is passed the object--the return value of this
function is the replacement used in the object returned from persist().
'''
from .immutable import (is_imm, imm_copy)
# Parse the on_mutable argument
if on_mutable is None: on_mutable = lambda x:x
elif on_mutable == 'error':
def _raise(x):
raise ValueError('non-persistable: %s' % x)
on_mutable = _raise
if depth in (None, Ellipsis): depth_next = depth
elif depth < 0: return arg
else: depth_next = depth - 1
precur = lambda x:persist(x, depth=depth_next, on_mutable=on_mutable)
# See if we have an easy type to handle
if is_imm(arg): return imm_copy(arg)
if is_quantity(arg):
(m,u) = (mag(arg), unit(arg))
mm = precur(m)
if mm is m: return arg
else: return quant(mm, u)
elif isinstance(arg, np.ndarray): return imm_array(arg)
elif isinstance(arg, np.generic):
x = type(arg)(arg)
x.setflags(write=False)
return x
elif is_str(arg) or is_number(arg): return arg
elif isinstance(arg, ps.PVector):
if depth is Ellipsis or depth == 0: return arg
for (k,v0) in zip(range(len(arg)), arg):
v = precur(v0)
if v0 is not v: arg = arg.set(k,v)
return arg
elif isinstance(arg, ps.PSet):
if depth is Ellipsis or depth == 0: return arg
for v0 in arg:
v = precur(v0)
if v0 is not v: arg = arg.discard(v0).add(v)
return arg
elif is_pmap(arg):
if depth is Ellipsis or depth == 0: return arg
return key_map(precur, value_map(precur, arg))
elif is_map(arg):
if not is_pmap(arg): arg = ps.pmap(arg)
if depth == 0: return arg
return key_map(precur, value_map(precur, arg))
elif isinstance(arg, frozenset):
if depth is Ellipsis or depth == 0: return frozenset(arg)
a = [x for x in arg]
q = [precur(x) for x in a]
if all(ai is qi for (ai,qi) in zip(a,q)): return arg
return frozenset(q)
elif isinstance(arg, set):
if depth == 0: return frozenset(arg)
a = [x for x in arg]
q = [precur(x) for x in a]
if isinstance(arg, frozenset) and all(ai is qi for (ai,qi) in zip(a,q)): return arg
return frozenset(q)
elif hasattr(arg, '__iter__'):
if depth == 0 or (depth is Ellipsis and isinstance(arg, tuple)): return tuple(arg)
q = tuple(precur(x) for x in arg)
if isinstance(arg, tuple) and all(ai is qi for (ai,qi) in zip(arg,q)): return arg
else: return q
elif isinstance(arg, (types.FunctionType, partial)):
return arg
else: return on_mutable(arg) | persist(x) yields a persistent version of x if possible, or yields x itself.
The transformations performed by persist(x) are as follows:
* If x is an immutable object, yields x.persist()
* If x is a set, yield a frozenset of persist(u) for all u in x.
* If x is a numpy array, yield imm_array(x).
* If x is a map, yields a persistent version of x with all keys and values replaced with their
persist()'ed form; note that this respects laziness and itables.
* If x is a list/tuple type, yields a tuple of persist()'ed contents.
* Otherwise, if the type of x is not recognized, yields x.
The depth to which persist() searches the argument's elements is controlled by the depth option;
the default behavior is to persist objects down to the point that a persistent object is found,
at which point its elements are not checked for persistence.
Note that persist() is not guaranteed to recognize a particular object; it is intended as a
utility function for basic functional-style and immutable data code in Python. In particular,
it is useful for pimms's immutable objects; dicts and mappings; pimms's lazy-maps and itables;
pyrsistent's sets, vectors, and maps; sets and frozensets (which are both commuted to
frozensets); and anything implementing __iter__ (which are commuted to tuples). Objects that are
not numbers or strings are considered potentially-mutable and will trigger the on_mutable case.
The optional arguments may be passed to persist:
    * depth (default: Ellipsis) specifies the depth to which the persist() function should search
when persisting objects. The given argument is considered depth 0, so persist(arg, 0) will
persist only arg and not its contents, if it is a collection. If None is given, then goes to
any depth; if Ellipsis is given, then searches until a persistent object is found, but does
not attempt to persist the elements of already-persistent containers (this is the default).
* on_mutable (default: None) specifies what to do when a non-persistable object is encountered
in the search. If None, then the object is left; if 'error', then an error is raised;
otherwise, this must be a function that is passed the object--the return value of this
function is the replacement used in the object returned from persist(). |
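A brief, hedged usage sketch for persist(); the import name pimms is an assumption about the
module shown above, and the comments only restate the rules from the docstring.
import numpy as np
import pimms  # assumed module name for the code above

data = {'xs': [1, 2, 3], 'arr': np.zeros(4)}   # a mutable dict holding a list and a writable array
p = pimms.persist(data)                        # dict -> persistent map, list -> tuple, array -> read-only copy
p_shallow = pimms.persist(data, depth=0)       # only the outer dict is converted; its contents are left alone
pimms.persist([object()], on_mutable='error')  # a plain object is not persistable, so this raises ValueError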
def render_to_message(self, extra_context=None, **kwargs):
"""
Renders and returns an unsent message with the provided context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering the
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class`
"""
if extra_context is None:
extra_context = {}
# Ensure our custom headers are added to the underlying message class.
kwargs.setdefault('headers', {}).update(self.headers)
context = self.get_context_data(**extra_context)
return self.message_class(
subject=self.render_subject(context),
body=self.render_body(context),
**kwargs) | Renders and returns an unsent message with the provided context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering the
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class` |
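A hedged usage sketch: WelcomeEmail is a hypothetical subclass that supplies message_class,
headers, get_context_data(), render_subject() and render_body(); every keyword argument other
than extra_context is forwarded to the message constructor.
message = WelcomeEmail().render_to_message(
    extra_context={'first_name': 'Ada'},      # merged into the template context
    to=['ada@example.com'],                   # passed through to message_class(...)
    from_email='noreply@example.com',         # likewise passed through (name is an assumption)
)
message.send()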
def parse(text, encoding='utf8'):
"""Parse the querystring into a normalized form."""
# Decode the text if we got bytes.
if isinstance(text, six.binary_type):
text = text.decode(encoding)
return Query(text, split_segments(text)) | Parse the querystring into a normalized form. |
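A hedged usage note: Query and split_segments come from the surrounding module, so only the
decoding behaviour shown here is taken directly from the function above.
q = parse(b'name__icontains=ada&age__gt=30')   # bytes are decoded as UTF-8 before splitting
q = parse('name__icontains=ada&age__gt=30')    # text is passed straight to the segment splitter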
def get_url(self):
"""IFTTT Webhook url
:return: url
:rtype: str
"""
if not self.data[self.execute_name]:
raise InvalidConfig(extra_body='Value for IFTTT is required on {} device. Get your key here: '
'https://ifttt.com/services/maker_webhooks/settings'.format(self.name))
if not self.data.get('event'):
raise InvalidConfig(extra_body='Event option is required for IFTTT on {} device. '
'You define the event name when creating a Webhook '
'applet'.format(self.name))
url = self.url_pattern.format(event=self.data['event'], key=self.data[self.execute_name])
return url | IFTTT Webhook url
:return: url
:rtype: str |
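A hedged sketch of the configuration this method expects; the 'ifttt' key, the event name and
the url_pattern value are assumptions for illustration (the pattern mirrors IFTTT's documented
webhook format) rather than values taken from the class above.
device.data = {'ifttt': 'dJxxxxxxxxxxxxxxxxxx', 'event': 'button_pressed'}
# With url_pattern = 'https://maker.ifttt.com/trigger/{event}/with/key/{key}',
# get_url() would return:
#   https://maker.ifttt.com/trigger/button_pressed/with/key/dJxxxxxxxxxxxxxxxxxx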
def ball_pick(n, d, rng=None):
"""Return cartesian vectors uniformly picked on the unit ball in an
arbitrary number of dimensions.
The unit ball is the space enclosed by the unit sphere.
The picking is done by rejection sampling in the unit cube.
    In 3-dimensional space, the fraction of valid points is `\pi / 6 \sim 0.52`.
Parameters
----------
n: integer
Number of points to return.
d: integer
Number of dimensions of the space in which the ball lives
Returns
-------
r: array, shape (n, d)
Sample cartesian vectors.
"""
def valid(r):
return vector_mag_sq(r) < 1.0
return rejection_pick(L=2.0, n=n, d=d, valid=valid, rng=rng) | Return cartesian vectors uniformly picked on the unit ball in an
arbitrary number of dimensions.
The unit ball is the space enclosed by the unit sphere.
The picking is done by rejection sampling in the unit cube.
In 3-dimensional space, the fraction of valid points is `\pi / 6 \sim 0.52`.
Parameters
----------
n: integer
Number of points to return.
d: integer
Number of dimensions of the space in which the ball lives
Returns
-------
r: array, shape (n, d)
Sample cartesian vectors. |
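A self-contained sketch of the same rejection-sampling idea using only NumPy; it illustrates the
technique and is not the module's rejection_pick() helper.
import numpy as np

def ball_pick_sketch(n, d, seed=None):
    rng = np.random.default_rng(seed)
    samples = np.empty((0, d))
    while len(samples) < n:
        # Draw candidates uniformly in the cube [-1, 1]^d and keep those inside the unit ball.
        candidates = rng.uniform(-1.0, 1.0, size=(n, d))
        inside = (candidates ** 2).sum(axis=1) < 1.0
        samples = np.concatenate([samples, candidates[inside]])
    return samples[:n]

points = ball_pick_sketch(1000, 3)
assert np.all((points ** 2).sum(axis=1) < 1.0)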
def get_records(self, name):
"""Return all the records for the given name in the cache.
Args:
name (string): The name which the required models are stored under.
Returns:
list: A list of :class:`cinder_data.model.CinderModel` models.
"""
if name in self._cache:
            return list(self._cache[name].values())
else:
return [] | Return all the records for the given name in the cache.
Args:
name (string): The name which the required models are stored under.
Returns:
list: A list of :class:`cinder_data.model.CinderModel` models. |
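A hedged illustration of the cache layout this method assumes: _cache maps a model name to a
dict of records keyed by id (the store and record objects below are hypothetical).
store._cache = {'Person': {1: person_a, 2: person_b}}
store.get_records('Person')   # -> [person_a, person_b]
store.get_records('Address')  # -> []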
def publish(self, message_type, message_payload):
"""
Publish the specified object that the function automatically converts
into a JSON string representation.
This function use the lowered class name of the service as the AMQP
routing key. For instance, if the class ``ExampleService`` inherits
from the base class ``BaseService``, the methods of an instance of
this class will publish messages using the routing key named
``exampleservice``.
        @param message_type: string representing the type of the event, typically
            ``on_something_happened``.
@param message_payload: an object to convert into a JSON string
representation and to publish.
"""
payload = json.dumps(jsonpickle.Pickler(unpicklable=False).flatten(message_payload))
message = amqp.Message(payload)
message.properties["delivery_mode"] = 2
name = 'majormode.%s.%s.%s' % (settings.ENVIRONMENT_STAGE, self.service_name.lower(), message_type.lower())
self.channel.queue_declare(queue=name, durable=True, exclusive=False, auto_delete=False)
self.channel.exchange_declare(exchange=name, type="direct", durable=True, auto_delete=False,)
self.channel.queue_bind(queue=name, exchange=name, routing_key=name)
self.channel.basic_publish(message, exchange=name, routing_key=name) | Publish the specified object that the function automatically converts
into a JSON string representation.
This function uses the lowercased class name of the service as the AMQP
routing key. For instance, if the class ``ExampleService`` inherits
from the base class ``BaseService``, the methods of an instance of
this class will publish messages using the routing key named
``exampleservice``.
@param message_type: string representing the type of the event, typically
``on_something_happened``.
@param message_payload: an object to convert into a JSON string
representation and to publish. |
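A hedged usage sketch: for a service whose service_name is "ExampleService" running in a
hypothetical "dev" stage, the call below declares and publishes to the queue, exchange and
routing key "majormode.dev.exampleservice.on_user_registered".
service.publish('on_user_registered', {'account_id': 42, 'email': 'ada@example.com'})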
def is_defined(self, objtxt, force_import=False):
"""Return True if object is defined"""
return isdefined(objtxt, force_import=force_import,
namespace=self.locals) | Return True if object is defined |
def findRequirements():
"""
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
"""
requirementsPath = os.path.join(REPO_DIR, "requirements.txt")
requirements = parse_file(requirementsPath)
  # If the user has a pre-release version of the numenta packages installed, they must
  # have installed and built those packages from source, and it is up to them to decide
  # when to update. In that case we quietly drop the corresponding entries from
  # requirements.txt so the two installations are not conflated.
if nupicPrereleaseInstalled():
requirements = [req for req in requirements if "nupic" not in req]
if htmresearchCorePrereleaseInstalled():
requirements = [req for req in requirements if "htmresearch-core" not in req]
return requirements | Read the requirements.txt file and parse into requirements for setup's
install_requirements option. |
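An illustration of the filtering step with a hypothetical requirements list; parse_file() and
the *PrereleaseInstalled() helpers come from the surrounding build scripts.
requirements = ['numpy>=1.16', 'nupic==1.0.5', 'nupic.bindings==1.0.5', 'pytest>=3.0']
requirements = [req for req in requirements if 'nupic' not in req]
# -> ['numpy>=1.16', 'pytest>=3.0']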
def force_move(source, destination):
""" Force the move of the source inside the destination even if the destination has already a folder with the
name inside. In the case, the folder will be replaced.
:param string source: path of the source to move.
:param string destination: path of the folder to move the source to.
"""
if not os.path.exists(destination):
raise RuntimeError(
'The code could not be moved to {destination} '
'because the folder does not exist'.format(destination=destination))
destination_folder = os.path.join(destination, os.path.split(source)[-1])
if os.path.exists(destination_folder):
shutil.rmtree(destination_folder)
    shutil.move(source, destination) | Force the move of the source into the destination even if the destination already has a folder with
the same name inside. In that case, the existing folder is replaced.
:param string source: path of the source to move.
:param string destination: path of the folder to move the source to. |
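A hedged usage example with hypothetical paths: this moves ./build into /opt/app, deleting any
existing /opt/app/build first.
force_move('./build', '/opt/app')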
def get_by_username(cls, username):
"""
        Return a User by username
        """
        return cls.query().filter(cls.username == username).first() | Return a User by username
def __collectGarbage(self, ignored=None):
"""Collects garbage"""
del ignored # unused argument
collected = []
level0, level1, level2 = gc.get_count()
if level0 > 0:
collected.append(gc.collect(0))
if level1 > 0:
collected.append(gc.collect(1))
if level2 > 0:
collected.append(gc.collect(2))
if self.__where == GCPluginConfigDialog.SILENT:
return
message = ""
if collected:
for index in range(len(collected)):
if collected[index] == 0:
continue
if message:
message += ", "
message += "generation " + str(index) + ": " + \
str(collected[index])
if message:
message = "GC objects: " + message
else:
message = "No GC objects"
if not message:
return
if self.__where == GCPluginConfigDialog.STATUS_BAR:
self.ide.showStatusBarMessage(message)
else:
logging.info(message) | Collects garbage |
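The per-generation pattern used above, shown standalone with the standard gc module
(illustrative only; it does not depend on the plugin class).
import gc

counts = gc.get_count()                       # pending objects in generations 0, 1 and 2
collected = [gc.collect(gen) for gen, count in enumerate(counts) if count > 0]
print('GC objects collected per generation:', collected)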
def _listChunks(self):
"""
Lists stored chunks
:return: sorted list of available chunk indices
"""
chunks = []
for fileName in os.listdir(self.dataDir):
index = ChunkedFileStore._fileNameToChunkIndex(fileName)
if index is not None:
chunks.append(index)
return sorted(chunks) | Lists stored chunks
:return: sorted list of available chunk indices |
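A hedged sketch of the companion helper this method relies on; the "<startIndex>_<storeName>"
file-name convention is an assumption for illustration, not taken from the class above.
def _fileNameToChunkIndex(fileName):
    # Hypothetical mapping: chunk files are assumed to be named "<startIndex>_<storeName>".
    prefix = fileName.split('_', 1)[0]
    return int(prefix) if prefix.isdigit() else None

_fileNameToChunkIndex('1024_db')        # -> 1024
_fileNameToChunkIndex('config.json')    # -> None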