code | docstring
---|---|
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir, followlinks=True):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files | Find all files under 'dir' and return the list of full filenames
(relative to 'dir'). |
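A brief usage sketch for `findall` above (file names are illustrative); running it from inside the target directory keeps the returned paths relative, since the prefix-stripping branch only applies to `os.curdir`:

```python
import os
import tempfile

# Build a scratch tree and list it with findall (assumed to be in scope).
tmp = tempfile.mkdtemp()
os.makedirs(os.path.join(tmp, "sub"))
for rel in ("a.txt", os.path.join("sub", "b.txt")):
    open(os.path.join(tmp, rel), "w").close()

os.chdir(tmp)             # note: changes the process-wide working directory
print(sorted(findall()))  # ['a.txt', 'sub/b.txt'] on POSIX
```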
def unit_vector_game(n, avoid_pure_nash=False, random_state=None):
"""
Return a NormalFormGame instance of the 2-player game "unit vector
game" (Savani and von Stengel, 2016). Payoffs for player 1 are
chosen randomly from the [0, 1) range. For player 0, each column
contains exactly one 1 payoff and the rest is 0.
Parameters
----------
n : scalar(int)
Number of actions.
avoid_pure_nash : bool, optional(default=False)
If True, player 0's payoffs will be placed in order to avoid
pure Nash equilibria. (If necessary, the payoffs for player 1
are redrawn so as not to have a dominant action.)
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = unit_vector_game(4, random_state=1234)
>>> g.players[0]
Player([[ 1., 0., 1., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., 0., 0.],
[ 0., 1., 0., 0.]])
>>> g.players[1]
Player([[ 0.19151945, 0.62210877, 0.43772774, 0.78535858],
[ 0.77997581, 0.27259261, 0.27646426, 0.80187218],
[ 0.95813935, 0.87593263, 0.35781727, 0.50099513],
[ 0.68346294, 0.71270203, 0.37025075, 0.56119619]])
With `avoid_pure_nash=True`:
>>> g = unit_vector_game(4, avoid_pure_nash=True, random_state=1234)
>>> g.players[0]
Player([[ 1., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., 0.]])
>>> g.players[1]
Player([[ 0.19151945, 0.62210877, 0.43772774, 0.78535858],
[ 0.77997581, 0.27259261, 0.27646426, 0.80187218],
[ 0.95813935, 0.87593263, 0.35781727, 0.50099513],
[ 0.68346294, 0.71270203, 0.37025075, 0.56119619]])
>>> pure_nash_brute(g)
[]
"""
random_state = check_random_state(random_state)
payoff_arrays = (np.zeros((n, n)), random_state.random_sample((n, n)))
if not avoid_pure_nash:
ones_ind = random_state.randint(n, size=n)
payoff_arrays[0][ones_ind, np.arange(n)] = 1
else:
if n == 1:
raise ValueError('Cannot avoid pure Nash with n=1')
maxes = payoff_arrays[1].max(axis=0)
is_suboptimal = payoff_arrays[1] < maxes
nums_suboptimal = is_suboptimal.sum(axis=1)
while (nums_suboptimal==0).any():
payoff_arrays[1][:] = random_state.random_sample((n, n))
payoff_arrays[1].max(axis=0, out=maxes)
np.less(payoff_arrays[1], maxes, out=is_suboptimal)
is_suboptimal.sum(axis=1, out=nums_suboptimal)
for i in range(n):
one_ind = random_state.randint(n)
while not is_suboptimal[i, one_ind]:
one_ind = random_state.randint(n)
payoff_arrays[0][one_ind, i] = 1
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g | Return a NormalFormGame instance of the 2-player game "unit vector
game" (Savani and von Stengel, 2016). Payoffs for player 1 are
chosen randomly from the [0, 1) range. For player 0, each column
contains exactly one 1 payoff and the rest is 0.
Parameters
----------
n : scalar(int)
Number of actions.
avoid_pure_nash : bool, optional(default=False)
If True, player 0's payoffs will be placed in order to avoid
pure Nash equilibria. (If necessary, the payoffs for player 1
are redrawn so as not to have a dominant action.)
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = unit_vector_game(4, random_state=1234)
>>> g.players[0]
Player([[ 1., 0., 1., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., 0., 0.],
[ 0., 1., 0., 0.]])
>>> g.players[1]
Player([[ 0.19151945, 0.62210877, 0.43772774, 0.78535858],
[ 0.77997581, 0.27259261, 0.27646426, 0.80187218],
[ 0.95813935, 0.87593263, 0.35781727, 0.50099513],
[ 0.68346294, 0.71270203, 0.37025075, 0.56119619]])
With `avoid_pure_nash=True`:
>>> g = unit_vector_game(4, avoid_pure_nash=True, random_state=1234)
>>> g.players[0]
Player([[ 1., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., 0.]])
>>> g.players[1]
Player([[ 0.19151945, 0.62210877, 0.43772774, 0.78535858],
[ 0.77997581, 0.27259261, 0.27646426, 0.80187218],
[ 0.95813935, 0.87593263, 0.35781727, 0.50099513],
[ 0.68346294, 0.71270203, 0.37025075, 0.56119619]])
>>> pure_nash_brute(g)
[] |
def contains(self, key):
"""Does this configuration contain a given key?"""
if self._jconf is not None:
return self._jconf.contains(key)
else:
return key in self._conf | Does this configuration contain a given key? |
def get_queryset(self):
"""
Implements before date filtering on ``date_field``
"""
kwargs = {}
if self.ends_at:
kwargs.update({'%s__lt' % self.date_field: self.ends_at})
return super(BeforeMixin, self).get_queryset().filter(**kwargs) | Implements before date filtering on ``date_field`` |
def remove_unreferenced_items(self, stale_cts):
"""
See if there are items that no longer point to an existing parent.
"""
stale_ct_ids = list(stale_cts.keys())
parent_types = (ContentItem.objects.order_by()
.exclude(polymorphic_ctype__in=stale_ct_ids)
.values_list('parent_type', flat=True).distinct())
num_unreferenced = 0
for ct_id in parent_types:
parent_ct = ContentType.objects.get_for_id(ct_id)
unreferenced_items = (ContentItem.objects
.filter(parent_type=ct_id)
.order_by('polymorphic_ctype', 'pk'))
if parent_ct.model_class() is not None:
# Only select the items that are part of removed pages,
# unless the parent type was removed - then removing all is correct.
unreferenced_items = unreferenced_items.exclude(
parent_id__in=parent_ct.get_all_objects_for_this_type()
)
if unreferenced_items:
for item in unreferenced_items:
self.stdout.write(
"- {cls}#{id} points to nonexisting {app_label}.{model}".format(
cls=item.__class__.__name__, id=item.pk,
app_label=parent_ct.app_label, model=parent_ct.model
))
num_unreferenced += 1
if not self.dry_run and self.remove_unreferenced:
item.delete()
if not num_unreferenced:
self.stdout.write("No unreferenced items found.")
else:
self.stdout.write("{0} unreferenced items found.".format(num_unreferenced))
if not self.remove_unreferenced:
self.stdout.write("Re-run this command with --remove-unreferenced to remove these items") | See if there are items that no longer point to an existing parent. |
def get_organizer(self, id, **data):
"""
GET /organizers/:id/
Gets an :format:`organizer` by ID as ``organizer``.
"""
return self.get("/organizers/{0}/".format(id), data=data) | GET /organizers/:id/
Gets an :format:`organizer` by ID as ``organizer``. |
def _copy_from(self, node,
copy_leaves=True,
overwrite=False,
with_links=True):
"""Pass a ``node`` to insert the full tree to the trajectory.
Considers all links in the given node!
Ignores nodes already found in the current trajectory.
:param node: The node to insert
:param copy_leaves:
If leaves should be **shallow** copied or simply referred to by both trees.
**Shallow** copying is established using the copy module.
Accepts the setting ``'explored'`` to only copy explored parameters.
Note that ``v_full_copy`` determines how these will be copied.
:param overwrite:
If existing elements should be overwritten. Requires ``__getstate__`` and
``__setstate__`` being implemented in the leaves.
:param with_links: If links should be ignored or followed and copied as well
:return: The corresponding (new) node in the tree.
"""
def _copy_skeleton(node_in, node_out):
"""Copies the skeleton of from `node_out` to `node_in`"""
new_annotations = node_out.v_annotations
node_in._annotations = new_annotations
node_in.v_comment = node_out.v_comment
def _add_leaf(leaf):
"""Adds a leaf to the trajectory"""
leaf_full_name = leaf.v_full_name
try:
found_leaf = self.f_get(leaf_full_name,
with_links=False,
shortcuts=False,
auto_load=False)
if overwrite:
found_leaf.__setstate__(leaf.__getstate__())
return found_leaf
except AttributeError:
pass
if copy_leaves is True or (copy_leaves == 'explored' and
leaf.v_is_parameter and leaf.v_explored):
new_leaf = self.f_add_leaf(cp.copy(leaf))
else:
new_leaf = self.f_add_leaf(leaf)
if new_leaf.v_is_parameter and new_leaf.v_explored:
self._explored_parameters[new_leaf.v_full_name] = new_leaf
return new_leaf
def _add_group(group):
"""Adds a new group to the trajectory"""
group_full_name = group.v_full_name
try:
found_group = self.f_get(group_full_name,
with_links=False,
shortcuts=False,
auto_load=False)
if overwrite:
_copy_skeleton(found_group, group)
return found_group
except AttributeError:
pass
new_group = self.f_add_group(group_full_name)
_copy_skeleton(new_group, group)
return new_group
is_run = self._is_run
self._is_run = False # So that we can copy Config Groups and Config Data
try:
if node.v_is_leaf:
return _add_leaf(node)
elif node.v_is_group:
other_root = node.v_root
if other_root is self:
raise RuntimeError('You cannot copy a given tree to itself!')
result = _add_group(node)
nodes_iterator = node.f_iter_nodes(recursive=True, with_links=with_links)
has_links = []
if node._links:
has_links.append(node)
for child in nodes_iterator:
if child.v_is_leaf:
_add_leaf(child)
else:
_add_group(child)
if child._links:
has_links.append(child)
if with_links:
for current in has_links:
mine = self.f_get(current.v_full_name, with_links=False,
shortcuts=False, auto_load=False)
my_link_set = set(mine._links.keys())
other_link_set = set(current._links.keys())
new_links = other_link_set - my_link_set
for link in new_links:
where_full_name = current._links[link].v_full_name
mine.f_add_link(link, where_full_name)
return result
else:
raise RuntimeError('You shall not pass!')
except Exception:
self._is_run = is_run | Pass a ``node`` to insert the full tree to the trajectory.
Considers all links in the given node!
Ignores nodes already found in the current trajectory.
:param node: The node to insert
:param copy_leaves:
If leaves should be **shallow** copied or simply referred to by both trees.
**Shallow** copying is established using the copy module.
Accepts the setting ``'explored'`` to only copy explored parameters.
Note that ``v_full_copy`` determines how these will be copied.
:param overwrite:
If existing elements should be overwritten. Requires ``__getstate__`` and
``__setstate__`` being implemented in the leaves.
:param with_links: If links should be ignored or followed and copied as well
:return: The corresponding (new) node in the tree. |
def clear_modules(self):
"""
Clears the modules snapshot.
"""
for aModule in compat.itervalues(self.__moduleDict):
aModule.clear()
self.__moduleDict = dict() | Clears the modules snapshot. |
def add(self, name, path):
"""Add a workspace entry in user config file."""
if not (os.path.exists(path)):
raise ValueError("Workspace path `%s` doesn't exists." % path)
if (self.exists(name)):
raise ValueError("Workspace `%s` already exists." % name)
self.config["workspaces"][name] = {"path": path, "repositories": {}}
self.config.write() | Add a workspace entry in user config file. |
def read_raw_parser_conf(data: str) -> dict:
"""We expect to have a section like this
```
[commitizen]
name = cz_jira
files = [
"commitizen/__version__.py",
"pyproject.toml"
] # this tab at the end is important
```
"""
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(data)
try:
_data: dict = dict(config["commitizen"])
if "files" in _data:
files = _data["files"]
_f = json.loads(files)
_data.update({"files": _f})
return _data
except KeyError:
return {} | We expect to have a section like this
```
[commitizen]
name = cz_jira
files = [
"commitizen/__version__.py",
"pyproject.toml"
] # this tab at the end is important
``` |
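A minimal usage sketch for `read_raw_parser_conf` above, assuming the function (and its `configparser`/`json` imports) is in scope; the INI text mirrors the docstring example:

```python
raw = """\
[commitizen]
name = cz_jira
files = [
    "commitizen/__version__.py",
    "pyproject.toml"
    ]
"""

print(read_raw_parser_conf(raw))
# {'name': 'cz_jira', 'files': ['commitizen/__version__.py', 'pyproject.toml']}

print(read_raw_parser_conf("[tool.other]\nname = x"))
# {}  -- no [commitizen] section, so the KeyError branch returns an empty dict
```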
async def close(self):
"""Attempt to gracefully close all connections in the pool.
Wait until all pool connections are released, close them and
shut down the pool. If any error (including cancellation) occurs
in ``close()`` the pool will terminate by calling
:meth:`Pool.terminate() <pool.Pool.terminate>`.
It is advisable to use :func:`python:asyncio.wait_for` to set
a timeout.
.. versionchanged:: 0.16.0
``close()`` now waits until all pool connections are released
before closing them and the pool. Errors raised in ``close()``
will cause immediate pool termination.
"""
if self._closed:
return
self._check_init()
self._closing = True
warning_callback = None
try:
warning_callback = self._loop.call_later(
60, self._warn_on_long_close)
release_coros = [
ch.wait_until_released() for ch in self._holders]
await asyncio.gather(*release_coros, loop=self._loop)
close_coros = [
ch.close() for ch in self._holders]
await asyncio.gather(*close_coros, loop=self._loop)
except Exception:
self.terminate()
raise
finally:
if warning_callback is not None:
warning_callback.cancel()
self._closed = True
self._closing = False | Attempt to gracefully close all connections in the pool.
Wait until all pool connections are released, close them and
shut down the pool. If any error (including cancellation) occurs
in ``close()`` the pool will terminate by calling
:meth:`Pool.terminate() <pool.Pool.terminate>`.
It is advisable to use :func:`python:asyncio.wait_for` to set
a timeout.
.. versionchanged:: 0.16.0
``close()`` now waits until all pool connections are released
before closing them and the pool. Errors raised in ``close()``
will cause immediate pool termination. |
def solvent_per_layer(self):
"""Determine the number of solvent molecules per single layer. """
if self._solvent_per_layer:
return self._solvent_per_layer
assert not (self.solvent_per_lipid is None and self.n_solvent is None)
if self.solvent_per_lipid is not None:
assert self.n_solvent is None
self._solvent_per_layer = self.n_lipids_per_layer * self.solvent_per_lipid
elif self.n_solvent is not None:
assert self.solvent_per_lipid is None
self._solvent_per_layer = self.n_solvent / 2
return self._solvent_per_layer | Determine the number of solvent molecules per single layer. |
def bulk_insert(self, rows, return_model=False):
"""Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
Returns:
A list of either the dicts of the rows inserted, including the pk or
the models of the rows inserted with defaults for any fields not specified
"""
if self.conflict_target or self.conflict_action:
compiler = self._build_insert_compiler(rows)
objs = compiler.execute_sql(return_id=True)
if return_model:
return [self.model(**dict(r, **k)) for r, k in zip(rows, objs)]
else:
return [dict(r, **k) for r, k in zip(rows, objs)]
# no special action required, use the standard Django bulk_create(..)
return super().bulk_create([self.model(**fields) for fields in rows]) | Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
Returns:
A list of either the dicts of the rows inserted, including the pk or
the models of the rows inserted with defaults for any fields not specified |
def add(self, iocb):
"""Add an IOCB to the group, you can also add other groups."""
if _debug: IOGroup._debug("add %r", iocb)
# add this to our members
self.ioMembers.append(iocb)
# assume all of our members have not completed yet
self.ioState = PENDING
self.ioComplete.clear()
# when this completes, call back to the group. If this
# has already completed, it will trigger
iocb.add_callback(self.group_callback) | Add an IOCB to the group, you can also add other groups. |
def labels(self):
"""
Return the unique labels assigned to the documents.
"""
return [
name for name in os.listdir(self.root)
if os.path.isdir(os.path.join(self.root, name))
] | Return the unique labels assigned to the documents. |
def add_url(self, url, description=None):
"""Add a personal website.
Args:
:param url: url to the person's website.
:type url: string
:param description: short description of the website.
:type description: string
"""
url = {
'value': url,
}
if description:
url['description'] = description
self._append_to('urls', url) | Add a personal website.
Args:
:param url: url to the person's website.
:type url: string
:param description: short description of the website.
:type description: string |
def encode_all_features(dataset, vocabulary):
"""Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset
"""
def my_fn(features):
ret = {}
for k, v in features.items():
v = vocabulary.encode_tf(v)
v = tf.concat([tf.to_int64(v), [1]], 0)
ret[k] = v
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset |
def create(cls, env, filenames, trim=False):
"""Create and return a final graph.
Args:
env: An environment.Environment object
filenames: A list of filenames
trim: Whether to trim the dependencies of builtin and system files.
Returns:
An immutable ImportGraph with the recursive dependencies of all the
files in filenames
"""
import_graph = cls(env)
for filename in filenames:
import_graph.add_file_recursive(os.path.abspath(filename), trim)
import_graph.build()
return import_graph | Create and return a final graph.
Args:
env: An environment.Environment object
filenames: A list of filenames
trim: Whether to trim the dependencies of builtin and system files.
Returns:
An immutable ImportGraph with the recursive dependencies of all the
files in filenames |
def check_exists(path, type='file'):
""" Check if a file or a folder exists """
if type == 'file':
if not os.path.isfile(path):
raise RuntimeError('The file `%s` does not exist.' % path)
else:
if not os.path.isdir(path):
raise RuntimeError('The folder `%s` does not exist.' % path)
return True | Check if a file or a folder exists |
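A usage sketch for `check_exists`, assuming it is in scope; the paths are illustrative:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile() as tmp:
    print(check_exists(tmp.name))                                  # True
    print(check_exists(os.path.dirname(tmp.name), type='folder'))  # True

try:
    check_exists('/no/such/path')
except RuntimeError as exc:
    print(exc)  # The file `/no/such/path` does not exist.
```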
def api_walk(uri, per_page=100, key="login"):
"""
For a GitHub URI, walk all the pages until there's no more content
"""
page = 1
result = []
while True:
response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
if len(response) == 0:
break
else:
page += 1
for r in response:
if key == USER_LOGIN:
result.append(user_login(r))
else:
result.append(r[key])
return list(set(result)) | For a GitHub URI, walk all the pages until there's no more content |
def terminate_ex(self, nodes, threads=False, attempts=3):
"""Wrapper method for terminate.
:param nodes: Nodes to be destroyed.
:type nodes: ``list``
:param attempts: The amount of attempts for retrying to terminate failed instances.
:type attempts: ``int``
:param threads: Whether to use the threaded approach or not.
:type threads: ``bool``
"""
while nodes and attempts > 0:
if threads:
nodes = self.terminate_with_threads(nodes)
else:
nodes = self.terminate(nodes)
if nodes:
logger.info("Attempt to terminate the remaining instances once more.")
attempts -= 1
return nodes | Wrapper method for terminate.
:param nodes: Nodes to be destroyed.
:type nodes: ``list``
:param attempts: The amount of attempts for retrying to terminate failed instances.
:type attempts: ``int``
:param threads: Whether to use the threaded approach or not.
:type threads: ``bool`` |
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stop
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events) | Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stop
- poll on registered, with timeout of next scheduled, if something
is scheduled |
def page(self, number):
"""
Returns a Page object for the given 1-based page number.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
page_items = self.object_list[bottom:top]
# check moved from validate_number
if not page_items:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
return InfinitePage(page_items, number, self) | Returns a Page object for the given 1-based page number. |
async def update_data_status(self, **kwargs):
"""Update (PATCH) Data object.
:param kwargs: The dictionary of
:class:`~resolwe.flow.models.Data` attributes to be changed.
"""
await self._send_manager_command(ExecutorProtocol.UPDATE, extra_fields={
ExecutorProtocol.UPDATE_CHANGESET: kwargs
}) | Update (PATCH) Data object.
:param kwargs: The dictionary of
:class:`~resolwe.flow.models.Data` attributes to be changed. |
def get_neighbor_ip(ip_addr, cidr="30"):
"""
Function to figure out the IP addresses used between two neighbors
Args:
ip_addr: Unicast IP address in the following format 192.168.1.1
cidr: CIDR value of 30, or 31
Returns: returns Our IP and the Neighbor IP in a tuple
"""
our_octet = None
neighbor_octet = None
try:
ip_addr_split = ip_addr.split(".")
max_counter = 0
if int(cidr) == 30:
ranger = 4
elif int(cidr) == 31:
ranger = 2
while max_counter < 256:
try:
if int(ip_addr_split[3]) >= max_counter and int(ip_addr_split[3]) < (max_counter + ranger):
if ranger == 4:
our_octet = max_counter + 1
neighbor_octet = max_counter + 2
break
elif ranger == 2:
our_octet = max_counter
neighbor_octet = max_counter + 1
break
max_counter += ranger
except UnboundLocalError:
print("The mask between the neighbors must be 30, or 31")
exit("BAD NEIGHBOR MASK")
if int(ip_addr_split[3]) == our_octet:
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
elif int(ip_addr_split[3]) == neighbor_octet:
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
else:
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
return our_ip_addr, neighbor_ip_addr
except IndexError:
LOGGER.critical('Function get_neighbor_ip IndexError ip_addr {item} cidr {cidr}'.format(item=ip_addr,
cidr=cidr))
raise IndexError("You have entered invalid input, you must enter a ipv4 address") | Function to figure out the IP's between neighbors address
Args:
ip_addr: Unicast IP address in the following format 192.168.1.1
cidr: CIDR value of 30, or 31
Returns: returns Our IP and the Neighbor IP in a tuple |
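Worked examples for `get_neighbor_ip` above, with illustrative addresses:

```python
print(get_neighbor_ip("192.168.1.1", "30"))  # ('192.168.1.1', '192.168.1.2')
print(get_neighbor_ip("10.0.0.6", "30"))     # ('10.0.0.6', '10.0.0.5')
print(get_neighbor_ip("10.0.0.0", "31"))     # ('10.0.0.0', '10.0.0.1')
```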
def get_state_variable_from_storage(
self, address: str, params: Optional[List[str]] = None
) -> str:
"""
Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value
"""
params = params or []
(position, length, mappings) = (0, 1, [])
try:
if params[0] == "mapping":
if len(params) < 3:
raise CriticalError("Invalid number of parameters.")
position = int(params[1])
position_formatted = utils.zpad(utils.int_to_big_endian(position), 32)
for i in range(2, len(params)):
key = bytes(params[i], "utf8")
key_formatted = utils.rzpad(key, 32)
mappings.append(
int.from_bytes(
utils.sha3(key_formatted + position_formatted),
byteorder="big",
)
)
length = len(mappings)
if length == 1:
position = mappings[0]
else:
if len(params) >= 4:
raise CriticalError("Invalid number of parameters.")
if len(params) >= 1:
position = int(params[0])
if len(params) >= 2:
length = int(params[1])
if len(params) == 3 and params[2] == "array":
position_formatted = utils.zpad(
utils.int_to_big_endian(position), 32
)
position = int.from_bytes(
utils.sha3(position_formatted), byteorder="big"
)
except ValueError:
raise CriticalError(
"Invalid storage index. Please provide a numeric value."
)
outtxt = []
try:
if length == 1:
outtxt.append(
"{}: {}".format(
position, self.eth.eth_getStorageAt(address, position)
)
)
else:
if len(mappings) > 0:
for i in range(0, len(mappings)):
position = mappings[i]
outtxt.append(
"{}: {}".format(
hex(position),
self.eth.eth_getStorageAt(address, position),
)
)
else:
for i in range(position, position + length):
outtxt.append(
"{}: {}".format(
hex(i), self.eth.eth_getStorageAt(address, i)
)
)
except FileNotFoundError as e:
raise CriticalError("IPC error: " + str(e))
except ConnectionError:
raise CriticalError(
"Could not connect to RPC server. "
"Make sure that your node is running and that RPC parameters are set correctly."
)
return "\n".join(outtxt) | Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value |
def parse_argv(self, argv=None, location='Command line.'):
"""Parse command line arguments.
args <list str> or None:
The argument list to parse. None means use a copy of sys.argv. argv[0] is
ignored.
location = '' <str>:
A user friendly string describing where the parser got this
data from. '' means use "Command line." if args == None, and
"Builtin default." otherwise.
"""
if argv is None:
argv = list(sys.argv)
argv.pop(0)
self._parse_options(argv, location)
self._parse_positional_arguments(argv) | Parse command line arguments.
args <list str> or None:
The argument list to parse. None means use a copy of sys.argv. argv[0] is
ignored.
location = '' <str>:
A user friendly string describing where the parser got this
data from. '' means use "Command line." if args == None, and
"Builtin default." otherwise. |
async def close_authenticator_async(self):
"""Close the CBS auth channel and session asynchronously."""
_logger.info("Shutting down CBS session on connection: %r.", self._connection.container_id)
try:
self._cbs_auth.destroy()
_logger.info("Auth closed, destroying session on connection: %r.", self._connection.container_id)
await self._session.destroy_async()
finally:
_logger.info("Finished shutting down CBS session on connection: %r.", self._connection.container_id) | Close the CBS auth channel and session asynchronously. |
def formula_dual(input_formula: str) -> str:
""" Returns the dual of the input formula.
The dual operation on formulas in :math:`B^+(X)` is defined as:
the dual :math:`\overline{θ}` of a formula :math:`θ` is obtained from θ by
switching :math:`∧` and :math:`∨`, and
by switching :math:`true` and :math:`false`.
:param str input_formula: original string.
:return: *(str)*, dual of input formula.
"""
conversion_dictionary = {
'and': 'or',
'or': 'and',
'True': 'False',
'False': 'True'
}
return re.sub(
'|'.join(re.escape(key) for key in conversion_dictionary.keys()),
lambda k: conversion_dictionary[k.group(0)], input_formula) | Returns the dual of the input formula.
The dual operation on formulas in :math:`B^+(X)` is defined as:
the dual :math:`\overline{θ}` of a formula :math:`θ` is obtained from θ by
switching :math:`∧` and :math:`∨`, and
by switching :math:`true` and :math:`false`.
:param str input_formula: original string.
:return: *(str)*, dual of input formula. |
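A quick illustration of `formula_dual`; note the substitution is purely textual, so operator words embedded inside longer identifiers would also be replaced:

```python
print(formula_dual("a and (b or True)"))  # a or (b and False)
print(formula_dual("False or c"))         # True and c
```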
def get_total_contributors(self, repo):
"""
Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list.
"""
repo_contributors = 0
for contributor in repo.iter_contributors():
repo_contributors += 1
self.unique_contributors[contributor.id].append(repo.name)
self.contributors_json[repo.name].append(contributor.to_json())
return repo_contributors | Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list. |
def wait_until_done(self, timeout=None):
"""Wait for the background load to complete."""
start = datetime.now()
if not self.__th:
raise IndraDBRestResponseError("There is no thread waiting to "
"complete.")
self.__th.join(timeout)
now = datetime.now()
dt = now - start
if self.__th.is_alive():
logger.warning("Timed out after %0.3f seconds waiting for "
"statement load to complete." % dt.total_seconds())
ret = False
else:
logger.info("Waited %0.3f seconds for statements to finish loading."
% dt.total_seconds())
ret = True
return ret | Wait for the background load to complete. |
def manage_recurring_payments_profile_status(self, profileid, action,
note=None):
"""Shortcut to the ManageRecurringPaymentsProfileStatus method.
``profileid`` is the same profile id used for getting profile details.
``action`` should be either 'Cancel', 'Suspend', or 'Reactivate'.
``note`` is optional and is visible to the user. It contains the
reason for the change in status.
"""
args = self._sanitize_locals(locals())
if not note:
del args['note']
return self._call('ManageRecurringPaymentsProfileStatus', **args) | Shortcut to the ManageRecurringPaymentsProfileStatus method.
``profileid`` is the same profile id used for getting profile details.
``action`` should be either 'Cancel', 'Suspend', or 'Reactivate'.
``note`` is optional and is visible to the user. It contains the
reason for the change in status. |
def set_write_bit(fn):
# type: (str) -> None
"""
Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None
"""
fn = fs_encode(fn)
if not os.path.exists(fn):
return
file_stat = os.stat(fn).st_mode
os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if not os.path.isdir(fn):
for path in [fn, os.path.dirname(fn)]:
try:
os.chflags(path, 0)
except AttributeError:
pass
return None
for root, dirs, files in os.walk(fn, topdown=False):
for dir_ in [os.path.join(root, d) for d in dirs]:
set_write_bit(dir_)
for file_ in [os.path.join(root, f) for f in files]:
set_write_bit(file_) | Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None |
def distance_to_interval(self, start, end):
"""
Find the distance between intervals [start1, end1] and [start2, end2].
If the intervals overlap then the distance is 0.
"""
if self.start > end:
# interval is before this exon
return self.start - end
elif self.end < start:
# exon is before the interval
return start - self.end
else:
return 0 | Find the distance between intervals [start1, end1] and [start2, end2].
If the intervals overlap then the distance is 0. |
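A self-contained sketch of the same rule, written as a free function purely for illustration (the method above reads `start`/`end` from the exon instance):

```python
def interval_distance(a_start, a_end, b_start, b_end):
    # Distance between [a_start, a_end] and [b_start, b_end]; 0 on overlap.
    if a_start > b_end:
        return a_start - b_end
    if a_end < b_start:
        return b_start - a_end
    return 0

assert interval_distance(100, 200, 250, 300) == 50  # interval lies after [100, 200]
assert interval_distance(100, 200, 10, 40) == 60    # interval lies before [100, 200]
assert interval_distance(100, 200, 150, 400) == 0   # overlapping intervals
```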
def expanded_counts_map(self):
""" return the full counts map """
if self.hpx._ipix is None:
return self.counts
output = np.zeros(
(self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)
for i in range(self.counts.shape[0]):
output[i][self.hpx._ipix] = self.counts[i]
return output | return the full counts map |
def output(self, resource):
"""Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
rv = resource(*args, **kwargs)
rv = self.responder(rv)
return rv
return wrapper | Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function |
def record_launch(self, queue_id): # retcode):
"""Save submission"""
self.launches.append(
AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
return len(self.launches) | Save submission |
def safe_print(msg):
"""
Safely print a given Unicode string to stdout,
possibly replacing characters non-printable
in the current stdout encoding.
:param string msg: the message
"""
try:
print(msg)
except UnicodeEncodeError:
try:
# NOTE encoding and decoding so that in Python 3 no b"..." is printed
encoded = msg.encode(sys.stdout.encoding, "replace")
decoded = encoded.decode(sys.stdout.encoding, "replace")
print(decoded)
except (UnicodeDecodeError, UnicodeEncodeError):
print(u"[ERRO] An unexpected error happened while printing to stdout.")
print(u"[ERRO] Please check that your file/string encoding matches the shell encoding.")
print(u"[ERRO] If possible, set your shell encoding to UTF-8 and convert any files with legacy encodings.") | Safely print a given Unicode string to stdout,
possibly replacing characters non-printable
in the current stdout encoding.
:param string msg: the message |
def data_size(self, live_data=None):
"""Uses `nodetool info` to get the size of a node's data in KB."""
if live_data is not None:
warnings.warn("The 'live_data' keyword argument is deprecated.",
DeprecationWarning)
output = self.nodetool('info')[0]
return _get_load_from_info_output(output) | Uses `nodetool info` to get the size of a node's data in KB. |
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp, optional
end_date : pd.Timestamp, optional
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
) | Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp, optional
end_date : pd.Timestamp, optional
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets. |
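A hedged usage sketch for `make_simple_equity_info` above, assuming the function and pandas are in scope:

```python
import pandas as pd

info = make_simple_equity_info(
    sids=[1, 2, 3],
    start_date=pd.Timestamp('2020-01-01'),
    end_date=pd.Timestamp('2020-12-31'),
)
print(info.loc[2, 'symbol'])      # 'B'      -- auto-generated from 'A', 'B', ...
print(info.loc[2, 'asset_name'])  # 'B INC.'
```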
def set_usage_rights_courses(self, file_ids, course_id, usage_rights_use_justification, folder_ids=None, publish=None, usage_rights_legal_copyright=None, usage_rights_license=None):
"""
Set usage rights.
Sets copyright and license information for one or more files
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - file_ids
"""List of ids of files to set usage rights for."""
data["file_ids"] = file_ids
# OPTIONAL - folder_ids
"""List of ids of folders to search for files to set usage rights for.
Note that new files uploaded to these folders do not automatically inherit these rights."""
if folder_ids is not None:
data["folder_ids"] = folder_ids
# OPTIONAL - publish
"""Whether the file(s) or folder(s) should be published on save, provided that usage rights have been specified (set to `true` to publish on save)."""
if publish is not None:
data["publish"] = publish
# REQUIRED - usage_rights[use_justification]
"""The intellectual property justification for using the files in Canvas"""
self._validate_enum(usage_rights_use_justification, ["own_copyright", "used_by_permission", "fair_use", "public_domain", "creative_commons"])
data["usage_rights[use_justification]"] = usage_rights_use_justification
# OPTIONAL - usage_rights[legal_copyright]
"""The legal copyright line for the files"""
if usage_rights_legal_copyright is not None:
data["usage_rights[legal_copyright]"] = usage_rights_legal_copyright
# OPTIONAL - usage_rights[license]
"""The license that applies to the files. See the {api:UsageRightsController#licenses List licenses endpoint} for the supported license types."""
if usage_rights_license is not None:
data["usage_rights[license]"] = usage_rights_license
self.logger.debug("PUT /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, single_item=True) | Set usage rights.
Sets copyright and license information for one or more files |
def tcounts(self):
"""
:return: a data frame containing the names and sizes for all tables
"""
df = pd.DataFrame([[t.name(), t.size()] for t in self.tables()], columns=["name", "size"])
df.index = df.name
return df | :return: a data frame containing the names and sizes for all tables |
def _npy2fits(d, table_type='binary', write_bitcols=False):
"""
d is the full element from the descr
"""
npy_dtype = d[1][1:]
if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
name, form, dim = _npy_string2fits(d, table_type=table_type)
else:
name, form, dim = _npy_num2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
return name, form, dim | d is the full element from the descr |
def delay_on(self):
"""
The `timer` trigger will periodically change the LED brightness between
0 and the current brightness setting. The `on` time can
be specified via the `delay_on` attribute in milliseconds.
"""
# Workaround for ev3dev/ev3dev#225.
# 'delay_on' and 'delay_off' attributes are created when trigger is set
# to 'timer', and destroyed when it is set to anything else.
# This means the file cache may become outdated, and we may have to
# reopen the file.
for retry in (True, False):
try:
self._delay_on, value = self.get_attr_int(self._delay_on, 'delay_on')
return value
except OSError:
if retry:
self._delay_on = None
else:
raise | The `timer` trigger will periodically change the LED brightness between
0 and the current brightness setting. The `on` time can
be specified via the `delay_on` attribute in milliseconds. |
def import_from_api(request):
"""
Import a part of a source site's page tree via a direct API request from
this Wagtail Admin to the source site
The source site's base url and the source page id of the point in the
tree to import define what to import, and the destination parent page
defines where to import it to.
"""
if request.method == 'POST':
form = ImportFromAPIForm(request.POST)
if form.is_valid():
# remove trailing slash from base url
base_url = re.sub(r'\/$', '', form.cleaned_data['source_site_base_url'])
import_url = (
base_url + reverse('wagtailimportexport:export', args=[form.cleaned_data['source_page_id']])
)
r = requests.get(import_url)
import_data = r.json()
parent_page = form.cleaned_data['parent_page']
try:
page_count = import_pages(import_data, parent_page)
except LookupError as e:
messages.error(request, _(
"Import failed: %(reason)s") % {'reason': e}
)
else:
messages.success(request, ungettext(
"%(count)s page imported.",
"%(count)s pages imported.",
page_count) % {'count': page_count}
)
return redirect('wagtailadmin_explore', parent_page.pk)
else:
form = ImportFromAPIForm()
return render(request, 'wagtailimportexport/import_from_api.html', {
'form': form,
}) | Import a part of a source site's page tree via a direct API request from
this Wagtail Admin to the source site
The source site's base url and the source page id of the point in the
tree to import define what to import, and the destination parent page
defines where to import it to. |
async def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return await self.execute_command('SETEX', name, time, value) | Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object. |
def copy(self):
""" Create a copy of the mapping, including formatting information """
dup = type(self)()
dup._indices = OrderedDict(
(k, list(v)) for k,v in six.iteritems(self._indices)
)
dup._lines = self._lines.copy()
return dup | Create a copy of the mapping, including formatting information |
def set_figure(self, figure, handle=None):
"""Call this with the Bokeh figure object."""
self.figure = figure
self.bkimage = None
self._push_handle = handle
wd = figure.plot_width
ht = figure.plot_height
self.configure_window(wd, ht)
doc = curdoc()
self.logger.info(str(dir(doc)))
#doc.add_periodic_callback(self.timer_cb, 100)
self.logger.info("figure set") | Call this with the Bokeh figure object. |
def get(key, default=None):
"""Retrieves env vars and makes Python boolean replacements"""
val = os.environ.get(key, default)
if val == 'True':
val = True
elif val == 'False':
val = False
return val | Retrieves env vars and makes Python boolean replacements |
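Usage sketch for `get` above; the key names are illustrative:

```python
import os

os.environ['FEATURE_FLAG'] = 'True'
print(get('FEATURE_FLAG'))          # True  -- the string 'True' becomes a boolean
print(get('MISSING_KEY', 'False'))  # False -- defaults go through the same replacement
print(get('MISSING_KEY', 42))       # 42    -- non-string defaults pass through unchanged
```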
def substitution(self, substitution):
"""Add substitutions to the email
:param value: Add substitutions to the email
:type value: Substitution, list(Substitution)
"""
if isinstance(substitution, list):
for s in substitution:
self.add_substitution(s)
else:
self.add_substitution(substitution) | Add substitutions to the email
:param value: Add substitutions to the email
:type value: Substitution, list(Substitution) |
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) | Verify that install_requires is a valid requirements list |
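A usage sketch for `check_requirements`; the `dist` argument is not used by the check itself, so `None` suffices for this illustration (the error class is assumed to come from `distutils.errors`, as the raise above suggests):

```python
from distutils.errors import DistutilsSetupError

check_requirements(None, 'install_requires', ['requests>=2.0', 'six'])  # passes silently

try:
    check_requirements(None, 'install_requires', 42)  # not a string or list of strings
except DistutilsSetupError as exc:
    print(exc)
```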
def _check_algorithm_values(item):
"""Check for misplaced inputs in the algorithms.
- Identify incorrect boolean values where a choice is required.
"""
problems = []
for k, v in item.get("algorithm", {}).items():
if v is True and k not in ALG_ALLOW_BOOLEANS:
problems.append("%s set as true" % k)
elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
problems.append("%s set as false" % k)
if len(problems) > 0:
raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s"
"\nSee configuration documentation for supported options:\n%s\n"
% (item["description"], "\n".join(problems), ALG_DOC_URL)) | Check for misplaced inputs in the algorithms.
- Identify incorrect boolean values where a choice is required. |
def load(self, table_names=None, table_schemas=None, table_rowgens=None):
'''
Initiates the tables, schemas and record generators for this database.
Parameters
----------
table_names : list of str, str or None
List of tables to load into this database. If `auto_load` is true, inserting a record
into a new table not provided here will automatically create that table.
table_schemas : dict of <table_name, column_list> or None
Dictionary with each table name as a key and a list of its columns as value. Any keys
present here but not present in `table_names` will also trigger table creation, so
table names provided in both parameters are redundant but harmless.
table_rowgens: dict of <table_name, function> or None
For all tables present in the keys of the provided dictionary, when an insert operation
occurs, the corresponding function is called. The function must return a dictionary and
is used as a "base record" which is complemented by the actual record being inserted.
For example, when a table has a rowgen like `lambda: {"Timestamp": time.ctime()}` and
a record like `{"Name": "John"}` is inserted, the database will then contain a record
like `{"Timestamp": "Sun Jan 10 08:36:12 2016", "Name": "John"}`.
'''
# Check for table schemas
if table_schemas is not None:
table_schemas = self._check_case_dict(table_schemas, warn=True)
for schema_key, schema_value in table_schemas.items():
table_schemas[schema_key] = self._check_columns(schema_value, add_id=True)
elif not self.dynamic_schema:
raise ValueError('Table schemas must be provided if dynamic schema is disabled')
# Check for row generators
if table_rowgens is not None:
table_rowgens = self._check_case_dict(table_rowgens, warn=True)
# If table_names is not directly provided, infer it from one of the other parameters
if table_names is None:
if table_schemas is not None:
table_names = list(table_schemas.keys())
self._print(
'Inferring table name from table_schemas for tables %r'% table_names)
elif table_rowgens is not None:
table_names = list(table_rowgens.keys())
self._print(
'Inferring table name from table_rowgens for tables %r' % table_names)
else:
req_params = 'table_names,table_schemas,table_rowgens'
raise ValueError(
'At least one of the parameters must be provided: [%s]' % req_params)
table_names = self._check_table_names(table_names, warn=True)
self._print('Loading tables %r' % table_names)
# Update schemas and row generators without losing previous ones
for tname in table_names:
if table_schemas is not None and tname in table_schemas:
self._schemas[tname] = list(table_schemas[tname]) # make a copy
if table_rowgens is not None and tname in table_rowgens:
self._rowgens[tname] = table_rowgens[tname]
with self._lock:
for tname in table_names:
# Standardize case, since Windows paths are case insensitive
tname = self._check_case_str(tname, warn=True)
# CSV has same filename as table under database folder
tpath = os.path.join(self.root_dir, self.name, tname + '.csv')
# Table already exists, simply load it
if os.path.isfile(tpath):
if self.auto_load:
dataframe = read_csv(tpath, dtype=str)
self._db[tname] = dataframe
schema = self._check_columns(dataframe.columns.tolist())
self._schemas[tname] = schema
elif self.persistent:
raise ValueError(
'Auto load tables is disabled but table "%s" already exists and would '
'be overwritten' % tname)
# Table not found, try to create it using given schema
elif table_schemas is not None and tname in self._schemas:
self._db[tname] = DataFrame(columns=self._schemas[tname], dtype=str)
# Table not found, dynamic schema
elif self.dynamic_schema:
self._print('Creating table "%s" using dynamic schema' % tname)
self._db[tname] = DataFrame(columns=self._blank_schema, dtype=str)
self._schemas[tname] = list(self._blank_schema)
# Table not found and schema not given when dynamic_schema not enabled
else:
raise ValueError(
'Table %s not found and schema was not passed as a parameter' % tname) | Initiates the tables, schemas and record generators for this database.
Parameters
----------
table_names : list of str, str or None
List of tables to load into this database. If `auto_load` is true, inserting a record
into a new table not provided here will automatically create that table.
table_schemas : dict of <table_name, column_list> or None
Dictionary with each table name as a key and a list of its columns as value. Any keys
present here but not present in `table_names` will also trigger table creation, so
table names provided in both parameters are redundant but harmless.
table_rowgens: dict of <table_name, function> or None
For all tables present in the keys of the provided dictionary, when an insert operation
occurs, the corresponding function is called. The function must return a dictionary and
is used as a "base record" which is complemented by the actual record being inserted.
For example, when a table has a rowgen like `lambda: {"Timestamp": time.ctime()}` and
a record like `{"Name": "John"}` is inserted, the database will then contain a record
like `{"Timestamp": "Sun Jan 10 08:36:12 2016", "Name": "John"}`. |
def filter_reads(self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30):
"""
Remove duplicates, filter for >Q, remove multiple mapping reads.
For paired-end reads, keep only proper pairs.
"""
nodups = re.sub(r"\.bam$", "", output_bam) + ".nodups.nofilter.bam"
cmd1 = self.tools.sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}".format(cpus, input_bam, nodups, metrics_file)
cmd2 = self.tools.sambamba + ' view -t {0} -f bam --valid'.format(cpus)
if paired:
cmd2 += ' -F "not (unmapped or mate_is_unmapped) and proper_pair'
else:
cmd2 += ' -F "not unmapped'
cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}"'.format(Q)
cmd2 += ' {0} |'.format(nodups)
cmd2 += self.tools.sambamba + " sort -t {0} /dev/stdin -o {1}".format(cpus, output_bam)
cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups)
cmd4 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups + ".bai")
return [cmd1, cmd2, cmd3, cmd4] | Remove duplicates, filter for >Q, remove multiple mapping reads.
For paired-end reads, keep only proper pairs. |
def _from_dict(cls, _dict):
"""Initialize a TopHitsResults object from a json dictionary."""
args = {}
if 'matching_results' in _dict:
args['matching_results'] = _dict.get('matching_results')
if 'hits' in _dict:
args['hits'] = [
QueryResult._from_dict(x) for x in (_dict.get('hits'))
]
return cls(**args) | Initialize a TopHitsResults object from a json dictionary. |
def call_rpc(*inputs, **kwargs):
"""Call an RPC based on the encoded value read from input b.
The response of the RPC must be a 4 byte value that is used as
the output of this call. The encoded RPC must be a 32 bit value
encoded as "BBH":
B: ignored, should be 0
B: the address of the tile that we should call
H: The id of the RPC to call
All other readings are then skipped so that there are no
readings in any input queue when this function returns
Returns:
list(IOTileReading)
"""
rpc_executor = kwargs['rpc_executor']
output = []
try:
value = inputs[1].pop()
addr = value.value >> 16
rpc_id = value.value & 0xFFFF
reading_value = rpc_executor.rpc(addr, rpc_id)
output.append(IOTileReading(0, 0, reading_value))
except (HardwareError, StreamEmptyError):
pass
for input_x in inputs:
input_x.skip_all()
return output | Call an RPC based on the encoded value read from input b.
The response of the RPC must be a 4 byte value that is used as
the output of this call. The encoded RPC must be a 32 bit value
encoded as "BBH":
B: ignored, should be 0
B: the address of the tile that we should call
H: The id of the RPC to call
All other readings are then skipped so that there are no
readings in any input queue when this function returns
Returns:
list(IOTileReading) |
def init(plugin_manager, _, _2, _3):
"""
Init the plugin.
Available configuration in configuration.yaml:
::
- plugin_module: "inginious.frontend.plugins.scoreboard"
Available configuration in course.yaml:
::
- scoreboard: #you can define multiple scoreboards
- content: "taskid1" #creates a scoreboard for taskid1
name: "Scoreboard task 1"
- content: ["taskid2", "taskid3"] #creates a scoreboard for taskid2 and taskid3 (sum of both score is taken as overall score)
name: "Scoreboard for task 2 and 3"
- content: {"taskid4": 2, "taskid5": 3} #creates a scoreboard where overall score is 2*score of taskid4 + 3*score of taskid5
name: "Another scoreboard"
reverse: True #reverse the score (less is better)
"""
page_pattern_course = r'/scoreboard/([a-z0-9A-Z\-_]+)'
page_pattern_scoreboard = r'/scoreboard/([a-z0-9A-Z\-_]+)/([0-9]+)'
plugin_manager.add_page(page_pattern_course, ScoreBoardCourse)
plugin_manager.add_page(page_pattern_scoreboard, ScoreBoard)
plugin_manager.add_hook('course_menu', course_menu)
plugin_manager.add_hook('task_menu', task_menu) | Init the plugin.
Available configuration in configuration.yaml:
::
- plugin_module: "inginious.frontend.plugins.scoreboard"
Available configuration in course.yaml:
::
- scoreboard: #you can define multiple scoreboards
- content: "taskid1" #creates a scoreboard for taskid1
name: "Scoreboard task 1"
- content: ["taskid2", "taskid3"] #creates a scoreboard for taskid2 and taskid3 (sum of both score is taken as overall score)
name: "Scoreboard for task 2 and 3"
- content: {"taskid4": 2, "taskid5": 3} #creates a scoreboard where overall score is 2*score of taskid4 + 3*score of taskid5
name: "Another scoreboard"
reverse: True #reverse the score (less is better) |
def cite(self, max_authors=5):
"""
Return string with a citation for the record, formatted as:
'{authors} ({year}). {title} {journal} {volume}({issue}): {pages}.'
"""
citation_data = {
'title': self.title,
'authors': self.authors_et_al(max_authors),
'year': self.year,
'journal': self.journal,
'volume': self.volume,
'issue': self.issue,
'pages': self.pages,
}
citation = "{authors} ({year}). {title} {journal}".format(
**citation_data)
if self.volume and self.issue and self.pages:
citation += " {volume}({issue}): {pages}.".format(**citation_data)
elif self.volume and self.issue:
citation += " {volume}({issue}).".format(**citation_data)
elif self.volume and self.pages:
citation += " {volume}: {pages}.".format(**citation_data)
elif self.volume:
citation += " {volume}.".format(**citation_data)
elif self.pages:
citation += " {pages}.".format(**citation_data)
else:
citation += "."
return citation | Return string with a citation for the record, formatted as:
'{authors} ({year}). {title} {journal} {volume}({issue}): {pages}.' |
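A worked example of the formatting `cite` produces when volume, issue, and pages are all present; the record values here are hypothetical:

```python
citation_data = {
    'authors': 'Smith J, Doe A', 'year': 2020, 'title': 'A study of things.',
    'journal': 'Nature', 'volume': '5', 'issue': '2', 'pages': '10-20',
}
citation = "{authors} ({year}). {title} {journal}".format(**citation_data)
citation += " {volume}({issue}): {pages}.".format(**citation_data)
print(citation)
# Smith J, Doe A (2020). A study of things. Nature 5(2): 10-20.
```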
def news(symbol, count=10, token='', version=''):
'''News about company
https://iexcloud.io/docs/api/#news
Continuous
Args:
symbol (string); Ticker to request
count (int): limit number of results
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
return _getJson('stock/' + symbol + '/news/last/' + str(count), token, version) | News about company
https://iexcloud.io/docs/api/#news
Continuous
Args:
symbol (string); Ticker to request
count (int): limit number of results
token (string); Access token
version (string); API version
Returns:
dict: result |
def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
"""
{_gate_plot_doc}
"""
if ax == None:
ax = pl.gca()
kwargs.setdefault('color', 'black')
if ax_channels is not None:
flip = self._find_orientation(ax_channels)
if not flip:
a1 = ax.axes.axvline(self.vert[0], *args, **kwargs)
a2 = ax.axes.axhline(self.vert[1], *args, **kwargs)
else:
a1 = ax.axes.axvline(self.vert[1], *args, **kwargs)
a2 = ax.axes.axhline(self.vert[0], *args, **kwargs)
return (a1, a2) | {_gate_plot_doc} |
def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
return self._binary_op(
"pow", other, axis=axis, level=level, fill_value=fill_value
) | Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied. |
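The signature mirrors the pandas binary-operator API, so a plain pandas frame illustrates the expected semantics; this is a usage sketch under the assumption that the wrapping DataFrame behaves like pandas here.
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
print(df.pow(2))                                            # element-wise square
print(df.pow(pd.Series({'a': 2, 'b': 3}), axis='columns'))  # per-column exponents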
def from_string(cls, string):
"""
Parse ``string`` into a CPPType instance
"""
cls.TYPE.setParseAction(cls.make)
try:
return cls.TYPE.parseString(string, parseAll=True)[0]
except ParseException:
log.error("Failed to parse '{0}'".format(string))
raise | Parse ``string`` into a CPPType instance |
def cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda):
"""
Rank-1 operation on real general matrix.
"""
status = _libcublas.cublasSger_v2(handle,
m, n,
ctypes.byref(ctypes.c_float(alpha)),
int(x), incx,
int(y), incy, int(A), lda)
cublasCheckStatus(status) | Rank-1 operation on real general matrix. |
def set_quota_volume(name, path, size, enable_quota=False):
'''
Set quota to glusterfs volume.
name
Name of the gluster volume
path
Folder path for restriction in volume ("/")
size
Hard-limit size of the volume (MB/GB)
enable_quota
        Enable quota before setting up the restriction
CLI Example:
.. code-block:: bash
salt '*' glusterfs.set_quota_volume <volume> <path> <size> enable_quota=True
'''
cmd = 'volume quota {0}'.format(name)
if path:
cmd += ' limit-usage {0}'.format(path)
if size:
cmd += ' {0}'.format(size)
if enable_quota:
if not enable_quota_volume(name):
pass
if not _gluster(cmd):
return False
return True | Set quota to glusterfs volume.
name
Name of the gluster volume
path
Folder path for restriction in volume ("/")
size
Hard-limit size of the volume (MB/GB)
enable_quota
Enable quota before setting up the restriction
CLI Example:
.. code-block:: bash
salt '*' glusterfs.set_quota_volume <volume> <path> <size> enable_quota=True |
def follow_bytes(self, s, index):
"Follows transitions."
for ch in s:
index = self.follow_char(int_from_byte(ch), index)
if index is None:
return None
return index | Follows transitions. |
def removeTab(self, index):
"""
    Removes the tab at the given index.
:param index | <int>
"""
curr_index = self.currentIndex()
items = list(self.items())
item = items[index]
item.close()
if index <= curr_index:
        self._currentIndex -= 1 | Removes the tab at the given index.
:param index | <int> |
def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(connectionAddress) is str):
connectionAddress=connectionAddress.encode('utf-8')
return c_Start(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs) | Please have a look at the function description/documentation in the V-REP user manual |
def hasReaders(self, ulBuffer):
"""inexpensively checks for readers to allow writers to fast-fail potentially expensive copies and writes."""
fn = self.function_table.hasReaders
result = fn(ulBuffer)
return result | inexpensively checks for readers to allow writers to fast-fail potentially expensive copies and writes. |
def apply_mask(self, x=None):
'''
Returns the outlier mask, an array of indices corresponding to the
non-outliers.
:param numpy.ndarray x: If specified, returns the masked version of \
:py:obj:`x` instead. Default :py:obj:`None`
'''
if x is None:
return np.delete(np.arange(len(self.time)), self.mask)
else:
return np.delete(x, self.mask, axis=0) | Returns the outlier mask, an array of indices corresponding to the
non-outliers.
:param numpy.ndarray x: If specified, returns the masked version of \
:py:obj:`x` instead. Default :py:obj:`None` |
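A hedged sketch of the masking semantics with plain numpy arrays; `time` and `mask` are hypothetical stand-ins for the instance attributes used above.
import numpy as np

time = np.arange(6)                 # stand-in for self.time
mask = np.array([1, 4])             # stand-in for self.mask (outlier indices)
print(np.delete(np.arange(len(time)), mask))   # [0 2 3 5] -> non-outlier indices
x = np.arange(12).reshape(6, 2)
print(np.delete(x, mask, axis=0))              # rows 1 and 4 dropped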
def command_x(self, x, to=None):
"""
Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command.
"""
if to is None:
ActionChains(self.driver) \
.send_keys([Keys.COMMAND, x, Keys.COMMAND]) \
.perform()
else:
self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND]) | Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command. |
def crop_frequencies(self, low=None, high=None, copy=False):
"""Crop this `Spectrogram` to the specified frequencies
Parameters
----------
low : `float`
lower frequency bound for cropped `Spectrogram`
high : `float`
upper frequency bound for cropped `Spectrogram`
copy : `bool`
if `False` return a view of the original data, otherwise create
a fresh memory copy
Returns
-------
spec : `Spectrogram`
A new `Spectrogram` with a subset of data from the frequency
axis
"""
if low is not None:
low = units.Quantity(low, self._default_yunit)
if high is not None:
high = units.Quantity(high, self._default_yunit)
# check low frequency
if low is not None and low == self.f0:
low = None
elif low is not None and low < self.f0:
warnings.warn('Spectrogram.crop_frequencies given low frequency '
'cutoff below f0 of the input Spectrogram. Low '
'frequency crop will have no effect.')
# check high frequency
if high is not None and high.value == self.band[1]:
high = None
elif high is not None and high.value > self.band[1]:
warnings.warn('Spectrogram.crop_frequencies given high frequency '
'cutoff above cutoff of the input Spectrogram. High '
'frequency crop will have no effect.')
# find low index
if low is None:
idx0 = None
else:
idx0 = int(float(low.value - self.f0.value) // self.df.value)
# find high index
if high is None:
idx1 = None
else:
idx1 = int(float(high.value - self.f0.value) // self.df.value)
# crop
if copy:
return self[:, idx0:idx1].copy()
return self[:, idx0:idx1] | Crop this `Spectrogram` to the specified frequencies
Parameters
----------
low : `float`
lower frequency bound for cropped `Spectrogram`
high : `float`
upper frequency bound for cropped `Spectrogram`
copy : `bool`
if `False` return a view of the original data, otherwise create
a fresh memory copy
Returns
-------
spec : `Spectrogram`
A new `Spectrogram` with a subset of data from the frequency
axis |
def format_choices(self):
"""Return the choices in string form."""
ce = enumerate(self.choices)
f = lambda i, c: '%s (%d)' % (c, i+1)
# apply formatter and append help token
toks = [f(i,c) for i, c in ce] + ['Help (?)']
return ' '.join(toks) | Return the choices in string form. |
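For a hypothetical `self.choices = ['Yes', 'No']`, the same formatting logic produces the prompt shown in the comment below:
choices = ['Yes', 'No']
toks = ['%s (%d)' % (c, i + 1) for i, c in enumerate(choices)] + ['Help (?)']
print(' '.join(toks))  # Yes (1) No (2) Help (?)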
def set_current_thumbnail(self, thumbnail):
"""Set the currently selected thumbnail."""
self.current_thumbnail = thumbnail
self.figure_viewer.load_figure(
thumbnail.canvas.fig, thumbnail.canvas.fmt)
for thumbnail in self._thumbnails:
thumbnail.highlight_canvas(thumbnail == self.current_thumbnail) | Set the currently selected thumbnail. |
def text_entry(self):
""" Relay literal text entry from user to Roku until
<Enter> or <Esc> pressed. """
allowed_sequences = set(['KEY_ENTER', 'KEY_ESCAPE', 'KEY_DELETE'])
sys.stdout.write('Enter text (<Esc> to abort) : ')
sys.stdout.flush()
# Track start column to ensure user doesn't backspace too far
start_column = self.term.get_location()[1]
cur_column = start_column
with self.term.cbreak():
val = ''
while val != 'KEY_ENTER' and val != 'KEY_ESCAPE':
val = self.term.inkey()
if not val:
continue
elif val.is_sequence:
val = val.name
if val not in allowed_sequences:
continue
if val == 'KEY_ENTER':
self.roku.enter()
elif val == 'KEY_ESCAPE':
pass
elif val == 'KEY_DELETE':
self.roku.backspace()
if cur_column > start_column:
sys.stdout.write(u'\b \b')
cur_column -= 1
else:
self.roku.literal(val)
sys.stdout.write(val)
cur_column += 1
sys.stdout.flush()
# Clear to beginning of line
sys.stdout.write(self.term.clear_bol)
sys.stdout.write(self.term.move(self.term.height, 0))
sys.stdout.flush() | Relay literal text entry from user to Roku until
<Enter> or <Esc> pressed. |
def get_dict(self):
"""
Returns a dict containing the host's attributes. The following
keys are contained:
- hostname
- address
- protocol
- port
:rtype: dict
:return: The resulting dictionary.
"""
return {'hostname': self.get_name(),
'address': self.get_address(),
'protocol': self.get_protocol(),
'port': self.get_tcp_port()} | Returns a dict containing the host's attributes. The following
keys are contained:
- hostname
- address
- protocol
- port
:rtype: dict
:return: The resulting dictionary. |
def clear_graph(identifier=None):
""" Clean up a graph by removing it
:param identifier: Root identifier of the graph
:return:
"""
graph = get_graph()
if identifier:
graph.destroy(identifier)
try:
graph.close()
    except Exception:
warn("Unable to close the Graph") | Clean up a graph by removing it
:param identifier: Root identifier of the graph
:return: |
def local_global_attention(x,
self_attention_bias,
hparams,
q_padding="LEFT",
kv_padding="LEFT"):
"""Local and global 1d self attention."""
with tf.variable_scope("self_local_global_att"):
[x_global, x_local] = tf.split(x, 2, axis=-1)
split_hidden_size = int(hparams.hidden_size / 2)
split_heads = int(hparams.num_heads / 2)
if self_attention_bias is not None:
self_attention_bias = get_self_attention_bias(x)
y_global = common_attention.multihead_attention(
x_global,
None,
self_attention_bias,
hparams.attention_key_channels or split_hidden_size,
hparams.attention_value_channels or split_hidden_size,
split_hidden_size,
split_heads,
hparams.attention_dropout,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="global_self_att")
y_local = common_attention.multihead_attention(
x_local,
None,
None,
hparams.attention_key_channels or split_hidden_size,
hparams.attention_value_channels or split_hidden_size,
split_hidden_size,
split_heads,
hparams.attention_dropout,
attention_type="local_masked",
block_length=hparams.block_length,
block_width=hparams.block_width,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="local_self_att")
y = tf.concat([y_global, y_local], axis=-1)
return y | Local and global 1d self attention. |
def compute(self, inputs, outputs):
"""
Run one iteration of TM's compute.
"""
# Handle reset first (should be sent with an empty signal)
if "resetIn" in inputs:
assert len(inputs["resetIn"]) == 1
if inputs["resetIn"][0] != 0:
# send empty output
self._tm.reset()
outputs["activeCells"][:] = 0
outputs["nextPredictedCells"][:] = 0
outputs["predictedActiveCells"][:] = 0
outputs["winnerCells"][:] = 0
return
activeColumns = inputs["activeColumns"].nonzero()[0]
if "apicalInput" in inputs:
apicalInput = inputs["apicalInput"].nonzero()[0]
else:
apicalInput = np.empty(0, dtype="uint32")
if "apicalGrowthCandidates" in inputs:
apicalGrowthCandidates = inputs["apicalGrowthCandidates"].nonzero()[0]
else:
apicalGrowthCandidates = apicalInput
self._tm.compute(activeColumns, apicalInput, apicalGrowthCandidates,
self.learn)
# Extract the active / predicted cells and put them into binary arrays.
outputs["activeCells"][:] = 0
outputs["activeCells"][self._tm.getActiveCells()] = 1
outputs["nextPredictedCells"][:] = 0
outputs["nextPredictedCells"][
self._tm.getNextPredictedCells()] = 1
outputs["predictedActiveCells"][:] = 0
outputs["predictedActiveCells"][
self._tm.getPredictedActiveCells()] = 1
outputs["winnerCells"][:] = 0
outputs["winnerCells"][self._tm.getWinnerCells()] = 1 | Run one iteration of TM's compute. |
def root_manifest_id(self, root_manifest_id):
"""
Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str
"""
if root_manifest_id is not None and len(root_manifest_id) > 32:
raise ValueError("Invalid value for `root_manifest_id`, length must be less than or equal to `32`")
self._root_manifest_id = root_manifest_id | Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str |
def list_namespaced_stateful_set(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_stateful_set # noqa: E501
list or watch objects of kind StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs) # noqa: E501
return data | list_namespaced_stateful_set # noqa: E501
list or watch objects of kind StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread. |
def create_wsgi_request(event, server_name='apigw'):
"""Create a wsgi environment from an apigw request.
"""
path = urllib.url2pathname(event['path'])
script_name = (
event['headers']['Host'].endswith('.amazonaws.com') and
event['requestContext']['stage'] or '').encode('utf8')
query = event['queryStringParameters']
query_string = query and urllib.urlencode(query) or ""
body = event['body'] and event['body'].encode('utf8') or ''
environ = {
'HTTPS': 'on',
'PATH_INFO': path.encode('utf8'),
'QUERY_STRING': query_string.encode('utf8'),
'REMOTE_ADDR': event[
'requestContext']['identity']['sourceIp'].encode('utf8'),
'REQUEST_METHOD': event['httpMethod'].encode('utf8'),
'SCRIPT_NAME': script_name,
'SERVER_NAME': server_name.encode('utf8'),
'SERVER_PORT': '80'.encode('utf8'),
'SERVER_PROTOCOL': u'HTTP/1.1'.encode('utf8'),
'wsgi.errors': sys.stderr,
'wsgi.input': StringIO(body),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': u'https'.encode('utf8'),
'wsgi.version': (1, 0),
}
headers = event['headers']
# Input processing
if event['httpMethod'] in ("POST", "PUT", "PATCH"):
if 'Content-Type' in headers:
environ['CONTENT_TYPE'] = headers['Content-Type']
environ['CONTENT_LENGTH'] = str(len(body))
for header in list(event['headers'].keys()):
wsgi_name = "HTTP_" + header.upper().replace('-', '_')
environ[wsgi_name] = headers[header].encode('utf8')
if script_name:
path_info = environ['PATH_INFO']
if script_name in path_info:
            environ['PATH_INFO'] = path_info.replace(script_name, '')
# Extract remote user from event
remote_user = None
if event['requestContext'].get('authorizer'):
remote_user = event[
'requestContext']['authorizer'].get('principalId')
elif event['requestContext'].get('identity'):
remote_user = event['requestContext']['identity'].get('userArn')
if remote_user:
environ['REMOTE_USER'] = remote_user
# apigw aware integrations
environ['apigw.request'] = event['requestContext']
environ['apigw.stagevars'] = event['stageVariables']
return environ | Create a wsgi environment from an apigw request. |
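A hypothetical, minimal API Gateway proxy event containing only the keys the function reads; the handler above is written for Python 2, so the call is shown under that assumption.
event = {
    'path': '/hello',
    'httpMethod': 'GET',
    'headers': {'Host': 'abc123.execute-api.us-east-1.amazonaws.com'},
    'queryStringParameters': {'q': '1'},
    'body': None,
    'stageVariables': None,
    'requestContext': {
        'stage': 'prod',
        'identity': {'sourceIp': '1.2.3.4', 'userArn': None},
    },
}
environ = create_wsgi_request(event, server_name='apigw')
print(environ['PATH_INFO'], environ['REQUEST_METHOD'], environ['QUERY_STRING'])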
def flatten(text):
"""
Flatten the text:
* make sure each record is on one line.
    * remove parentheses
"""
lines = text.split("\n")
# tokens: sequence of non-whitespace separated by '' where a newline was
tokens = []
for l in lines:
if len(l) == 0:
continue
l = l.replace("\t", " ")
        tokens += [tok for tok in l.split(" ") if tok] + ['']
# find (...) and turn it into a single line ("capture" it)
capturing = False
captured = []
flattened = []
while len(tokens) > 0:
tok = tokens.pop(0)
if not capturing and len(tok) == 0:
# normal end-of-line
if len(captured) > 0:
flattened.append(" ".join(captured))
captured = []
continue
if tok.startswith("("):
# begin grouping
tok = tok.lstrip("(")
capturing = True
if capturing and tok.endswith(")"):
# end grouping. next end-of-line will turn this sequence into a flat line
tok = tok.rstrip(")")
capturing = False
captured.append(tok)
return "\n".join(flattened) | Flatten the text:
* make sure each record is on one line.
* remove parentheses |
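An illustrative call on a small, hypothetical BIND-zone-style snippet; each parenthesised group is collapsed onto a single line.
text = "example.com. IN SOA (ns1.example.com.\n  admin.example.com.)\nwww IN A 1.2.3.4"
print(flatten(text))
# Prints the SOA record on one line, followed by the A record on its own line.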
def wishart_pairwise_pvals(self, axis=0):
"""Return square symmetric matrix of pairwise column-comparison p-values.
Square, symmetric matrix along *axis* of pairwise p-values for the
null hypothesis that col[i] = col[j] for each pair of columns.
*axis* (int): axis along which to perform comparison. Only columns (0)
are implemented currently.
"""
if axis != 0:
raise NotImplementedError("Pairwise comparison only implemented for colums")
return WishartPairwiseSignificance.pvals(self, axis=axis) | Return square symmetric matrix of pairwise column-comparison p-values.
Square, symmetric matrix along *axis* of pairwise p-values for the
null hypothesis that col[i] = col[j] for each pair of columns.
*axis* (int): axis along which to perform comparison. Only columns (0)
are implemented currently. |
def parse_binary_descriptor(bindata):
"""Convert a binary node descriptor into a string descriptor.
    Binary node descriptors are 20-byte binary structures that encode all
    information needed to create a graph node. They are used to communicate
    that information to an embedded device in an efficient format. This
    function exists to turn such a compressed node description back into
    an understandable string.
Args:
bindata (bytes): The raw binary structure that contains the node
description.
Returns:
str: The corresponding string description of the same sensor_graph node
"""
func_names = {0: 'copy_latest_a', 1: 'average_a',
2: 'copy_all_a', 3: 'sum_a',
4: 'copy_count_a', 5: 'trigger_streamer',
6: 'call_rpc', 7: 'subtract_afromb'}
if len(bindata) != 20:
raise ArgumentError("Invalid binary node descriptor with incorrect size", size=len(bindata), expected=20, bindata=bindata)
a_trig, b_trig, stream_id, a_id, b_id, proc, a_cond, b_cond, trig_combiner = struct.unpack("<LLHHHBBBB2x", bindata)
node_stream = DataStream.FromEncoded(stream_id)
if a_id == 0xFFFF:
raise ArgumentError("Invalid binary node descriptor with invalid first input", input_selector=a_id)
a_selector = DataStreamSelector.FromEncoded(a_id)
a_trigger = _process_binary_trigger(a_trig, a_cond)
b_selector = None
b_trigger = None
if b_id != 0xFFFF:
b_selector = DataStreamSelector.FromEncoded(b_id)
b_trigger = _process_binary_trigger(b_trig, b_cond)
if trig_combiner == SGNode.AndTriggerCombiner:
comb = '&&'
elif trig_combiner == SGNode.OrTriggerCombiner:
comb = '||'
else:
raise ArgumentError("Invalid trigger combiner in binary node descriptor", combiner=trig_combiner)
if proc not in func_names:
raise ArgumentError("Unknown processing function", function_id=proc, known_functions=func_names)
func_name = func_names[proc]
# Handle one input nodes
if b_selector is None:
return '({} {}) => {} using {}'.format(a_selector, a_trigger, node_stream, func_name)
return '({} {} {} {} {}) => {} using {}'.format(a_selector, a_trigger, comb,
b_selector, b_trigger,
node_stream, func_name) | Convert a binary node descriptor into a string descriptor.
Binary node descriptors are 20-byte binary structures that encode all
information needed to create a graph node. They are used to communicate
that information to an embedded device in an efficient format. This
function exists to turn such a compressed node description back into
an understandable string.
Args:
bindata (bytes): The raw binary structure that contains the node
description.
Returns:
str: The corresponding string description of the same sensor_graph node |
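A quick sanity check of the assumed binary layout: the struct format string used above packs to exactly the 20 bytes the function requires.
import struct

# "<LLHHHBBBB2x": a_trigger(u32) b_trigger(u32) stream_id(u16) a_id(u16)
#                 b_id(u16) processor(u8) a_cond(u8) b_cond(u8) combiner(u8) + 2 pad bytes
print(struct.calcsize("<LLHHHBBBB2x"))  # 20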
def split_header(fp):
"""
Read file pointer and return pair of lines lists:
first - header, second - the rest.
"""
body_start, header_ended = 0, False
lines = []
for line in fp:
if line.startswith('#') and not header_ended:
# Header text
body_start += 1
else:
header_ended = True
lines.append(line)
return lines[:body_start], lines[body_start:] | Read file pointer and return pair of lines lists:
first - header, second - the rest. |
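Usage sketch with an in-memory file object standing in for the file pointer:
import io

fp = io.StringIO("# comment 1\n# comment 2\nbody line 1\nbody line 2\n")
header, body = split_header(fp)
print(header)  # ['# comment 1\n', '# comment 2\n']
print(body)    # ['body line 1\n', 'body line 2\n']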
def moment_sequence(self):
r"""
Create a generator to calculate the population mean and
    variance-covariance matrix for both :math:`x_t` and :math:`y_t`
starting at the initial condition (self.mu_0, self.Sigma_0).
Each iteration produces a 4-tuple of items (mu_x, mu_y, Sigma_x,
Sigma_y) for the next period.
Yields
------
mu_x : array_like(float)
An n x 1 array representing the population mean of x_t
mu_y : array_like(float)
A k x 1 array representing the population mean of y_t
Sigma_x : array_like(float)
An n x n array representing the variance-covariance matrix
of x_t
Sigma_y : array_like(float)
A k x k array representing the variance-covariance matrix
of y_t
"""
# == Simplify names == #
A, C, G, H = self.A, self.C, self.G, self.H
# == Initial moments == #
mu_x, Sigma_x = self.mu_0, self.Sigma_0
while 1:
mu_y = G.dot(mu_x)
if H is None:
Sigma_y = G.dot(Sigma_x).dot(G.T)
else:
Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)
yield mu_x, mu_y, Sigma_x, Sigma_y
# == Update moments of x == #
mu_x = A.dot(mu_x)
Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T) | r"""
Create a generator to calculate the population mean and
variance-covariance matrix for both :math:`x_t` and :math:`y_t`
starting at the initial condition (self.mu_0, self.Sigma_0).
Each iteration produces a 4-tuple of items (mu_x, mu_y, Sigma_x,
Sigma_y) for the next period.
Yields
------
mu_x : array_like(float)
An n x 1 array representing the population mean of x_t
mu_y : array_like(float)
A k x 1 array representing the population mean of y_t
Sigma_x : array_like(float)
An n x n array representing the variance-covariance matrix
of x_t
Sigma_y : array_like(float)
A k x k array representing the variance-covariance matrix
of y_t |
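A consumption sketch, assuming `lss` is an already-constructed linear state space instance exposing this method; only the first three moment tuples are pulled from the generator.
import itertools

for mu_x, mu_y, Sigma_x, Sigma_y in itertools.islice(lss.moment_sequence(), 3):
    print(mu_y.flatten(), Sigma_y.shape)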
def example_yaml(cls, skip=()):
"""
Generate an example yaml string for a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
    value. Otherwise we fall back to the default_value for the instance.
"""
return cls.example_instance(skip=skip).to_yaml(skip=skip) | Generate an example yaml string for a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back to the default_value for the instance.
def get_available_tokens(self, count=10, token_length=15, **kwargs):
"""Gets a list of available tokens.
:param count: the number of tokens to return.
:param token_length: the length of the tokens. The higher the number
the easier it will be to return a list. If token_length == 1
            there's a strong probability that the tokens will already exist in
the db.
"""
# This is the number of extra tokens to try and retrieve so calls to
# the db can be limited
token_buffer = int(math.ceil(count * .05))
if token_buffer < 5:
token_buffer = 5
available = set([])
while True:
tokens = [random_alphanum(length=token_length)
for t in range(count + token_buffer)]
db_tokens = self.filter(token__in=tokens).values_list('token',
flat=True)
available.update(set(tokens).difference(db_tokens))
if len(available) >= count:
return list(available)[:count] | Gets a list of available tokens.
:param count: the number of tokens to return.
:param token_length: the length of the tokens. The higher the number
the easier it will be to return a list. If token_length == 1
there's a strong probability that the tokens will already exist in
the db. |
def _bulk_to_linear(M, N, L, qubits):
"Converts a list of chimera coordinates to linear indices."
return [2 * L * N * x + 2 * L * y + L * u + k for x, y, u, k in qubits] | Converts a list of chimera coordinates to linear indices. |
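A worked example of the chimera-to-linear formula with hypothetical grid parameters (note that M is not used by the arithmetic itself):
M, N, L = 2, 2, 4
qubits = [(0, 0, 0, 0), (0, 1, 1, 3), (1, 0, 0, 2)]
print(_bulk_to_linear(M, N, L, qubits))  # [0, 15, 18]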
def _get_user_agent():
"""Construct the user-agent header with the package info,
Python version and OS version.
Returns:
The user agent string.
e.g. 'Python/3.6.7 slack/2.0.0 Darwin/17.7.0'
"""
    # __name__ is the fully qualified module name; keep only the top-level package
client = "{0}/{1}".format(__name__.split(".")[0], ver.__version__)
python_version = "Python/{v.major}.{v.minor}.{v.micro}".format(
v=sys.version_info
)
system_info = "{0}/{1}".format(platform.system(), platform.release())
user_agent_string = " ".join([python_version, client, system_info])
return user_agent_string | Construct the user-agent header with the package info,
Python version and OS version.
Returns:
The user agent string.
e.g. 'Python/3.6.7 slack/2.0.0 Darwin/17.7.0' |
def channels_set_topic(self, room_id, topic, **kwargs):
"""Sets the topic for the channel."""
return self.__call_api_post('channels.setTopic', roomId=room_id, topic=topic, kwargs=kwargs) | Sets the topic for the channel. |
def dateindex(self, col: str):
"""
Set a datetime index from a column
:param col: column name where to index the date from
:type col: str
:example: ``ds.dateindex("mycol")``
"""
df = self._dateindex(col)
if df is None:
self.err("Can not create date index")
return
self.df = df
self.ok("Added a datetime index from column", col) | Set a datetime index from a column
:param col: column name where to index the date from
:type col: str
:example: ``ds.dateindex("mycol")`` |
def _parse_textgroup_wrapper(self, cts_file):
""" Wraps with a Try/Except the textgroup parsing from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata
"""
try:
return self._parse_textgroup(cts_file)
except Exception as E:
self.logger.error("Error parsing %s ", cts_file)
if self.RAISE_ON_GENERIC_PARSING_ERROR:
raise E | Wraps with a Try/Except the textgroup parsing from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata |
def _gradient_penalty(self, real_samples, fake_samples, kwargs):
"""
Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm
"""
import torch
from torch.autograd import Variable, grad
real_samples = real_samples.view(fake_samples.shape)
subset_size = real_samples.shape[0]
real_samples = real_samples[:subset_size]
fake_samples = fake_samples[:subset_size]
alpha = torch.rand(subset_size)
if self.use_cuda:
alpha = alpha.cuda()
alpha = alpha.view((-1,) + ((1,) * (real_samples.dim() - 1)))
interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
interpolates = Variable(interpolates, requires_grad=True)
if self.use_cuda:
interpolates = interpolates.cuda()
d_output = self.critic(interpolates, **kwargs)
    grad_outputs = torch.ones(d_output.size())
    if self.use_cuda:
        grad_outputs = grad_outputs.cuda()
    gradients = grad(
        outputs=d_output,
        inputs=interpolates,
        grad_outputs=grad_outputs,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10 | Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm |
def min(self):
"""
    :returns: the minimum of the column
"""
res = self._qexec("min(%s)" % self._name)
if len(res) > 0:
self._min = res[0][0]
    return self._min | :returns: the minimum of the column |
def unit(w, sparsity):
"""Unit-level magnitude pruning."""
w_shape = common_layers.shape_list(w)
count = tf.to_int32(w_shape[-1] * sparsity)
mask = common_layers.unit_targeting(w, count)
return (1 - mask) * w | Unit-level magnitude pruning. |
def new(image):
"""Make a region on an image.
Returns:
A new :class:`.Region`.
Raises:
:class:`.Error`
"""
pointer = vips_lib.vips_region_new(image.pointer)
if pointer == ffi.NULL:
raise Error('unable to make region')
return pyvips.Region(pointer) | Make a region on an image.
Returns:
A new :class:`.Region`.
Raises:
:class:`.Error` |
def pipeline_launchpad(job, fastqs, univ_options, tool_options):
"""
The precision immuno pipeline begins at this module. The DAG can be viewed in Flowchart.txt
This module corresponds to node 0 on the tree
"""
    # Add Patient id to univ_options as it is passed to every major node in the DAG and can be used
# as a prefix for the logfile.
univ_options['patient'] = fastqs['patient_id']
# Ascertain the number of available CPUs. Jobs will be given fractions of this value.
ncpu = cpu_count()
tool_options['star']['n'] = tool_options['bwa']['n'] = tool_options['phlat']['n'] = \
tool_options['rsem']['n'] = ncpu / 3
# Define the various nodes in the DAG
# Need a logfile and a way to send it around
sample_prep = job.wrapJobFn(prepare_samples, fastqs, univ_options, disk='140G')
cutadapt = job.wrapJobFn(run_cutadapt, sample_prep.rv(), univ_options, tool_options['cutadapt'],
cores=1, disk='80G')
star = job.wrapJobFn(run_star, cutadapt.rv(), univ_options, tool_options['star'],
cores=tool_options['star']['n'], memory='40G', disk='120G').encapsulate()
bwa_tumor = job.wrapJobFn(run_bwa, sample_prep.rv(), 'tumor_dna', univ_options,
tool_options['bwa'], cores=tool_options['bwa']['n'],
disk='120G').encapsulate()
bwa_normal = job.wrapJobFn(run_bwa, sample_prep.rv(), 'normal_dna', univ_options,
tool_options['bwa'], cores=tool_options['bwa']['n'],
disk='120G').encapsulate()
phlat_tumor_dna = job.wrapJobFn(run_phlat, sample_prep.rv(), 'tumor_dna', univ_options,
tool_options['phlat'], cores=tool_options['phlat']['n'],
disk='60G')
phlat_normal_dna = job.wrapJobFn(run_phlat, sample_prep.rv(), 'normal_dna', univ_options,
tool_options['phlat'], cores=tool_options['phlat']['n'],
disk='60G')
phlat_tumor_rna = job.wrapJobFn(run_phlat, sample_prep.rv(), 'tumor_rna', univ_options,
tool_options['phlat'], cores=tool_options['phlat']['n'],
disk='60G')
fastq_deletion = job.wrapJobFn(delete_fastqs, sample_prep.rv())
rsem = job.wrapJobFn(run_rsem, star.rv(), univ_options, tool_options['rsem'],
cores=tool_options['rsem']['n'], disk='80G')
mhc_pathway_assessment = job.wrapJobFn(assess_mhc_genes, rsem.rv(), phlat_tumor_rna.rv(),
univ_options, tool_options['mhc_pathway_assessment'])
fusions = job.wrapJobFn(run_fusion_caller, star.rv(), univ_options, 'fusion_options')
Sradia = job.wrapJobFn(spawn_radia, star.rv(), bwa_tumor.rv(),
bwa_normal.rv(), univ_options, tool_options['mut_callers']).encapsulate()
Mradia = job.wrapJobFn(merge_radia, Sradia.rv())
Smutect = job.wrapJobFn(spawn_mutect, bwa_tumor.rv(), bwa_normal.rv(), univ_options,
tool_options['mut_callers']).encapsulate()
Mmutect = job.wrapJobFn(merge_mutect, Smutect.rv())
indels = job.wrapJobFn(run_indel_caller, bwa_tumor.rv(), bwa_normal.rv(), univ_options,
'indel_options')
merge_mutations = job.wrapJobFn(run_mutation_aggregator, fusions.rv(), Mradia.rv(),
Mmutect.rv(), indels.rv(), univ_options)
snpeff = job.wrapJobFn(run_snpeff, merge_mutations.rv(), univ_options, tool_options['snpeff'],
disk='30G')
transgene = job.wrapJobFn(run_transgene, snpeff.rv(), univ_options, tool_options['transgene'],
disk='5G')
merge_phlat = job.wrapJobFn(merge_phlat_calls, phlat_tumor_dna.rv(), phlat_normal_dna.rv(),
phlat_tumor_rna.rv(), disk='5G')
spawn_mhc = job.wrapJobFn(spawn_antigen_predictors, transgene.rv(), merge_phlat.rv(),
univ_options, (tool_options['mhci'],
tool_options['mhcii'])).encapsulate()
merge_mhc = job.wrapJobFn(merge_mhc_peptide_calls, spawn_mhc.rv(), transgene.rv(), disk='5G')
rank_boost = job.wrapJobFn(boost_ranks, rsem.rv(), merge_mhc.rv(), transgene.rv(), univ_options,
tool_options['rank_boost'], disk='5G')
# Define the DAG in a static form
job.addChild(sample_prep) # Edge 0->1
# A. The first step is running the alignments and the MHC haplotypers
sample_prep.addChild(cutadapt) # Edge 1->2
sample_prep.addChild(bwa_tumor) # Edge 1->3
sample_prep.addChild(bwa_normal) # Edge 1->4
sample_prep.addChild(phlat_tumor_dna) # Edge 1->5
sample_prep.addChild(phlat_normal_dna) # Edge 1->6
sample_prep.addChild(phlat_tumor_rna) # Edge 1->7
# B. cutadapt will be followed by star
cutadapt.addChild(star) # Edge 2->9
    # Ci. gene expression and fusion detection follow star alignment
star.addChild(rsem) # Edge 9->10
star.addChild(fusions) # Edge 9->11
# Cii. Radia depends on all 3 alignments
star.addChild(Sradia) # Edge 9->12
bwa_tumor.addChild(Sradia) # Edge 3->12
bwa_normal.addChild(Sradia) # Edge 4->12
# Ciii. mutect and indel calling depends on dna to have been aligned
bwa_tumor.addChild(Smutect) # Edge 3->13
bwa_normal.addChild(Smutect) # Edge 4->13
bwa_tumor.addChild(indels) # Edge 3->14
bwa_normal.addChild(indels) # Edge 4->14
# D. MHC haplotypes will be merged once all 3 samples have been PHLAT-ed
phlat_tumor_dna.addChild(merge_phlat) # Edge 5->15
phlat_normal_dna.addChild(merge_phlat) # Edge 6->15
phlat_tumor_rna.addChild(merge_phlat) # Edge 7->15
# E. Delete the fastqs from the job store since all alignments are complete
sample_prep.addChild(fastq_deletion) # Edge 1->8
cutadapt.addChild(fastq_deletion) # Edge 2->8
bwa_normal.addChild(fastq_deletion) # Edge 3->8
bwa_tumor.addChild(fastq_deletion) # Edge 4->8
phlat_normal_dna.addChild(fastq_deletion) # Edge 5->8
    phlat_tumor_dna.addChild(fastq_deletion) # Edge 6->8
phlat_tumor_rna.addChild(fastq_deletion) # Edge 7->8
# F. Mutation calls need to be merged before they can be used
Sradia.addChild(Mradia) # Edge 12->16
Smutect.addChild(Mmutect) # Edge 13->17
# G. All mutations get aggregated when they have finished running
fusions.addChild(merge_mutations) # Edge 11->18
Mradia.addChild(merge_mutations) # Edge 16->18
Mmutect.addChild(merge_mutations) # Edge 17->18
indels.addChild(merge_mutations) # Edge 14->18
# H. Aggregated mutations will be translated to protein space
merge_mutations.addChild(snpeff) # Edge 18->19
# I. snpeffed mutations will be converted into peptides
snpeff.addChild(transgene) # Edge 19->20
# J. Merged haplotypes and peptides will be converted into jobs and submitted for mhc:peptide
# binding prediction
merge_phlat.addChild(spawn_mhc) # Edge 15->21
transgene.addChild(spawn_mhc) # Edge 20->21
# K. The results from all the predictions will be merged. This is a follow-on job because
# spawn_mhc will spawn an undetermined number of children.
spawn_mhc.addFollowOn(merge_mhc) # Edges 21->XX->22 and 21->YY->22
# L. Finally, the merged mhc along with the gene expression will be used for rank boosting
rsem.addChild(rank_boost) # Edge 10->23
merge_mhc.addChild(rank_boost) # Edge 22->23
# M. Assess the status of the MHC genes in the patient
phlat_tumor_rna.addChild(mhc_pathway_assessment) # Edge 7->24
rsem.addChild(mhc_pathway_assessment) # Edge 10->24
return None | The precision immuno pipeline begins at this module. The DAG can be viewed in Flowchart.txt
This module corresponds to node 0 on the tree |
def todict(self, exclude_cache=False):
    '''Return a dictionary of serialised scalar fields for pickling.
If the *exclude_cache* flag is ``True``, fields with :attr:`Field.as_cache`
attribute set to ``True`` will be excluded.'''
odict = {}
for field, value in self.fieldvalue_pairs(exclude_cache=exclude_cache):
value = field.serialise(value)
if value:
odict[field.name] = value
if self._dbdata and 'id' in self._dbdata:
odict['__dbdata__'] = {'id': self._dbdata['id']}
    return odict | Return a dictionary of serialised scalar fields for pickling.
If the *exclude_cache* flag is ``True``, fields with :attr:`Field.as_cache`
attribute set to ``True`` will be excluded. |
def parse_xmlsec_output(output):
""" Parse the output from xmlsec to try to find out if the
    command was successful or not.
:param output: The output from Popen
:return: A boolean; True if the command was a success otherwise False
"""
for line in output.splitlines():
if line == 'OK':
return True
elif line == 'FAIL':
raise XmlsecError(output)
raise XmlsecError(output) | Parse the output from xmlsec to try to find out if the
command was successful or not.
:param output: The output from Popen
:return: A boolean; True if the command was a success otherwise False |
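Usage sketch; the argument string here is a stand-in, not real xmlsec output, and XmlsecError is the exception class referenced by the function above.
try:
    print(parse_xmlsec_output("OK\n"))  # True
except XmlsecError as err:
    print("verification failed:", err)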