def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def topological_sort(bpmn_graph, nodes_with_classification):
"""
:return:
"""
node_param_name = "node"
classification_param_name = "classification"
tmp_nodes_with_classification = copy.deepcopy(nodes_with_classification)
sorted_nodes_with_classification = []
no_incoming_flow_nodes = []
backward_flows = []
while tmp_nodes_with_classification:
for node_with_classification in tmp_nodes_with_classification:
incoming_list = node_with_classification[node_param_name][1][consts.Consts.incoming_flow]
if len(incoming_list) == 0:
no_incoming_flow_nodes.append(node_with_classification)
if len(no_incoming_flow_nodes) > 0:
while len(no_incoming_flow_nodes) > 0:
node_with_classification = no_incoming_flow_nodes.pop()
tmp_nodes_with_classification.remove(node_with_classification)
sorted_nodes_with_classification \
.append(next(tmp_node for tmp_node in nodes_with_classification
if tmp_node[node_param_name][0] == node_with_classification[node_param_name][0]))
outgoing_list = list(node_with_classification[node_param_name][1][consts.Consts.outgoing_flow])
tmp_outgoing_list = list(outgoing_list)
for flow_id in tmp_outgoing_list:
'''
- Remove the outgoing flow for source flow node (the one without incoming flows)
- Get the target node
- Remove the incoming flow for target flow node
'''
outgoing_list.remove(flow_id)
node_with_classification[node_param_name][1][consts.Consts.outgoing_flow].remove(flow_id)
flow = bpmn_graph.get_flow_by_id(flow_id)
target_id = flow[2][consts.Consts.target_ref]
target = next(tmp_node[node_param_name]
for tmp_node in tmp_nodes_with_classification
if tmp_node[node_param_name][0] == target_id)
target[1][consts.Consts.incoming_flow].remove(flow_id)
else:
for node_with_classification in tmp_nodes_with_classification:
if "Join" in node_with_classification[classification_param_name]:
incoming_list = list(node_with_classification[node_param_name][1][consts.Consts.incoming_flow])
tmp_incoming_list = list(incoming_list)
for flow_id in tmp_incoming_list:
incoming_list.remove(flow_id)
flow = bpmn_graph.get_flow_by_id(flow_id)
source_id = flow[2][consts.Consts.source_ref]
source = next(tmp_node[node_param_name]
for tmp_node in tmp_nodes_with_classification
if tmp_node[node_param_name][0] == source_id)
source[1][consts.Consts.outgoing_flow].remove(flow_id)
target_id = flow[2][consts.Consts.target_ref]
target = next(tmp_node[node_param_name]
for tmp_node in tmp_nodes_with_classification
if tmp_node[node_param_name][0] == target_id)
target[1][consts.Consts.incoming_flow].remove(flow_id)
backward_flows.append(flow)
return sorted_nodes_with_classification, backward_flows
def add_vcenter(self, **kwargs):
"""
Add vCenter on the switch
Args:
id (str): Name of an established vCenter
url (str): vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
Returns:
True if the vCenter configuration was applied successfully,
False otherwise (the error is logged).
Raises:
KeyError: if any of the required keyword arguments is missing.
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
url = ET.SubElement(credentials, "url")
url.text = kwargs.pop('url')
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password')
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False
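# Hedged usage sketch; `switch` stands for whatever object exposes add_vcenter
# (e.g. a Brocade device handle) and the connection details are made up. All
# four keyword arguments are required and the return value reports success.
ok = switch.add_vcenter(id='vc1',
                        url='https://vcenter.example.com',
                        username='admin',
                        password='secret')
print('vCenter configured' if ok else 'vCenter configuration failed, see log')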
def configuration_from_uri(cls, persistence_uri):
"""
Return a configuration object.
"""
db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri)
engine = create_engine(db_uri)
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
job = session.query(Job).filter(Job.id == persistence_state_id).first()
configuration = job.configuration
configuration = yaml.safe_load(configuration)
configuration['exporter_options']['resume'] = True
configuration['exporter_options']['persistence_state_id'] = persistence_state_id
return configuration
def explode(self):
"""Explode realms with each realm_members and higher_realms to get all the
realms sub realms.
:return: None
"""
# Manage higher realms where defined
for realm in [tmp_realm for tmp_realm in self if tmp_realm.higher_realms]:
for parent in realm.higher_realms:
higher_realm = self.find_by_name(parent)
if higher_realm:
# Add the realm to its parent realm members
higher_realm.realm_members.append(realm.get_name())
for realm in self:
# Set a recursion tag to protect against loop
for tmp_realm in self:
tmp_realm.rec_tag = False
realm.get_realms_by_explosion(self)
# Clean the recursion tag
for tmp_realm in self:
del tmp_realm.rec_tag
def robust_init(stochclass, tries, *args, **kwds):
"""Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True)
"""
# Find the direct parents
stochs = [arg for arg in (list(args) + list(kwds.values()))
if isinstance(arg.__class__, StochasticMeta)]
# Find the extended parents
parents = stochs
for s in stochs:
parents.extend(s.extended_parents)
extended_parents = set(parents)
# Select the parents with a random method.
random_parents = [
p for p in extended_parents if p.rseed is True and hasattr(
p,
'random')]
for i in range(tries):
try:
return stochclass(*args, **kwds)
except ZeroProbability:
exc = sys.exc_info()
for parent in random_parents:
try:
parent.random()
except:
six.reraise(*exc)
six.reraise(*exc)
def download(url, dest):
"""
Platform-agnostic downloader.
"""
u = urllib.FancyURLopener()
logger.info("Downloading %s..." % url)
u.retrieve(url, dest)
logger.info('Done, see %s' % dest)
return dest
def is_valid(self):
""" Checks recursively if the tree is valid
It is valid if each node splits correctly """
if not self:
return True
if self.left and self.data[self.axis] < self.left.data[self.axis]:
return False
if self.right and self.data[self.axis] > self.right.data[self.axis]:
return False
return all(c.is_valid() for c, _ in self.children) or self.is_leaf
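# Hedged sketch, assuming a create() helper (as in the `kdtree` package) that
# builds the tree whose nodes carry this is_valid() method; the points are
# illustrative. A freshly built tree satisfies the split invariant above.
import kdtree
tree = kdtree.create([(2, 3), (5, 4), (9, 6), (4, 7), (8, 1)])
print(tree.is_valid())  # True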
def s2m(self):
'''
Imports settings to meta
'''
m = '%s settings' % (IDENT)
self.meta.load(m, 'import %s' % (m), mdict=self.settings.get)
def _get_prepped_model_field(model_obj, field):
"""
Gets the value of a field of a model obj that is prepared for the db.
"""
# Get the field
field = model_obj._meta.get_field(field)
# Get the value
value = field.get_db_prep_save(getattr(model_obj, field.attname), connection)
# Return the value
return value
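# Hedged usage sketch; `Book` is a hypothetical Django model, not one from the
# source. The helper returns the value exactly as the backend would persist it,
# e.g. a date coerced to its database representation.
book = Book.objects.get(pk=1)
db_ready_value = _get_prepped_model_field(book, 'published_on')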
def _repr_latex_(self):
"""
This is used in the IPython notebook; it allows us to render the ODEProblem object in LaTeX.
How Cool is this?
"""
# TODO: we're mixing HTML with latex here. That is not necessarily a good idea, but works
# with IPython 1.2.0. Once IPython 2.0 is released, this needs to be changed to _ipython_display_
lines = []
lines.append(r"<h1>{0}</h1>".format(self.__class__.__name__))
lines.append("<p>Method: <code>{0!r}</code></p>".format(self.method))
lines.append("<p>Parameters: <code>{0!r}</code></p>".format(self.parameters))
lines.append("<p>Terms:</p>")
lines.append("<ul>")
lines.extend(['<li><code>{0!r}</code></li>'.format(lhs) for lhs in self.left_hand_side_descriptors])
lines.append("</ul>")
lines.append('<hr />')
lines.append(r"\begin{align*}")
for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol), sympy.latex(rhs)))
lines.append(r"\end{align*}")
return "\n".join(lines) | This is used in IPython notebook it allows us to render the ODEProblem object in LaTeX.
How Cool is this? |
def show_dependencies(self, stream=sys.stdout):
"""Writes to the given stream the ASCII representation of the dependency tree."""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream)
def create(cls, name, gateway, network, input_speed=None,
output_speed=None, domain_server_address=None,
provider_name=None, probe_address=None,
standby_mode_period=3600, standby_mode_timeout=30,
active_mode_period=5, active_mode_timeout=1, comment=None):
"""
Create a new StaticNetlink to be used as a traffic handler.
:param str name: name of netlink Element
:param gateway: gateway to map this netlink to. This can be an element
or str href.
:type gateway: Router,Engine
:param list network: network/s associated with this netlink.
:type network: list(str,Element)
:param int input_speed: input speed in Kbps, used for ratio-based
load-balancing
:param int output_speed: output speed in Kbps, used for ratio-based
load-balancing
:param list domain_server_address: dns addresses for netlink. Engine
DNS can override this field
:type domain_server_address: list(str,Element)
:param str provider_name: optional name to identify provider for this
netlink
:param list probe_address: list of IP addresses to use as probing
addresses to validate connectivity
:type probe_address: list(str)
:param int standby_mode_period: Specifies the probe period when
standby mode is used (in seconds)
:param int standby_mode_timeout: probe timeout in seconds
:param int active_mode_period: Specifies the probe period when active
mode is used (in seconds)
:param int active_mode_timeout: probe timeout in seconds
:raises ElementNotFound: if using type Element parameters that are
not found.
:raises CreateElementFailed: failure to create netlink with reason
:rtype: StaticNetlink
.. note:: To monitor the status of the network links, you must define
at least one probe IP address.
"""
json = {'name': name,
'gateway_ref': element_resolver(gateway),
'ref': element_resolver(network),
'input_speed': input_speed,
'output_speed': output_speed,
'probe_address': probe_address,
'nsp_name': provider_name,
'comment': comment,
'standby_mode_period': standby_mode_period,
'standby_mode_timeout': standby_mode_timeout,
'active_mode_period': active_mode_period,
'active_mode_timeout': active_mode_timeout}
if domain_server_address:
r = RankedDNSAddress([])
r.add(domain_server_address)
json.update(domain_server_address=r.entries)
return ElementCreator(cls, json)
def previous(self):
''' Returns previous image for same content_object and None if image
is the first. '''
try:
return self.__class__.objects.for_model(self.content_object,
self.content_type).\
filter(order__lt=self.order).order_by('-order')[0]
except IndexError:
return None
def _read_sections(ifile):
"""Read sections_in.txt file, if it exists."""
if os.path.exists(ifile):
return read_sections(ifile, exclude_ungrouped=True, prt=None)
def get_current_live_chat(self):
""" Check if there is a live chat on the go, so that we should take
over the AskMAMA page with the live chat.
"""
now = datetime.now()
chat = self.upcoming_live_chat()
if chat and chat.is_in_progress():
return chat
return None
def _get_pos_name(code, name='parent', english=True, delimiter=':',
pos_tags=pos_map.POS_MAP):
"""Gets the part of speech name for *code*.
Joins the names together with *delimiter* if *name* is ``'all'``.
See :func:`pynlpir.pos_map.get_pos_name` for more information.
"""
pos_name = pos_map.get_pos_name(code, name, english, pos_tags=pos_tags)
return delimiter.join(pos_name) if name == 'all' else pos_name
def parse(s):
r"""
Returns a list of strings or format dictionaries to describe the strings.
May raise a ValueError if it can't be parsed.
>>> parse(">>> []")
['>>> []']
>>> #parse("\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m")
"""
stuff = []
rest = s
while True:
front, token, rest = peel_off_esc_code(rest)
if front:
stuff.append(front)
if token:
try:
tok = token_type(token)
if tok:
stuff.extend(tok)
except ValueError:
raise ValueError("Can't parse escape sequence: %r %r %r %r" % (s, repr(front), token, repr(rest)))
if not rest:
break
return stuff
def set_path(self, file_path):
"""
Set the path of the database.
Create the file if it does not exist.
"""
if not file_path:
self.read_data = self.memory_read
self.write_data = self.memory_write
elif not is_valid(file_path):
self.write_data(file_path, {})
self.path = file_path
def dump_graph(self):
"""Dump a key-only representation of the schema to a dictionary. Every
known relation is a key with a value of a list of keys it is referenced
by.
"""
# we have to hold the lock for the entire dump, if other threads modify
# self.relations or any cache entry's referenced_by during iteration
# it's a runtime error!
with self.lock:
return {
dot_separated(k): v.dump_graph_entry()
for k, v in self.relations.items()
}
def update_slidepos(self):
"""
Periodically update the slide position.
Also farmed out to a thread to avoid hanging GUI main thread
"""
g = get_root(self).globals
if not g.cpars['focal_plane_slide_on']:
self.after(20000, self.update_slidepos)
return
def slide_threaded_update():
try:
(pos_ms, pos_mm, pos_px), msg = g.fpslide.slide.return_position()
self.slide_pos_queue.put((pos_ms, pos_mm, pos_px))
except Exception as err:
t, v, tb = sys.exc_info()
error = traceback.format_exception_only(t, v)[0].strip()
tback = 'Slide Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
g.FIFO.put(('Slide', error, tback))
t = threading.Thread(target=slide_threaded_update)
t.start()
self.after(20000, self.update_slidepos)
def call_audit(func):
"""Print a detailed audit of all calls to this function."""
def audited_func(*args, **kwargs):
import traceback
stack = traceback.extract_stack()
r = func(*args, **kwargs)
func_name = func.__name__
print("@depth %d, trace %s -> %s(*%r, **%r) => %r" % (
len(stack),
" -> ".join("%s:%d:%s" % x[0:3] for x in stack[-5:-2]),
func_name,
args,
kwargs,
r))
return r
return audited_func
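# Usage sketch for the call_audit decorator above; `scale` is a made-up example
# function. Each call prints the stack depth, a short caller trace, the
# arguments and the return value, then returns the result unchanged.
@call_audit
def scale(x, factor=2):
    return x * factor

scale(3)            # prints something like "... -> scale(*(3,), **{}) => 6"
scale(5, factor=4)  # prints "... => 20" and returns 20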
def as_format(item, format_str='.2f'):
"""
Map a format string over a pandas object.
"""
if isinstance(item, pd.Series):
return item.map(lambda x: format(x, format_str))
elif isinstance(item, pd.DataFrame):
return item.applymap(lambda x: format(x, format_str))
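# Small illustration of as_format on a Series and a DataFrame; the data is
# made up for the example.
import pandas as pd
s = pd.Series([0.1234, 2.5])
print(as_format(s, '.2f').tolist())        # ['0.12', '2.50']
df = pd.DataFrame({'a': [1.5, 2.25]})
print(as_format(df, '.1f')['a'].tolist())  # ['1.5', '2.2']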
def _get_graph(graph, filename):
"""Retrieve or render a graph."""
try:
rendered = graph.rendered_file
except AttributeError:
try:
graph.render(os.path.join(server.tmpdir, filename), format='png')
rendered = filename
except OSError:
rendered = None
graph.rendered_file = rendered
return rendered
def mean_subtraction(x, mean, t, base_axis=1, update_running_mean=True):
r"""
It subtracts the mean of the elements of the input array,
and normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy
in various tasks such as image classification.
At training time, this function is defined as
.. math::
\begin{eqnarray}
\mu &=& \frac{1}{M} \sum x_i \\
y_i &=& x_i - \mu
\end{eqnarray}
At testing time, the mean values used are those that were computed during training by moving average.
Note:
The backward performs an approximated differentiation that takes into account only the latest mini-batch.
Args:
x(~nnabla.Variable): N-D array of input.
mean(~nnabla.Variable): N-D array of running mean (modified during forward execution).
t(~nnabla.Variable): Scalar of num of iteration of running mean (modified during forward execution).
base_axis(int): Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension.
[default=``1``]
update_running_mean(bool): Update running mean during forward execution.
[default=``True``]
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.mean_subtraction``.
"""
from .function_bases import mean_subtraction as mean_subtraction_base
return mean_subtraction_base(x, mean, t,
base_axis=base_axis,
update_running_mean=update_running_mean)
def _folder_item_assigned_worksheet(self, analysis_brain, item):
"""Adds an icon to the item dict if the analysis is assigned to a
worksheet and if the icon is suitable for the current context
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
if not IAnalysisRequest.providedBy(self.context):
# We want this icon to only appear if the context is an AR
return
analysis_obj = self.get_object(analysis_brain)
worksheet = analysis_obj.getWorksheet()
if not worksheet:
# No worksheet assigned. Do nothing
return
title = t(_("Assigned to: ${worksheet_id}",
mapping={'worksheet_id': safe_unicode(worksheet.id)}))
img = get_image('worksheet.png', title=title)
anchor = get_link(worksheet.absolute_url(), img)
self._append_html_element(item, 'state_title', anchor)
def raw_snapshot_data(self, name):
"""
::
GET /:login/machines/:id/snapshots/:name
:param name: identifier for snapshot
:type name: :py:class:`basestring`
:rtype: :py:class:`dict`
Used internally to get a raw dict of a single machine snapshot.
"""
j, _ = self.datacenter.request('GET', self.path + '/snapshots/' +
str(name))
return j
def for_account_hash(parent, account_hash):
"""
Returns a new AccountProxy that acquires the account with the
given hash, if such an account is known to the account manager.
It is an error if the account manager does not have such an
account.
"""
account = AccountProxy(parent)
account.account_hash = account_hash
if account.acquire():
return account
return None
def launch_ipython_legacy_shell(args): # pylint: disable=unused-argument
"""Open the SolveBio shell (IPython wrapper) for older IPython versions"""
try:
from IPython.config.loader import Config
except ImportError:
_print("The SolveBio Python shell requires IPython.\n"
"To install, type: 'pip install ipython'")
return False
try:
# see if we're already inside IPython
get_ipython # pylint: disable=undefined-variable
except NameError:
cfg = Config()
prompt_config = cfg.PromptManager
prompt_config.in_template = '[SolveBio] In <\\#>: '
prompt_config.in2_template = ' .\\D.: '
prompt_config.out_template = 'Out<\\#>: '
banner1 = '\nSolveBio Python shell started.'
exit_msg = 'Quitting SolveBio shell.'
else:
_print("Running nested copies of IPython.")
cfg = Config()
banner1 = exit_msg = ''
# First import the embeddable shell class
try:
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
# pylint: disable=import-error,no-name-in-module
from IPython.frontend.terminal.embed import InteractiveShellEmbed
path = os.path.dirname(os.path.abspath(__file__))
init_file = '{}/ipython_init.py'.format(path)
exec(compile(open(init_file).read(), init_file, 'exec'),
globals(), locals())
InteractiveShellEmbed(config=cfg, banner1=banner1, exit_msg=exit_msg)()
def create_secgroup_rule(self, protocol, from_port, to_port,
source, target):
"""
Creates a new server security group rule.
:param str protocol: E.g. ``tcp``, ``icmp``, etc...
:param int from_port: E.g. ``1``
:param int to_port: E.g. ``65535``
:param str source: source of the traffic; either the name of another
security group or a CIDR block (e.g. ``10.0.0.0/24``)
:param str target: name of the security group the rule is added to
"""
nova = self.nova
def get_id(gname):
sg = nova.security_groups.find(name=gname)
if not sg:
raise BangError("Security group not found, %s" % gname)
return str(sg.id)
kwargs = {
'ip_protocol': protocol,
'from_port': str(from_port),
'to_port': str(to_port),
'parent_group_id': get_id(target),
}
if '/' in source:
kwargs['cidr'] = source
else:
kwargs['group_id'] = get_id(source)
# not sure if this is an openstack hack or an hpcloud hack, but
# this is definitely required to get it working on hpcloud:
kwargs['cidr'] = 'null'
nova.security_group_rules.create(**kwargs)
def update(self, configuration, debug=None):
"""Update the internal configuration values, removing debug_only
handlers if debug is False. Returns True if the configuration has
changed from previous configuration values.
:param dict configuration: The logging configuration
:param bool debug: Toggles use of debug_only loggers
:rtype: bool
"""
if self.config != dict(configuration) and debug != self.debug:
self.config = dict(configuration)
self.debug = debug
self.configure()
return True
return False
def touch(args):
"""
%prog touch timestamp.info
Recover timestamps for files in the current folder.
CAUTION: you must execute this in the same directory as timestamp().
"""
from time import ctime
p = OptionParser(touch.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
info, = args
fp = open(info)
for row in fp:
path, atime, mtime = row.split()
atime = float(atime)
mtime = float(mtime)
current_atime, current_mtime = get_times(path)
# Check if the time has changed, with resolution up to 1 sec
if int(atime) == int(current_atime) and \
int(mtime) == int(current_mtime):
continue
times = [ctime(x) for x in (current_atime, current_mtime, atime, mtime)]
msg = "{0} : ".format(path)
msg += "({0}, {1}) => ({2}, {3})".format(*times)
print(msg, file=sys.stderr)
os.utime(path, (atime, mtime))
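# Hedged sketch of the timestamp.info format touch() expects: one
# "path atime mtime" record per line with times as epoch seconds. The paths
# and timestamps are illustrative and assume the files exist locally.
with open('timestamp.info', 'w') as fw:
    fw.write('data/genome.fasta 1406851200.0 1406851200.0\n')
    fw.write('data/annotation.gff 1406851200.0 1406937600.0\n')
touch(['timestamp.info'])  # restores the recorded atime/mtime on those files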
def corr_dw_v1(self):
"""Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
old = self.sequences.states.fastaccess_old
new = self.sequences.states.fastaccess_new
idx = der.toy[self.idx_sim]
if (con.maxdw[idx] > 0.) and ((old.w-new.w) > con.maxdw[idx]):
new.w = old.w-con.maxdw[idx]
self.interp_v()
flu.qa = flu.qz+(old.v-new.v)/der.seconds
def delete(name=None, group_id=None, region=None, key=None, keyid=None,
profile=None, vpc_id=None, vpc_name=None):
'''
Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
log.info('Deleted security group %s with id %s.', group.name, group.id)
return True
else:
msg = 'Failed to delete security group {0}.'.format(name)
log.error(msg)
return False
else:
log.debug('Security group not found.')
return False
def get_assessment_notification_session_for_bank(self, assessment_receiver, bank_id):
"""Gets the ``OsidSession`` associated with the assessment notification service for the given bank.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
arg: bank_id (osid.id.Id): the ``Id`` of the bank
return: (osid.assessment.AssessmentNotificationSession) - ``an
_assessment_notification_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``assessment_receiver`` or ``bank_id`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_assessment_notification()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_assessment_notification():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.ItemNotificationSession(bank_id, runtime=self._runtime, receiver=assessment_receiver)
def do_bugin(self, args):
"""bugin [ <logger> ] - add a console logging handler to a logger"""
args = args.split()
if _debug: ConsoleCmd._debug("do_bugin %r", args)
# get the logger name and logger
if args:
loggerName = args[0]
if loggerName in logging.Logger.manager.loggerDict:
logger = logging.getLogger(loggerName)
else:
logger = None
else:
loggerName = '__root__'
logger = logging.getLogger()
# add a logging handler
if not logger:
self.stdout.write("not a valid logger name\n")
elif loggerName in self.handlers:
self.stdout.write("%s already has a handler\n" % loggerName)
else:
handler = ConsoleLogHandler(logger)
self.handlers[loggerName] = handler
self.stdout.write("handler to %s added\n" % loggerName)
self.stdout.write("\n") | bugin [ <logger> ] - add a console logging handler to a logger |
def SA_tank(D, L, sideA=None, sideB=None, sideA_a=0,
sideB_a=0, sideA_f=None, sideA_k=None, sideB_f=None, sideB_k=None,
full_output=False):
r'''Calculates the surface area of a cylindrical tank with optional heads.
In the degenerate case of being provided with only `D` and `L`, provides
the surface area of a cylinder.
Parameters
----------
D : float
Diameter of the cylindrical section of the tank, [m]
L : float
Length of the main cylindrical section of the tank, [m]
sideA : string, optional
The left (or bottom for vertical) head of the tank's type; one of
[None, 'conical', 'ellipsoidal', 'torispherical', 'guppy', 'spherical'].
sideB : string, optional
The right (or top for vertical) head of the tank's type; one of
[None, 'conical', 'ellipsoidal', 'torispherical', 'guppy', 'spherical'].
sideA_a : float, optional
The distance the head as specified by sideA extends down or to the left
from the main cylindrical section, [m]
sideB_a : float, optional
The distance the head as specified by sideB extends up or to the right
from the main cylindrical section, [m]
sideA_f : float, optional
Dish-radius parameter for side A; fD = dish radius [1/m]
sideA_k : float, optional
knuckle-radius parameter for side A; kD = knuckle radius [1/m]
sideB_f : float, optional
Dish-radius parameter for side B; fD = dish radius [1/m]
sideB_k : float, optional
knuckle-radius parameter for side B; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area of the tank [m^2]
areas : tuple, only returned if full_output == True
(sideA_SA, sideB_SA, lateral_SA)
Other Parameters
----------------
full_output : bool, optional
Returns a tuple of (sideA_SA, sideB_SA, lateral_SA) if True
Examples
--------
Cylinder, Spheroid, Long Cones, and spheres. All checked.
>>> SA_tank(D=2, L=2)
18.84955592153876
>>> SA_tank(D=1., L=0, sideA='ellipsoidal', sideA_a=2, sideB='ellipsoidal',
... sideB_a=2)
28.480278854014387
>>> SA_tank(D=1., L=5, sideA='conical', sideA_a=2, sideB='conical',
... sideB_a=2)
22.18452243965656
>>> SA_tank(D=1., L=5, sideA='spherical', sideA_a=0.5, sideB='spherical',
... sideB_a=0.5)
18.84955592153876
'''
# Side A
if sideA == 'conical':
sideA_SA = SA_conical_head(D=D, a=sideA_a)
elif sideA == 'ellipsoidal':
sideA_SA = SA_ellipsoidal_head(D=D, a=sideA_a)
elif sideA == 'guppy':
sideA_SA = SA_guppy_head(D=D, a=sideA_a)
elif sideA == 'spherical':
sideA_SA = SA_partial_sphere(D=D, h=sideA_a)
elif sideA == 'torispherical':
sideA_SA = SA_torispheroidal(D=D, fd=sideA_f, fk=sideA_k)
else:
sideA_SA = pi/4*D**2 # Circle
# Side B
if sideB == 'conical':
sideB_SA = SA_conical_head(D=D, a=sideB_a)
elif sideB == 'ellipsoidal':
sideB_SA = SA_ellipsoidal_head(D=D, a=sideB_a)
elif sideB == 'guppy':
sideB_SA = SA_guppy_head(D=D, a=sideB_a)
elif sideB == 'spherical':
sideB_SA = SA_partial_sphere(D=D, h=sideB_a)
elif sideB == 'torispherical':
sideB_SA = SA_torispheroidal(D=D, fd=sideB_f, fk=sideB_k)
else:
sideB_SA = pi/4*D**2 # Circle
lateral_SA = pi*D*L
SA = sideA_SA + sideB_SA + lateral_SA
if full_output:
return SA, (sideA_SA, sideB_SA, lateral_SA)
else:
return SA
def rank_members_in(self, leaderboard_name, members_and_scores):
'''
Rank an array of members in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param members_and_scores [Array] Variable list of members and scores.
'''
for member, score in grouper(2, members_and_scores):
self.rank_member_in(leaderboard_name, member, score)
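# Hedged usage sketch: members and scores are supplied as one flat, alternating
# sequence and paired up by grouper(2, ...). The leaderboard name, members and
# scores are illustrative; `Leaderboard` is assumed to be the class this method
# is defined on.
lb = Leaderboard('highscores')
lb.rank_members_in('highscores', ['alice', 310, 'bob', 275, 'carol', 298])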
def update_context(self, ctx):
""" updates the query context with this clauses values """
assert isinstance(ctx, dict)
ctx[str(self.context_id)] = self.value
def list_nodes_min(call=None):
'''
Return a list of the VMs that are on the provider. Only a list of VM names and
their state is returned. This is the minimum amount of information needed to
check for existing VMs.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min packet-provider
salt-cloud --function list_nodes_min packet-provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
ret = {}
for device in get_devices_by_token():
ret[device.hostname] = {'id': device.id, 'state': device.state}
return ret
def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):
"""
Not used in the regular filter process for the time being.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
center_lat: float
latitude of the buffer center
center_lon: float
longitude of the buffer center
buffer_km: float
radius of the buffer in kilometers
update_secondary_data: bool
whether secondary data copies are refreshed after filtering
"""
distance_function_str = add_wgs84_distance_function_to_db(db_conn)
stops_within_buffer_query_sql = "SELECT stop_I FROM stops WHERE CAST(" + distance_function_str + \
"(lat, lon, {lat} , {lon}) AS INT) < {d_m}"\
.format(lat=float(center_lat), lon=float(center_lon), d_m=int(1000*buffer_km))
select_all_trip_Is_where_stop_I_is_within_buffer_sql = "SELECT distinct(trip_I) FROM stop_times WHERE stop_I IN (" + stops_within_buffer_query_sql + ")"
trip_Is_to_remove_sql = "SELECT trip_I FROM trips WHERE trip_I NOT IN ( " + select_all_trip_Is_where_stop_I_is_within_buffer_sql + ")"
trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)["trip_I"].values
trip_Is_to_remove_string = ",".join([str(trip_I) for trip_I in trip_Is_to_remove])
remove_all_trips_fully_outside_buffer_sql = "DELETE FROM trips WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = "DELETE FROM stop_times WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
db_conn.execute(remove_all_trips_fully_outside_buffer_sql)
db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)
delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)
db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
if update_secondary_data:
update_secondary_data_copies(db_conn)
def load_checkpoints(self, checkpointDirs):
"""Load checkpoints from the checkpoint files into a dictionary.
The results are used to pre-populate the memoizer's lookup_table
Kwargs:
- checkpointDirs (list) : List of run folder to use as checkpoints
Eg. ['runinfo/001', 'runinfo/002']
Returns:
- dict containing, hashed -> future mappings
"""
self.memo_lookup_table = None
if not checkpointDirs:
return {}
if type(checkpointDirs) is not list:
raise BadCheckpoint("checkpointDirs expects a list of checkpoints")
return self._load_checkpoints(checkpointDirs)
def require(*args, **kwargs):
'''
Install a set of packages using pip
This is designed to be an interface for IPython notebooks that
replicates the requirements.txt pip format. This lets notebooks
specify which versions of packages they need inside the notebook
itself.
This function is the general-purpose interface that lets
the caller specify any version string for any package.
'''
# If called with no arguments, returns requirements list
if not args and not kwargs:
return freeze()
# Construct array of requirements
requirements = list(args)
extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
requirements.extend(extra)
args = ['install', '-q']
args.extend(requirements)
pip.main(args)
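# Hedged usage sketch inside a notebook cell: exact pins go in as positional
# strings, and keyword arguments attach any specifier to a package name. The
# package names and versions are illustrative.
require('requests==2.5.1', numpy='>=1.9')
reqs = require()  # with no arguments, returns the frozen requirements list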
def processResponse(self, arg, replytype, **kw):
"""
Parameters:
arg -- deferred
replytype -- typecode
"""
if self.debug:
log.msg('--->PROCESS REQUEST\n%s' %arg, debug=1)
for h in self.handlers:
arg.addCallback(h.processResponse, **kw)
arg.addCallback(self.parseResponse, replytype)
def lit_count(self):
"""
The number of LEDs on the bar graph actually lit up. Note that just
like :attr:`value`, this can be negative if the LEDs are lit from last
to first.
"""
lit_value = self.value * len(self)
if not isinstance(self[0], PWMLED):
lit_value = int(lit_value)
return lit_value
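# Worked example of the arithmetic above (a sketch assuming a 5-LED bar graph
# of non-PWM LEDs): value * len(self) gives the signed number of lit LEDs,
# truncated to an int when the LEDs are not PWM-capable.
#   value =  0.6, 5 LEDs -> lit_count = int( 0.6 * 5) =  3
#   value = -0.4, 5 LEDs -> lit_count = int(-0.4 * 5) = -2  (lit from the end)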
def rename(name):
# type: (str) -> None
""" Give the currently developed hotfix a new name. """
from peltak.extra.gitflow import logic
if name is None:
name = click.prompt('Hotfix name')
logic.hotfix.rename(name)
def decode_ay(ay):
"""Convert binary blob from DBus queries to strings."""
if ay is None:
return ''
elif isinstance(ay, str):
return ay
elif isinstance(ay, bytes):
return ay.decode('utf-8')
else:
# dbus.Array([dbus.Byte]) or any similar sequence type:
return bytearray(ay).rstrip(bytearray((0,))).decode('utf-8')
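# Quick self-check of the accepted input shapes; a dbus.Array of dbus.Byte
# values behaves like the plain list in the last case.
assert decode_ay(None) == ''
assert decode_ay('already text') == 'already text'
assert decode_ay(b'/dev/sdb1') == '/dev/sdb1'
assert decode_ay([0x2f, 0x64, 0x65, 0x76, 0x00]) == '/dev'  # trailing NULs stripped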
def decode(self, inputs, context, inference=False):
"""
Applies the decoder to inputs, given the context from the encoder.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param context: context from the encoder
:param inference: if True inference mode, if False training mode
"""
return self.decoder(inputs, context, inference)
def fromstring(cls, s, *args, **kwargs):
""" Returns a new Pattern from the given string.
Constraints are separated by a space.
If a constraint contains a space, it must be wrapped in [].
"""
s = s.replace("\(", "&lparen;")
s = s.replace("\)", "&rparen;")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "&lcurly;")
s = s.replace("\}", "&rcurly;")
p = []
i = 0
for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
# Spaces in a range encapsulated in square brackets are encoded.
# "[Windows Vista]" is one range, don't split on space.
p.append(s[i:m.start()])
p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
p.append(s[i:])
s = "".join(p)
s = s.replace("][", "] [")
s = s.replace(")(", ") (")
s = s.replace("\|", "⊢")
s = re.sub(r"\s+\|\s+", "|", s)
s = re.sub(r"\s+", " ", s)
s = re.sub(r"\{\s+", "{", s)
s = re.sub(r"\s+\}", "}", s)
s = s.split(" ")
s = [v.replace("&space;"," ") for v in s]
P = cls([], *args, **kwargs)
G, O, i = [], [], 0
for s in s:
constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
constraint.index = len(P.sequence)
P.sequence.append(constraint)
# Push a new group on the stack if string starts with "{".
# Parse constraint from string, add it to all open groups.
# Pop latest group from stack if string ends with "}".
# Insert groups in opened-first order (i).
while s.startswith("{"):
s = s[1:]
G.append((i, [])); i+=1
O.append([])
for g in G:
g[1].append(constraint)
while s.endswith("}"):
s = s[:-1]
if G: O[G[-1][0]] = G[-1][1]; G.pop()
P.groups = [g for g in O if g]
return P
def subscribe(self, clock_name: str=None, clock_slots: Iterable[str]=None, subscriptions: Dict[str, Any]={}):
"""Subscribes this Area to the given Areas and optionally given Slots. Must be called before the Area is run.
Args:
clock_name: The name of the Area that is used as synchronizing Clock.
clock_slots: The slots of the Clock relevant to this Area.
subscriptions: A dictionary containing the relevant Areas names as keys and optionally the Slots as values.
"""
for area in subscriptions: # type: str
init_full(self, area, subscriptions[area])
subscriptions[area] = {'slots': subscriptions[area]}
if clock_name is not None:
self.clock_name = clock_name
self.clock_slots = clock_slots
subscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1}
self.setup(puller=True, subscriptions=subscriptions)
def flush(self, preserve=None):
"""
Delete the cache for this dataset. Optionally preserve
a region. Helpful when working with overlapping volumes.
Warning: the preserve option is not multi-process safe.
You're liable to end up deleting the entire cache.
Optional:
preserve (Bbox: None): Preserve chunks located partially
or entirely within this bounding box.
Return: void
"""
if not os.path.exists(self.path):
return
if preserve is None:
shutil.rmtree(self.path)
return
for mip in self.vol.available_mips:
preserve_mip = self.vol.slices_from_global_coords(preserve)
preserve_mip = Bbox.from_slices(preserve_mip)
mip_path = os.path.join(self.path, self.vol.mip_key(mip))
if not os.path.exists(mip_path):
continue
for filename in os.listdir(mip_path):
bbox = Bbox.from_filename(filename)
if not Bbox.intersects(preserve_mip, bbox):
os.remove(os.path.join(mip_path, filename))
def immediate(self, name, value):
"""
Load something immediately
"""
setattr(self, name, value)
self._all.add(name) | Load something immediately |
def setconf(self, conf, rscpath, logger=None):
"""Set input conf in input path.
:param Configuration conf: conf to write to path.
:param str rscpath: specific resource path to use.
:param Logger logger: used to log info/errors.
    :raises: ConfDriver.Error in case of error.
"""
resource = self.pathresource(rscpath=rscpath, logger=logger)
if resource is None:
resource = self.resource()
try:
self._setconf(conf=conf, resource=resource, rscpath=rscpath)
except Exception as ex:
if logger is not None:
msg = 'Error while setting conf to {0}.'.format(rscpath)
full_msg = '{0} {1}: {2}'.format(msg, ex, format_exc())
logger.error(full_msg)
reraise(self.Error, self.Error(msg)) | Set input conf in input path.
:param Configuration conf: conf to write to path.
:param str rscpath: specific resource path to use.
:param Logger logger: used to log info/errors.
:raises: ConfDriver.Error in case of error.
def queue_it(queue=g_queue, **put_args):
"""
Wrapper. Instead of returning the result of the function, add it to a queue.
    .. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper | Wrapper. Instead of returning the result of the function, add it to a queue.
.. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into |
def in_resource(cls, session_type):
"""Returns True if the attribute is part of a given session type.
The session_type is a tuple with the interface type and resource_class
:type session_type: (constants.InterfaceType, str)
:rtype: bool
"""
if cls.resources is AllSessionTypes:
return True
return session_type in cls.resources | Returns True if the attribute is part of a given session type.
The session_type is a tuple with the interface type and resource_class
:type session_type: (constants.InterfaceType, str)
:rtype: bool |
def switch(self, idx, control):
"""Switch a single control of <idx>"""
old = None
new = None
if control == 'Q':
if self.PQ[idx] == 1:
old = 'PQ'
new = 'PV'
elif self.vQ[idx] == 1:
old = 'vQ'
new = 'vV'
elif control == 'P':
if self.PQ[idx] == 1:
old = 'PQ'
new = 'vQ'
elif self.PV[idx] == 1:
old = 'PV'
new = 'vV'
elif control == 'V':
if self.PV[idx] == 1:
old = 'PV'
new = 'PQ'
elif self.vV[idx] == 1:
old = 'vV'
new = 'vQ'
elif control == 'v':
if self.vQ[idx] == 1:
old = 'vQ'
new = 'PQ'
elif self.vV[idx] == 1:
old = 'vV'
new = 'PV'
if old and new:
self.__dict__[old][idx] = 0
self.__dict__[new][idx] = 1 | Switch a single control of <idx> |
def RunStateMethod(self, method_name, request=None, responses=None):
"""Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowMessages responding to the request.
"""
if self.rdf_flow.pending_termination:
self.Error(error_message=self.rdf_flow.pending_termination.reason)
return
client_id = self.rdf_flow.client_id
deadline = self.rdf_flow.processing_deadline
if deadline and rdfvalue.RDFDatetime.Now() > deadline:
raise flow.FlowError("Processing time for flow %s on %s expired." %
(self.rdf_flow.flow_id, self.rdf_flow.client_id))
self.rdf_flow.current_state = method_name
if request and responses:
logging.debug("Running %s for flow %s on %s, %d responses.", method_name,
self.rdf_flow.flow_id, client_id, len(responses))
else:
logging.debug("Running %s for flow %s on %s", method_name,
self.rdf_flow.flow_id, client_id)
try:
try:
method = getattr(self, method_name)
except AttributeError:
raise ValueError("Flow %s has no state method %s" %
(self.__class__.__name__, method_name))
# Prepare a responses object for the state method to use:
responses = flow_responses.Responses.FromResponses(
request=request, responses=responses)
if responses.status is not None:
self.SaveResourceUsage(responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
if method_name == "Start":
stats_collector_instance.Get().IncrementCounter(
"flow_starts", fields=[self.rdf_flow.flow_class_name])
method()
else:
method(responses)
if self.replies_to_process:
if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process)
else:
self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process)
self.replies_to_process = []
# We don't know here what exceptions can be thrown in the flow but we have
# to continue. Thus, we catch everything.
except Exception as e: # pylint: disable=broad-except
# This flow will terminate now
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.rdf_flow.flow_class_name])
logging.exception("Flow %s on %s raised %s.", self.rdf_flow.flow_id,
client_id, utils.SmartUnicode(e))
self.Error(
error_message=utils.SmartUnicode(e), backtrace=traceback.format_exc()) | Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowMessages responding to the request. |
def enterprise_login_required(view):
"""
View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
    This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
    the decorated view.
    If there is no enterprise in the database for the given `enterprise_uuid`,
    or if the user is not authenticated, then it will redirect the user to the
    enterprise-linked SSO login page.
Usage::
        @enterprise_login_required
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Wrap the decorator.
"""
if 'enterprise_uuid' not in kwargs:
raise Http404
enterprise_uuid = kwargs['enterprise_uuid']
enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
# Now verify if the user is logged in. If user is not logged in then
# send the user to the login screen to sign in with an
# Enterprise-linked IdP and the pipeline will get them back here.
if not request.user.is_authenticated:
parsed_current_url = urlparse(request.get_full_path())
parsed_query_string = parse_qs(parsed_current_url.query)
parsed_query_string.update({
'tpa_hint': enterprise_customer.identity_provider,
FRESH_LOGIN_PARAMETER: 'yes'
})
next_url = '{current_path}?{query_string}'.format(
current_path=quote(parsed_current_url.path),
query_string=urlencode(parsed_query_string, doseq=True)
)
return redirect(
'{login_url}?{params}'.format(
login_url='/login',
params=urlencode(
{'next': next_url}
)
)
)
# Otherwise, they can proceed to the original view.
return view(request, *args, **kwargs)
return wrapper | View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
the decorated view.
If there is no enterprise in the database for the given `enterprise_uuid`,
or if the user is not authenticated, then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ... |
def load_terminfo(terminal_name=None, fallback='vt100'):
"""
If the environment variable TERM is unset try with `fallback` if not empty.
vt100 is a popular terminal supporting ANSI X3.64.
"""
    # Honour an explicitly passed terminal name before falling back to $TERM.
    terminal_name = terminal_name or os.getenv('TERM')
if not terminal_name:
if not fallback:
raise TerminfoError('Environment variable TERM is unset and no fallback was requested')
else:
terminal_name = fallback
if os.getenv('TERMINFO'):
# from man terminfo(5):
# if the environment variable TERMINFO is set,
# only that directory is searched
terminfo_locations = [os.getenv('TERMINFO')]
else:
terminfo_locations = [] # from most to least important
if os.getenv('TERMINFO_DIRS'):
for i in os.getenv('TERMINFO_DIRS').split(':'):
# from man terminfo(5)
# An empty directory name is interpreted as /usr/share/terminfo.
terminfo_locations.append(i or '/usr/share/terminfo')
terminfo_locations += [
os.path.expanduser('~/.terminfo'),
'/etc/terminfo',
'/usr/local/ncurses/share/terminfo',
'/lib/terminfo',
'/usr/share/terminfo'
]
# remove duplicates preserving order
terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
terminfo_path = None
for dirpath in terminfo_locations:
path = os.path.join(dirpath, terminal_name[0], terminal_name)
if os.path.exists(path):
terminfo_path = path
break
    if not terminfo_path:
raise TerminfoError("Couldn't find a terminfo file for terminal '%s'" % terminal_name)
from terminfo_index import BOOLEAN_CAPABILITIES, NUMBER_CAPABILITIES, STRING_CAPABILITIES
data = open(terminfo_path, 'rb').read()
# header (see man term(5), STORAGE FORMAT)
header = struct.unpack('<hhhhhh', data[:12]) # 2 bytes == 1 short integer
magic_number = header[0] # the magic number (octal 0432)
size_names = header[1] # the size, in bytes, of the names section
size_booleans = header[2] # the number of bytes in the boolean section
num_numbers = header[3] # the number of short integers in the numbers section
num_offsets = header[4] # the number of offsets (short integers) in the strings section
size_strings = header[5] # the size, in bytes, of the string table
if magic_number != 0o432:
raise TerminfoError('Bad magic number')
# sections indexes
idx_section_names = 12
idx_section_booleans = idx_section_names + size_names
idx_section_numbers = idx_section_booleans + size_booleans
if idx_section_numbers % 2 != 0:
idx_section_numbers += 1 # must start on an even byte
idx_section_strings = idx_section_numbers + 2 * num_numbers
idx_section_string_table = idx_section_strings + 2 * num_offsets
# terminal names
terminal_names = data[idx_section_names:idx_section_booleans].decode('ascii')
terminal_names = terminal_names[:-1].split('|') # remove ASCII NUL and split
terminfo = Terminfo(terminal_names[0], terminal_names[1:])
# booleans
    for i, idx in enumerate(range(idx_section_booleans, idx_section_booleans + size_booleans)):
        # A boolean capability is present when its byte in the boolean section is non-zero.
        cap = BooleanCapability(*BOOLEAN_CAPABILITIES[i], value=data[idx:idx + 1] != b'\x00')
        terminfo.booleans[cap.variable] = cap
# numbers
numbers = struct.unpack('<'+'h' * num_numbers, data[idx_section_numbers:idx_section_strings])
for i,strnum in enumerate(numbers):
cap = NumberCapability(*NUMBER_CAPABILITIES[i], value=strnum)
terminfo.numbers[cap.variable] = cap
# strings
offsets = struct.unpack('<'+'h' * num_offsets, data[idx_section_strings:idx_section_string_table])
idx = 0
for offset in offsets:
k = 0
string = []
        while offset != -1:
char = data[idx_section_string_table + offset + k:idx_section_string_table + offset + k + 1]
if char == b'\x00':
break
string.append(char.decode('iso-8859-1'))
k += 1
string = u''.join(string)
cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
terminfo.strings[cap.variable] = cap
idx += 1
terminfo._reset_index()
return terminfo | If the environment variable TERM is unset try with `fallback` if not empty.
vt100 is a popular terminal supporting ANSI X3.64. |
def get_children(self, node):
"""Get children."""
# if node in self.nodes:
    try:
        index = self.nodes.index(node) + 1
        return [self.nodes[index]]
    except (ValueError, IndexError):
        # ValueError: node is not in self.nodes; IndexError: node is the last element.
        return []
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json | Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json |
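A hedged usage sketch, assuming the function above is importable and a StreamSets-style REST endpoint is reachable; the URL, credentials and export file name are placeholders.
import json

with open('exported_pipeline.json') as fh:            # placeholder export file
    payload = json.load(fh)

result = create_pipeline(
    'http://localhost:18630/rest/v1/pipelines',        # hypothetical host URL
    ('admin', 'admin'),                                 # placeholder credentials
    payload,
    verify_ssl=False,
)
print(result)                                           # JSON description of the created pipeline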
def preload(python_data: LdapObject, database: Optional[Database] = None) -> LdapObject:
""" Preload all NotLoaded fields in LdapObject. """
changes = {}
# Load objects within lists.
def preload_item(value: Any) -> Any:
if isinstance(value, NotLoaded):
return value.load(database)
else:
return value
for name in python_data.keys():
value_list = python_data.get_as_list(name)
# Check for errors.
if isinstance(value_list, NotLoadedObject):
raise RuntimeError(f"{name}: Unexpected NotLoadedObject outside list.")
elif isinstance(value_list, NotLoadedList):
value_list = value_list.load(database)
else:
if any(isinstance(v, NotLoadedList) for v in value_list):
raise RuntimeError(f"{name}: Unexpected NotLoadedList in list.")
elif any(isinstance(v, NotLoadedObject) for v in value_list):
value_list = [preload_item(value) for value in value_list]
else:
value_list = None
if value_list is not None:
changes[name] = value_list
return python_data.merge(changes) | Preload all NotLoaded fields in LdapObject. |
def profileUpperLimit(self, delta = 2.71):
"""
Compute one-sided upperlimit via profile method.
"""
a = self.p_2
b = self.p_1
if self.vertex_x < 0:
c = self.p_0 + delta
else:
c = self.p_0 - self.vertex_y + delta
if b**2 - 4. * a * c < 0.:
print('WARNING')
print(a, b, c)
return 0.
return max((np.sqrt(b**2 - 4. * a * c) - b) / (2. * a), (-1. * np.sqrt(b**2 - 4. * a * c) - b) / (2. * a)) | Compute one-sided upperlimit via profile method. |
def get(no_create=False, server=None, port=None, force_uuid=None):
"""Get the thread local singleton"""
pid = os.getpid()
thread = threading.current_thread()
wdb = Wdb._instances.get((pid, thread))
if not wdb and not no_create:
wdb = object.__new__(Wdb)
Wdb.__init__(wdb, server, port, force_uuid)
wdb.pid = pid
wdb.thread = thread
Wdb._instances[(pid, thread)] = wdb
elif wdb:
if (server is not None and wdb.server != server
or port is not None and wdb.port != port):
log.warn('Different server/port set, ignoring')
else:
wdb.reconnect_if_needed()
return wdb | Get the thread local singleton |
def display(self, filename=None):
"""Displays/opens the doc using the OS's default application."""
if filename is None:
filename = 'display_temp.svg'
self.save(filename)
open_in_browser(filename) | Displays/opens the doc using the OS's default application. |
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
"""Return a dataframe of validation results for the appropriate series vs the vector of validators.
Args:
table (pd.DataFrame): A dataframe on which to apply validation logic.
failed_only (bool): If ``True``: return only the indexes that failed to validate.
"""
return pd.concat([
self._validate_input(table, failed_only=failed_only),
self._validate_output(table, failed_only=failed_only),
]).fillna(True) | Return a dataframe of validation results for the appropriate series vs the vector of validators.
Args:
table (pd.DataFrame): A dataframe on which to apply validation logic.
failed_only (bool): If ``True``: return only the indexes that failed to validate. |
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand."""
assert left > right, _assert_fail_message(message, left, right, "<=", extra) | Raises an AssertionError if left_hand <= right_hand. |
def enqueue_job(self, job):
"""
    Move a scheduled job to a queue. In addition, it also puts the job
    back into the scheduler if needed.
"""
self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))
interval = job.meta.get('interval', None)
repeat = job.meta.get('repeat', None)
cron_string = job.meta.get('cron_string', None)
# If job is a repeated job, decrement counter
if repeat:
job.meta['repeat'] = int(repeat) - 1
queue = self.get_queue_for_job(job)
queue.enqueue_job(job)
self.connection.zrem(self.scheduled_jobs_key, job.id)
if interval:
# If this is a repeat job and counter has reached 0, don't repeat
if repeat is not None:
if job.meta['repeat'] == 0:
return
self.connection.zadd(self.scheduled_jobs_key,
{job.id: to_unix(datetime.utcnow()) + int(interval)})
elif cron_string:
# If this is a repeat job and counter has reached 0, don't repeat
if repeat is not None:
if job.meta['repeat'] == 0:
return
self.connection.zadd(self.scheduled_jobs_key,
                             {job.id: to_unix(get_next_scheduled_time(cron_string))}) | Move a scheduled job to a queue. In addition, it also puts the job
back into the scheduler if needed. |
def count(self, model_class, conditions=None):
'''
Counts the number of records in the model's table.
- `model_class`: the model to count.
- `conditions`: optional SQL conditions (contents of the WHERE clause).
'''
query = 'SELECT count() FROM $table'
if conditions:
query += ' WHERE ' + conditions
query = self._substitute(query, model_class)
r = self._send(query)
return int(r.text) if r.text else 0 | Counts the number of records in the model's table.
- `model_class`: the model to count.
- `conditions`: optional SQL conditions (contents of the WHERE clause). |
def delete(ctx, opts, owner_repo_identifier, yes):
"""
Delete an entitlement from a repository.
- OWNER/REPO/IDENTIFIER: Specify the OWNER namespace (i.e. user or org),
and the REPO name that has an entitlement identified by IDENTIFIER. All
separated by a slash.
Example: 'your-org/your-repo/abcdef123456'
Full CLI example:
$ cloudsmith ents delete your-org/your-repo/abcdef123456
"""
owner, repo, identifier = owner_repo_identifier
delete_args = {
"identifier": click.style(identifier, bold=True),
"repository": click.style(repo, bold=True),
}
prompt = (
"delete the %(identifier)s entitlement from the %(repository)s "
"repository" % delete_args
)
if not utils.confirm_operation(prompt, assume_yes=yes):
return
click.secho(
"Deleting %(identifier)s entitlement from the %(repository)s "
"repository ... " % delete_args,
nl=False,
)
context_msg = "Failed to delete the entitlement!"
with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
with maybe_spinner(opts):
api.delete_entitlement(owner=owner, repo=repo, identifier=identifier)
click.secho("OK", fg="green") | Delete an entitlement from a repository.
- OWNER/REPO/IDENTIFIER: Specify the OWNER namespace (i.e. user or org),
and the REPO name that has an entitlement identified by IDENTIFIER. All
separated by a slash.
Example: 'your-org/your-repo/abcdef123456'
Full CLI example:
$ cloudsmith ents delete your-org/your-repo/abcdef123456 |
def array(
item_processor, # type: Processor
alias=None, # type: Optional[Text]
nested=None, # type: Optional[Text]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Array(item_processor, alias, nested, omit_empty)
return _processor_wrap_if_hooks(processor, hooks) | Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object. |
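A small sketch of how this processor composes with the rest of the module; it assumes the companion dictionary, string and integer processors plus a parse_from_string helper exist alongside array (they are not shown above), and the expected output shape is an educated guess.
import declxml as xml   # assumed module name for the processors defined here

processor = xml.dictionary('root-element', [
    xml.string('some-element'),
    xml.array(xml.integer('array-item'), alias='items', nested='nested-array'),
])

document = """
<root-element>
    <some-element>ABC</some-element>
    <nested-array>
        <array-item>0</array-item>
        <array-item>1</array-item>
    </nested-array>
</root-element>
"""

# Expected to yield something like {'some-element': 'ABC', 'items': [0, 1]}.
print(xml.parse_from_string(processor, document))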
def get_or_create_evidence(self, citation: Citation, text: str) -> Evidence:
"""Create an entry and object for given evidence if it does not exist."""
sha512 = hash_evidence(text=text, type=str(citation.type), reference=str(citation.reference))
if sha512 in self.object_cache_evidence:
evidence = self.object_cache_evidence[sha512]
self.session.add(evidence)
return evidence
evidence = self.get_evidence_by_hash(sha512)
if evidence is not None:
self.object_cache_evidence[sha512] = evidence
return evidence
evidence = Evidence(
text=text,
citation=citation,
sha512=sha512,
)
self.session.add(evidence)
self.object_cache_evidence[sha512] = evidence
return evidence | Create an entry and object for given evidence if it does not exist. |
def from_str(cls, timestr, shaked=False):
"""Use `dateutil` module to parse the give string
:param basestring timestr: string representing a date to parse
:param bool shaked: whether the input parameter been already
cleaned or not.
"""
orig = timestr
if not shaked:
timestr = cls.fix_timezone_separator(timestr)
try:
date = parser.parse(timestr)
except ValueError:
if not shaked:
shaked = False
for shaker in [
cls.fix_mispelled_day,
cls.remove_parenthesis_around_tz,
cls.remove_quotes_around_tz]:
new_timestr = shaker(timestr)
if new_timestr is not None:
timestr = new_timestr
shaked = True
if shaked:
try:
return cls.from_str(timestr, shaked=True)
except ValueError:
# raise ValueError below with proper message
pass
msg = u"Unknown string format: {!r}".format(orig)
raise ValueError(msg), None, sys.exc_info()[2]
else:
try:
return cls.from_datetime(date)
except ValueError:
new_str = cls.remove_timezone(orig)
if new_str is not None:
return cls.from_str(new_str)
else:
                raise | Use `dateutil` module to parse the given string
:param basestring timestr: string representing a date to parse
:param bool shaked: whether the input parameter has already been
cleaned or not.
def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'):
r"""Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`.
Parameters
----------
path : str
A folder path to `npz` files.
regx : str
Regx for the file name.
Examples
---------
Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf.
>>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)')
"""
file_list = load_file_list(path=path, regx=regx)
for f in file_list:
W = load_npz(path, f)[0]
logging.info("%s --> %s" % (f, f.split('.')[0] + '.pdf'))
visualize.draw_weights(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012) | r"""Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`.
Parameters
----------
path : str
A folder path to `npz` files.
regx : str
Regx for the file name.
Examples
---------
Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf.
>>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)') |
def set_backlight(self, backlight):
"""Enable or disable the backlight. If PWM is not enabled (default), a
non-zero backlight value will turn on the backlight and a zero value will
turn it off. If PWM is enabled, backlight can be any value from 0.0 to
1.0, with 1.0 being full intensity backlight.
"""
if self._backlight is not None:
if self._pwm_enabled:
self._pwm.set_duty_cycle(self._backlight, self._pwm_duty_cycle(backlight))
else:
self._gpio.output(self._backlight, self._blpol if backlight else not self._blpol) | Enable or disable the backlight. If PWM is not enabled (default), a
non-zero backlight value will turn on the backlight and a zero value will
turn it off. If PWM is enabled, backlight can be any value from 0.0 to
1.0, with 1.0 being full intensity backlight. |
def define_standalone_options(parser, extra_options=None):
'''
Adds the options specific to the database connection.
Parses the agency configuration files and uses its configuration as the
default values.
'''
c = config.parse_service_config()
parser.add_option('--dbhost', '-H', action='store', dest='db_host',
type='str', help='hostname of the database',
default=c.db.host)
parser.add_option('--dbname', '-n', action='store', dest='db_name',
type='str', help='name of database to use',
default=c.db.name)
parser.add_option('--dbport', '-P', action='store', dest='db_port',
type='str', help='port of database to use',
default=c.db.port)
parser.add_option('--dbusername', dest="db_username",
help="username to use for authentication ",
metavar="USER", default=c.db.username)
parser.add_option('--dbpassword', dest="db_password",
help="password to use for authentication ",
metavar="PASSWORD", default=c.db.password)
parser.add_option('--ssl', '-S', action='store_true', dest='db_https',
help='whether to use SSL db connections',
default=False)
parser.add_option('--log', action='store', dest='log',
type='str', help='log level to set',
default=os.environ.get('FEAT_DEBUG', '2'))
if extra_options:
for option in extra_options:
parser.add_option(option)
return parser | Adds the options specific to the database connection.
Parses the agency configuration files and uses its configuration as the
default values. |
def is_citeable(publication_info):
"""Check some fields in order to define if the article is citeable.
:param publication_info: publication_info field
already populated
:type publication_info: list
"""
def _item_has_pub_info(item):
return all(
key in item for key in (
'journal_title', 'journal_volume'
)
)
def _item_has_page_or_artid(item):
return any(
key in item for key in (
'page_start', 'artid'
)
)
has_pub_info = any(
_item_has_pub_info(item) for item in publication_info
)
has_page_or_artid = any(
_item_has_page_or_artid(item) for item in publication_info
)
return has_pub_info and has_page_or_artid | Check some fields in order to define if the article is citeable.
:param publication_info: publication_info field
already populated
:type publication_info: list |
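A quick worked example, assuming is_citeable as defined above is in scope; the two records are made up to show one citeable and one non-citeable case.
citeable = [{'journal_title': 'Phys. Rev. D', 'journal_volume': '94', 'artid': '054021'}]
not_citeable = [{'journal_title': 'Phys. Rev. D', 'page_start': '1'}]   # missing journal_volume

print(is_citeable(citeable))      # True: journal title and volume plus an artid
print(is_citeable(not_citeable))  # False: no journal_volume, so the publication info is incomplete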
def add_permission(FunctionName, StatementId, Action, Principal, SourceArn=None,
SourceAccount=None, Qualifier=None,
region=None, key=None, keyid=None, profile=None):
'''
Add a permission to a lambda function.
    Returns {updated: True} if the permission was added and returns
    {updated: False} if the permission was not added.
CLI Example:
.. code-block:: bash
        salt myminion boto_lambda.add_permission my_function my_id "lambda:*" \\
s3.amazonaws.com aws:arn::::bucket-name \\
aws-account-id
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
for key in ('SourceArn', 'SourceAccount', 'Qualifier'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
conn.add_permission(FunctionName=FunctionName, StatementId=StatementId,
Action=Action, Principal=str(Principal), # future lint: disable=blacklisted-function
**kwargs)
return {'updated': True}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} | Add a permission to a lambda function.
Returns {updated: True} if the permission was added and returns
{updated: False} if the permission was not added.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.add_permission my_function my_id "lambda:*" \\
s3.amazonaws.com aws:arn::::bucket-name \\
aws-account-id |
def move(self, dest, src):
"""Move element from sequence, member from mapping.
:param dest: the destination
:type dest: Pointer
:param src: the source
:type src: Pointer
:return: resolved document
:rtype: Target
.. note::
This operation is functionally identical to a "remove" operation on
the "from" location, followed immediately by an "add" operation at
the target location with the value that was just removed.
The "from" location MUST NOT be a proper prefix of the "path"
location; i.e., a location cannot be moved into one of its children
"""
doc = deepcopy(self.document)
# delete
parent, fragment = None, doc
for token in Pointer(src):
parent, fragment = fragment, token.extract(fragment,
bypass_ref=True)
if isinstance(parent, Mapping):
del parent[token]
if isinstance(parent, MutableSequence):
parent.pop(int(token))
# insert
return Target(doc).add(dest, fragment) | Move element from sequence, member from mapping.
:param dest: the destination
:type dest: Pointer
:param src: the source
:type src: Pointer
:return: resolved document
:rtype: Target
.. note::
This operation is functionally identical to a "remove" operation on
the "from" location, followed immediately by an "add" operation at
the target location with the value that was just removed.
The "from" location MUST NOT be a proper prefix of the "path"
location; i.e., a location cannot be moved into one of its children |
def order_by(self, key_selector=identity):
'''Sorts by a key in ascending order.
Introduces a primary sorting order to the sequence. Additional sort
criteria should be specified by subsequent calls to then_by() and
then_by_descending(). Calling order_by() or order_by_descending() on
the results of a call to order_by() will introduce a new primary
ordering which will override any already established ordering.
This method performs a stable sort. The order of two elements with the
same key will be preserved.
Note: This method uses deferred execution.
Args:
key_selector: A unary function which extracts a key from each
element using which the result will be ordered.
Returns:
An OrderedQueryable over the sorted elements.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the key_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call order_by() on a "
"closed Queryable.")
if not is_callable(key_selector):
raise TypeError("order_by() parameter key_selector={key_selector} "
"is not callable".format(key_selector=repr(key_selector)))
return self._create_ordered(iter(self), -1, key_selector) | Sorts by a key in ascending order.
Introduces a primary sorting order to the sequence. Additional sort
criteria should be specified by subsequent calls to then_by() and
then_by_descending(). Calling order_by() or order_by_descending() on
the results of a call to order_by() will introduce a new primary
ordering which will override any already established ordering.
This method performs a stable sort. The order of two elements with the
same key will be preserved.
Note: This method uses deferred execution.
Args:
key_selector: A unary function which extracts a key from each
element using which the result will be ordered.
Returns:
An OrderedQueryable over the sorted elements.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the key_selector is not callable. |
def stop(self):
"""Stop the process."""
logger.info("stopping process")
self.watcher.stop()
os.kill(self.child_pid, signal.SIGTERM) | Stop the process. |
def _raiseImageMissing(self, pattern):
""" Builds an ImageMissing event and triggers the default handler (or the custom handler,
if one has been specified). Returns True if throwing method should retry, False if it
should skip, and throws an exception if it should abort. """
event = ImageMissingEvent(self, pattern=pattern, event_type="MISSING")
if self._imageMissingHandler is not None:
self._imageMissingHandler(event)
response = (event._response or self._findFailedResponse)
#if response == "PROMPT": # Prompt not valid for ImageMissing error
# response = _findFailedPrompt(pattern)
if response == "ABORT":
raise FindFailed(event)
elif response == "SKIP":
return False
elif response == "RETRY":
return True | Builds an ImageMissing event and triggers the default handler (or the custom handler,
if one has been specified). Returns True if throwing method should retry, False if it
should skip, and throws an exception if it should abort. |
def get_labels(self, plt, label_fontsize=10):
"""
Handles the optional labelling of the plot with relevant quantities
Args:
plt (plt): Plot of the locpot vs c axis
label_fontsize (float): Fontsize of labels
Returns Labelled plt
"""
# center of vacuum and bulk region
if len(self.slab_regions) > 1:
label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0])/2
if abs(self.slab_regions[0][0]-self.slab_regions[0][1]) > \
abs(self.slab_regions[1][0]-self.slab_regions[1][1]):
label_in_bulk = self.slab_regions[0][1]/2
else:
label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2
else:
label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1])/2
if self.slab_regions[0][0] > 1-self.slab_regions[0][1]:
label_in_vac = self.slab_regions[0][0] / 2
else:
label_in_vac = (1 + self.slab_regions[0][1]) / 2
plt.plot([0, 1], [self.vacuum_locpot]*2, 'b--', zorder=-5, linewidth=1)
xy = [label_in_bulk, self.vacuum_locpot+self.ave_locpot*0.05]
plt.annotate(r"$V_{vac}=%.2f$" %(self.vacuum_locpot), xy=xy,
xytext=xy, color='b', fontsize=label_fontsize)
# label the fermi energy
plt.plot([0, 1], [self.efermi]*2, 'g--',
zorder=-5, linewidth=3)
xy = [label_in_bulk, self.efermi+self.ave_locpot*0.05]
plt.annotate(r"$E_F=%.2f$" %(self.efermi), xytext=xy,
xy=xy, fontsize=label_fontsize, color='g')
# label the bulk-like locpot
plt.plot([0, 1], [self.ave_bulk_p]*2, 'r--', linewidth=1., zorder=-1)
xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]
plt.annotate(r"$V^{interior}_{slab}=%.2f$" % (self.ave_bulk_p),
xy=xy, xytext=xy, color='r', fontsize=label_fontsize)
# label the work function as a barrier
plt.plot([label_in_vac]*2, [self.efermi, self.vacuum_locpot],
'k--', zorder=-5, linewidth=2)
xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]
plt.annotate(r"$\Phi=%.2f$" %(self.work_function),
xy=xy, xytext=xy, fontsize=label_fontsize)
return plt | Handles the optional labelling of the plot with relevant quantities
Args:
plt (plt): Plot of the locpot vs c axis
label_fontsize (float): Fontsize of labels
Returns Labelled plt |
def _update_settings(self, dialect):
"""Sets the widget settings to those of the chosen dialect"""
# the first parameter is the dialect itself --> ignore
for parameter in self.csv_params[2:]:
pname, ptype, plabel, phelp = parameter
widget = self._widget_from_p(pname, ptype)
if ptype is types.TupleType:
ptype = types.ObjectType
digest = Digest(acceptable_types=[ptype])
if pname == 'self.has_header':
if self.has_header is not None:
widget.SetValue(digest(self.has_header))
else:
value = getattr(dialect, pname)
widget.SetValue(digest(value)) | Sets the widget settings to those of the chosen dialect |
def upload(self, login, package_name, release, basename, fd, distribution_type,
description='', md5=None, size=None, dependencies=None, attrs=None, channels=('main',), callback=None):
'''
Upload a new distribution to a package release.
:param login: the login of the package owner
:param package_name: the name of the package
    :param release: the version string of the release
    :param basename: the basename of the distribution to upload
:param fd: a file like object to upload
:param distribution_type: pypi or conda or ipynb, etc
:param description: (optional) a short description about the file
:param attrs: any extra attributes about the file (eg. build=1, pyversion='2.7', os='osx')
'''
url = '%s/stage/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename))
if attrs is None:
attrs = {}
if not isinstance(attrs, dict):
raise TypeError('argument attrs must be a dictionary')
payload = dict(distribution_type=distribution_type, description=description, attrs=attrs,
dependencies=dependencies, channels=channels)
data, headers = jencode(payload)
res = self.session.post(url, data=data, headers=headers)
self._check_response(res)
obj = res.json()
s3url = obj['post_url']
s3data = obj['form_data']
if md5 is None:
_hexmd5, b64md5, size = compute_hash(fd, size=size)
elif size is None:
spos = fd.tell()
fd.seek(0, os.SEEK_END)
size = fd.tell() - spos
fd.seek(spos)
s3data['Content-Length'] = size
s3data['Content-MD5'] = b64md5
data_stream, headers = stream_multipart(s3data, files={'file':(basename, fd)},
callback=callback)
request_method = self.session if s3url.startswith(self.domain) else requests
s3res = request_method.post(
s3url, data=data_stream,
verify=self.session.verify, timeout=10 * 60 * 60,
headers=headers
)
if s3res.status_code != 201:
logger.info(s3res.text)
logger.info('')
logger.info('')
raise errors.BinstarError('Error uploading package', s3res.status_code)
url = '%s/commit/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename))
payload = dict(dist_id=obj['dist_id'])
data, headers = jencode(payload)
res = self.session.post(url, data=data, headers=headers)
self._check_response(res)
return res.json() | Upload a new distribution to a package release.
:param login: the login of the package owner
:param package_name: the name of the package
:param release: the version string of the release
:param basename: the basename of the distribution to upload
:param fd: a file like object to upload
:param distribution_type: pypi or conda or ipynb, etc
:param description: (optional) a short description about the file
:param attrs: any extra attributes about the file (eg. build=1, pyversion='2.7', os='osx') |
def make_simple(self):
"""
Return a cylinder with the thread's average radius & length.
:math:`radius = (inner_radius + outer_radius) / 2`
"""
(inner_radius, outer_radius) = self.get_radii()
radius = (inner_radius + outer_radius) / 2
return cadquery.Workplane('XY') \
.circle(radius).extrude(self.length) | Return a cylinder with the thread's average radius & length.
:math:`radius = (inner_radius + outer_radius) / 2` |
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value) | A helper-wrapper for the safe_value_2() family. |
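A tiny runnable sketch, assuming safe_either as defined above is in scope; safe_get below is a hypothetical stand-in for the safe_value_2() family of getters mentioned in the docstring.
def safe_get(dictionary, key, default_value=None):
    """Hypothetical stand-in getter: plain dict lookup with a default."""
    return dictionary.get(key, default_value)

config = {'hostname': 'db01'}
print(safe_either(safe_get, config, 'host', 'hostname'))        # 'db01', found via the fallback key
print(safe_either(safe_get, config, 'port', 'db_port', 5432))   # 5432, the supplied default value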
def clear_alarms(alarm):
'''
    Clear (acknowledge) an alarm event. The arguments are ``all`` for all current
    alarms, a specific alarm number (e.g. ``1``), or an alarm string identifier
    (e.g. ``MGMT_ALARM_PROXY_CONFIG_ERROR``).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
return _subprocess(cmd) | Clear (acknowledge) an alarm event. The arguments are “all” for all current
alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier
(e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name] |
def open(self):
'''Opens the stream for reading.'''
options = copy(self.__options)
# Get scheme and format if not already given
compression = None
if self.__scheme is None or self.__format is None:
detected_scheme, detected_format = helpers.detect_scheme_and_format(self.__source)
scheme = self.__scheme or detected_scheme
format = self.__format or detected_format
# Get compression
for type in config.SUPPORTED_COMPRESSION:
if self.__compression == type or detected_format == type:
compression = type
else:
scheme = self.__scheme
format = self.__format
# Initiate loader
self.__loader = None
if scheme is not None:
loader_class = self.__custom_loaders.get(scheme)
if loader_class is None:
if scheme not in config.LOADERS:
message = 'Scheme "%s" is not supported' % scheme
raise exceptions.SchemeError(message)
loader_path = config.LOADERS[scheme]
if loader_path:
loader_class = helpers.import_attribute(loader_path)
if loader_class is not None:
loader_options = helpers.extract_options(options, loader_class.options)
if compression and 'http_stream' in loader_class.options:
loader_options['http_stream'] = False
self.__loader = loader_class(
bytes_sample_size=self.__bytes_sample_size,
**loader_options)
# Zip compression
if compression == 'zip' and six.PY3:
source = self.__loader.load(self.__source, mode='b')
with zipfile.ZipFile(source) as archive:
name = archive.namelist()[0]
if 'filename' in options.keys():
name = options['filename']
del options['filename']
with archive.open(name) as file:
source = tempfile.NamedTemporaryFile(suffix='.' + name)
for line in file:
source.write(line)
source.seek(0)
self.__source = source
self.__loader = StreamLoader(bytes_sample_size=self.__bytes_sample_size)
format = self.__format or helpers.detect_scheme_and_format(source.name)[1]
scheme = 'stream'
# Gzip compression
elif compression == 'gz' and six.PY3:
name = self.__source.replace('.gz', '')
self.__source = gzip.open(self.__loader.load(self.__source, mode='b'))
self.__loader = StreamLoader(bytes_sample_size=self.__bytes_sample_size)
format = self.__format or helpers.detect_scheme_and_format(name)[1]
scheme = 'stream'
# Not supported compression
elif compression:
message = 'Compression "%s" is not supported for your Python version'
raise exceptions.TabulatorException(message % compression)
# Initiate parser
parser_class = self.__custom_parsers.get(format)
if parser_class is None:
if format not in config.PARSERS:
message = 'Format "%s" is not supported' % format
raise exceptions.FormatError(message)
parser_class = helpers.import_attribute(config.PARSERS[format])
parser_options = helpers.extract_options(options, parser_class.options)
self.__parser = parser_class(self.__loader,
force_parse=self.__force_parse,
**parser_options)
# Bad options
if options:
message = 'Not supported option(s) "%s" for scheme "%s" and format "%s"'
message = message % (', '.join(options), scheme, format)
warnings.warn(message, UserWarning)
# Open and setup
self.__parser.open(self.__source, encoding=self.__encoding)
self.__extract_sample()
self.__extract_headers()
if not self.__allow_html:
self.__detect_html()
# Set scheme/format/encoding
self.__actual_scheme = scheme
self.__actual_format = format
self.__actual_encoding = self.__parser.encoding
return self | Opens the stream for reading. |
def FindByIndex(node, index):
'''
Method which finds child according to index. Applies only to nodes whose children are sorted into a dict,
so if the current node's children are in a list it will recursively search - similarly if the index is not found
in the current node's dictionary indexes.
:param node: current node to search for
:param index: index of child.
:return:
'''
result = None
if isinstance(node.children, dict):
result = node.GetChild(index)
if result is None:
children = list(node.children.keys())
child = 0
while child < len(children) and result is None:
key = children[child]
result = FindByIndex(node.GetChild(key), index)
if result is not None:
break
child += 1
else:
child = 0
while child < len(node.children) and result is None:
result = FindByIndex(node.GetChild(child), index)
if result is not None:
break
child += 1
return result | Method which finds child according to index. Applies only to nodes whose children are sorted into a dict,
so if the current node's children are in a list it will recursively search - similarly if the index is not found
in the current node's dictionary indexes.
:param node: current node to search for
:param index: index of child.
:return: |
def _get_field(self, field):
"""
Queries the api for a single field for the record by `id`.
This method should only be called indirectly by cached properties.
:param field: name of the record field to load
"""
if not hasattr(self, "id") or self.id is None:
raise APIResponseError("Cannot query an article without an id")
sq = next(SearchQuery(q="id:{}".format(self.id), fl=field))
# If the requested field is not present in the returning Solr doc,
# return None instead of hitting _get_field again.
if field not in sq._raw:
# These fields will never be in the result solr document;
# pass through to __getattribute__ to allow the relevant
# secondary service queries
if field in ["reference", "citation", "metrics", "bibtex"]:
pass
else:
return None
value = sq.__getattribute__(field)
self._raw[field] = value
return value | Queries the api for a single field for the record by `id`.
This method should only be called indirectly by cached properties.
:param field: name of the record field to load |
def get_user_permissions(self, username):
"""
:returns: list of dicts, or an empty list if there are no permissions.
:param string username: User to set permissions for.
"""
path = Client.urls['user_permissions'] % (username,)
conns = self._call(path, 'GET')
return conns | :returns: list of dicts, or an empty list if there are no permissions.
:param string username: User to get permissions for.
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
"""
Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs) | Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str |
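An illustrative call, assuming an already-configured hook instance exposing the method above; the collection, filter and database names are placeholders.
# Hypothetical hook instance; construction/connection details are not shown above.
deleted = hook.delete_many(
    mongo_collection='events',
    filter_doc={'status': 'stale'},
    mongo_db='analytics',
)
print(deleted.deleted_count)   # pymongo's DeleteResult reports how many documents were removed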
def _get_argspec(func):
"""Helper function to support both Python versions"""
if inspect.isclass(func):
func = func.__init__
if not inspect.isfunction(func):
# Init function not existing
return [], False
parameters = inspect.signature(func).parameters
args = []
uses_starstar = False
for par in parameters.values():
if (par.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD or
par.kind == inspect.Parameter.KEYWORD_ONLY):
args.append(par.name)
elif par.kind == inspect.Parameter.VAR_KEYWORD:
uses_starstar = True
return args, uses_starstar | Helper function to support both Python versions |
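A runnable check of the helper above (on Python 3), showing which parameter kinds are collected and how a **kwargs catch-all is detected; sample() is just a throwaway function.
def sample(a, b, *, c=1, **extras):
    return a, b, c, extras

args, uses_starstar = _get_argspec(sample)
print(args)           # ['a', 'b', 'c'] -- positional-or-keyword plus keyword-only names
print(uses_starstar)  # True -- the **extras catch-all was found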
def _headline(self, error, i: int) -> str:
"""
Format the error message's headline
"""
msgs = Msg()
# get the error title
if error.errclass == "fatal":
msg = msgs.fatal(i)
elif error.errclass == "warning":
msg = msgs.warning(i)
elif error.errclass == "info":
msg = msgs.info(i)
elif error.errclass == "debug":
msg = msgs.debug(i)
elif error.errclass == "via":
msg = msgs.via(i)
else:
msg = msgs.error(i)
# function name
if error.function is not None:
msg += " from " + colors.bold(error.function)
if error.caller is not None:
msg += " called from " + colors.bold(error.caller)
if error.caller_msg is not None:
msg += "\n" + error.caller_msg
if error.function is not None and error.msg is not None:
msg += ": "
else:
msg = msg + " "
if error.errtype is not None:
msg += error.errtype + " : "
if error.msg is not None:
msg += error.msg
return msg | Format the error message's headline |
def dataset(self, **kwargs):
""" Return a key that specifies the data selection
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.dataset_format.format(**kwargs_copy)
except KeyError:
return None | Return a key that specifies the data selection |
def subsample(self):
"""
Subsample 1000 reads from the baited files
"""
# Create the threads for the analysis
logging.info('Subsampling FASTQ reads')
for _ in range(self.cpus):
threads = Thread(target=self.subsamplethreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the subsampled FASTQ file
sample[self.analysistype].subsampledfastq = \
os.path.splitext(sample[self.analysistype].baitedfastq)[0] + '_subsampled.fastq'
# Set the system call
sample[self.analysistype].seqtkcall = 'reformat.sh in={} out={} samplereadstarget=1000'\
.format(sample[self.analysistype].baitedfastq,
sample[self.analysistype].subsampledfastq)
# Add the sample to the queue
self.samplequeue.put(sample)
self.samplequeue.join() | Subsample 1000 reads from the baited files |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: UsageContext for this UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageContext
"""
if self._context is None:
self._context = UsageContext(self._version, sim_sid=self._solution['sim_sid'], )
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: UsageContext for this UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageContext |
def _handleBulletWidth(bulletText, style, maxWidths):
"""
    work out bullet width and adjust maxWidths[0] if necessary
"""
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
#it's a list of fragments
bulletWidth = 0
for f in bulletText:
bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
indent = style.leftIndent + style.firstLineIndent
if bulletRight > indent:
#..then it overruns, and we have less space available on line 1
            maxWidths[0] -= (bulletRight - indent) | work out bullet width and adjust maxWidths[0] if necessary
def _poor_convergence(z, r, f, bn, mvec):
"""
    Test for poor convergence based on three function evaluations.
    This test evaluates the function at the three points and returns True if
    the relative error is greater than 1e-3.
"""
check_points = (-0.4 + 0.3j, 0.7 + 0.2j, 0.02 - 0.06j)
diffs = []
ftests = []
for check_point in check_points:
rtest = r * check_point
ztest = z + rtest
ftest = f(ztest)
# Evaluate powerseries:
comp = np.sum(bn * np.power(check_point, mvec))
ftests.append(ftest)
diffs.append(comp - ftest)
max_abs_error = np.max(np.abs(diffs))
max_f_value = np.max(np.abs(ftests))
return max_abs_error > 1e-3 * max_f_value | Test for poor convergence based on three function evaluations.
This test evaluates the function at the three points and returns True if
the relative error is greater than 1e-3.
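A small numerical check of the test above, assuming numpy is imported as np and _poor_convergence is in scope; it builds Taylor coefficients of exp about z so that a well-converged series returns False and a truncated one returns True.
from math import factorial

z, r = 0.0 + 0.0j, 0.5
mvec = np.arange(20)
bn = np.array([r**m / factorial(m) for m in mvec])   # Taylor coefficients of exp(z + r*w) in powers of w

print(_poor_convergence(z, r, np.exp, bn, mvec))          # False: the 20-term series matches exp closely
print(_poor_convergence(z, r, np.exp, bn[:2], mvec[:2]))  # True: two terms give > 0.1% relative error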