def write_point(self, **kw):
"""
Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`)
"""
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw)
def getColor(rgb=None, hsv=None):
"""
Convert a color or list of colors to (r,g,b) format from many input formats.
:param bool hsv: if set to `True`, rgb is assumed as (hue, saturation, value).
Example:
- RGB = (255, 255, 255), corresponds to white
- rgb = (1,1,1) is white
- hex = #FFFF00 is yellow
- string = 'white'
- string = 'w' is white nickname
- string = 'dr' is darkred
- int = 7 picks color nr. 7 in a predefined color list
- int = -7 picks color nr. 7 in a different predefined list
.. hint:: |colorcubes| |colorcubes.py|_
"""
#recursion, return a list if input is list of colors:
if _isSequence(rgb) and len(rgb) > 3:
seqcol = []
for sc in rgb:
seqcol.append(getColor(sc))
return seqcol
if str(rgb).isdigit():
rgb = int(rgb)
if hsv:
c = hsv2rgb(hsv)
else:
c = rgb
if _isSequence(c):
if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:
return c # already rgb
else:
if len(c) == 3:
return list(np.array(c) / 255.0) # RGB
else:
return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA
elif isinstance(c, str): # is string
c = c.replace(",", " ").replace("/", " ").replace("alpha=", "")
c = c.replace("grey", "gray")
c = c.split()[0] # ignore possible opacity float inside string
if 0 < len(c) < 3: # single/double letter color
if c.lower() in color_nicks.keys():
c = color_nicks[c.lower()]
else:
print("Unknown color nickname:", c)
print("Available abbreviations:", color_nicks)
return (0.5, 0.5, 0.5)
if c.lower() in colors.keys(): # matplotlib name color
c = colors[c.lower()]
else: # vtk name color
namedColors = vtk.vtkNamedColors()
rgba = [0, 0, 0, 0]
namedColors.GetColor(c, rgba)
return list(np.array(rgba[0:3]) / 255.0)
if "#" in c: # hex to rgb
h = c.lstrip("#")
rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))
rgbh = np.array(rgb255) / 255.0
if np.sum(rgbh) > 3:
print("Error in getColor(): Wrong hex color", c)
return (0.5, 0.5, 0.5)
return tuple(rgbh)
elif isinstance(c, int): # color number
if c >= 0:
return colors1[c % 10]
else:
return colors2[-c % 10]
elif isinstance(c, float):
if c >= 0:
return colors1[int(c) % 10]
else:
return colors2[int(-c) % 10]
#print("Unknown color:", c)
return (0.5, 0.5, 0.5)
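A minimal standalone sketch of the normalisation getColor performs for two common inputs (a hex string and a 0-255 tuple); the helper name is hypothetical and not part of the source above.
import numpy as np

def to_rgb_sketch(c):
    # hex string -> (r, g, b) floats in [0, 1]
    if isinstance(c, str) and c.startswith("#"):
        h = c.lstrip("#")
        return tuple(int(h[i:i + 2], 16) / 255.0 for i in (0, 2, 4))
    # 0-255 tuple -> scaled floats; already-normalised tuples pass through
    if max(c) > 1:
        return tuple(np.array(c[:3]) / 255.0)
    return tuple(c[:3])

print(to_rgb_sketch("#FFFF00"))    # (1.0, 1.0, 0.0) -> yellow
print(to_rgb_sketch((255, 0, 0)))  # (1.0, 0.0, 0.0) -> red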
def _get_scope_highlight_color(self):
"""
Gets the base scope highlight color (derived from the editor
background).
For lighter themes it will be a darker color,
and for darker ones a lighter color.
"""
color = self.editor.sideareas_color
if color.lightness() < 128:
color = drift_color(color, 130)
else:
color = drift_color(color, 105)
return color
def do_macro_block(parser, token):
""" Function taking parsed template tag
to a MacroBlockNode.
"""
tag_name, macro_name, args, kwargs = parse_macro_params(token)
# could add extra validation on the macro_name tag
# here, but probably don't need to since we're checking
# if there's a macro by that name anyway.
try:
# see if the macro is in the context.
macro = parser._macros[macro_name]
except (AttributeError, KeyError):
raise template.TemplateSyntaxError(
"Macro '{0}' is not defined ".format(macro_name) +
"previously to the {0} tag".format(tag_name))
# get the arg and kwarg nodes from the nodelist
nodelist = parser.parse(('endmacro_block',))
parser.delete_first_token()
# Loop through nodes, sorting into args/kwargs
# (we could do this more semantically, but we loop
# only once like this as an optimization).
for node in nodelist:
if isinstance(node, MacroArgNode) and not isinstance(node, MacroKwargNode):
# note that MacroKwargNode is also a MacroArgNode (via inheritance),
# so we must check against this.
args.append(node)
elif isinstance(node, MacroKwargNode):
if node.keyword in macro.kwargs:
# check that the keyword is defined as an argument for
# the macro.
if node.keyword not in kwargs:
# add the keyword argument to the dict
# if it's not in there
kwargs[node.keyword] = node
else:
# raise a template syntax error if the
# keyword is already in the dict (thus a keyword
# argument was passed twice).
raise template.TemplateSyntaxError(
"{0} template tag was supplied "
"the same keyword argument multiple times.".format(
tag_name))
else:
raise template.TemplateSyntaxError(
"{0} template tag was supplied with a "
"keyword argument not defined by the {1} macro.".format(
tag_name, macro_name))
# The following is a check that only whitespace is inside the macro_block tag,
# but it's currently removed for reasons of backwards compatibility/potential
# uses people might have to put extra stuff in the macro_block tag.
# elif not isinstance(node, template.TextNode) or node.s.strip() != "":
# # whitespace is allowed, anything else is not
# raise template.TemplateSyntaxError(
# "{0} template tag received an argument that "
# "is neither a arg or a kwarg tag. Make sure there's "
# "text or template tags directly descending "
# "from the {0} tag.".format(tag_name))
# check that there aren't more arg tags than args
# in the macro.
if len(args) > len(macro.args):
raise template.TemplateSyntaxError(
"{0} template tag was supplied too many arg block tags.".format(
tag_name))
macro.parser = parser
return MacroBlockNode(macro, nodelist, args, kwargs)
def __setup_taskset(self, affinity, pid=None, args=None):
""" if pid specified: set process w/ pid `pid` CPU affinity to specified `affinity` core(s)
if args specified: modify list of args for Popen to start w/ taskset w/ affinity `affinity`
"""
self.taskset_path = self.get_option(self.SECTION, 'taskset_path')
if args:
return [self.taskset_path, '-c', affinity] + args
if pid:
args = "%s -pc %s %s" % (self.taskset_path, affinity, pid)
retcode, stdout, stderr = execute(args, shell=True, poll_period=0.1, catch_out=True)
logger.debug('taskset for pid %s stdout: %s', pid, stdout)
if retcode == 0:
logger.info("Enabled taskset for pid %s with affinity %s", str(pid), affinity)
else:
logger.debug('Taskset setup failed w/ retcode :%s', retcode)
raise KeyError(stderr)
def case_name_parts(self):
"""
Convert all the parts of the name to the proper case... carefully!
"""
if not self.is_mixed_case():
self.honorific = self.honorific.title() if self.honorific else None
self.nick = self.nick.title() if self.nick else None
if self.first:
self.first = self.first.title()
self.first = self.capitalize_and_punctuate_initials(self.first)
if self.last:
self.last = self.last.title()
self.last = self.uppercase_the_scots(self.last)
self.middle = self.middle.title() if self.middle else None
if self.suffix:
# Title case Jr/Sr, but uppercase roman numerals
if re.match(r'(?i).*[js]r', self.suffix):
self.suffix = self.suffix.title()
else:
self.suffix = self.suffix.upper()
return self
def locator_to_latlong (locator):
"""converts Maidenhead locator in the corresponding WGS84 coordinates
Args:
locator (string): Locator, either 4 or 6 characters
Returns:
tuple (float, float): Latitude, Longitude
Raises:
ValueError: When called with wrong or invalid input arg
TypeError: When arg is not a string
Example:
The following example converts a Maidenhead locator into Latitude and Longitude
>>> from pyhamtools.locator import locator_to_latlong
>>> latitude, longitude = locator_to_latlong("JN48QM")
>>> print latitude, longitude
48.5208333333 9.375
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East)
"""
locator = locator.upper()
if len(locator) == 5 or len(locator) < 4:
raise ValueError
if ord(locator[0]) > ord('R') or ord(locator[0]) < ord('A'):
raise ValueError
if ord(locator[1]) > ord('R') or ord(locator[1]) < ord('A'):
raise ValueError
if ord(locator[2]) > ord('9') or ord(locator[2]) < ord('0'):
raise ValueError
if ord(locator[3]) > ord('9') or ord(locator[3]) < ord('0'):
raise ValueError
if len(locator) == 6:
if ord(locator[4]) > ord('X') or ord(locator[4]) < ord('A'):
raise ValueError
if ord (locator[5]) > ord('X') or ord(locator[5]) < ord('A'):
raise ValueError
longitude = (ord(locator[0]) - ord('A')) * 20 - 180
latitude = (ord(locator[1]) - ord('A')) * 10 - 90
longitude += (ord(locator[2]) - ord('0')) * 2
latitude += (ord(locator[3]) - ord('0'))
if len(locator) == 6:
longitude += ((ord(locator[4])) - ord('A')) * (2 / 24)
latitude += ((ord(locator[5])) - ord('A')) * (1 / 24)
# move to center of subsquare
longitude += 1 / 24
latitude += 0.5 / 24
else:
# move to center of square
longitude += 1
latitude += 0.5
return latitude, longitude
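A standalone sketch of the same field/square/subsquare arithmetic for a 6-character locator; the helper name is hypothetical and the fractions rely on Python 3 division.
def locator_to_latlong_sketch(loc):
    loc = loc.upper()
    # fields (20 deg lon / 10 deg lat) plus squares (2 deg / 1 deg)
    lon = (ord(loc[0]) - ord('A')) * 20 - 180 + (ord(loc[2]) - ord('0')) * 2
    lat = (ord(loc[1]) - ord('A')) * 10 - 90 + (ord(loc[3]) - ord('0'))
    # subsquares plus the offset to the centre of the subsquare
    lon += (ord(loc[4]) - ord('A')) * 2 / 24 + 1 / 24
    lat += (ord(loc[5]) - ord('A')) * 1 / 24 + 0.5 / 24
    return lat, lon

print(locator_to_latlong_sketch("JN48QM"))  # approx (48.52, 9.375)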
def _reset_server(self, address):
"""Clear our pool for a server and mark it Unknown.
Hold the lock when calling this. Does *not* request an immediate check.
"""
server = self._servers.get(address)
# "server" is None if another thread removed it from the topology.
if server:
server.reset()
# Mark this server Unknown.
self._description = self._description.reset_server(address)
self._update_servers()
def get_charset(request):
""" Extract charset from the content type
"""
content_type = request.META.get('CONTENT_TYPE', None)
if content_type:
return extract_charset(content_type)
return None
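For illustration, a standalone sketch of what a charset extractor might do with a Content-Type header; extract_charset itself belongs to the surrounding framework and may differ.
def extract_charset_sketch(content_type):
    # look for a "charset=..." parameter after the media type
    for part in content_type.split(';')[1:]:
        key, _, value = part.strip().partition('=')
        if key.lower() == 'charset':
            return value.strip('"') or None
    return None

print(extract_charset_sketch('application/json; charset=utf-8'))  # utf-8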
def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
"""Create train and eval hooks for Experiment."""
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs or {})  # dbgprofile_kwargs may be None
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
# Adding to both training and eval so that eval aborts as well
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks
def add_node(self, binary_descriptor):
"""Add a node to the sensor_graph using a binary node descriptor.
Args:
binary_descriptor (bytes): An encoded binary node descriptor.
Returns:
int: A packed error code.
"""
try:
node_string = parse_binary_descriptor(binary_descriptor)
except Exception:
self._logger.exception("Error parsing binary node descriptor: %s", binary_descriptor)
return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM) # FIXME: Actually provide the correct error codes here
try:
self.graph.add_node(node_string)
except NodeConnectionError:
return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE)
except ProcessingFunctionError:
return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION)
except ResourceUsageError:
return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE)
return Error.NO_ERROR
def flash(message, category='message'):
"""Flashes a message to the next request. In order to remove the
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
:param category: the category for the message. The following values
are recommended: ``'message'`` for any kind of message,
``'error'`` for errors, ``'info'`` for information
messages and ``'warning'`` for warnings. However any
kind of string can be used as category.
"""
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
#
# This assumed that changes made to mutable structures in the session are
# always in sync with the session object, which is not true for session
# implementations that use external storage for keeping their keys/values.
flashes = session.get('_flashes', [])
flashes.append((category, message))
session['_flashes'] = flashes
message_flashed.send(current_app._get_current_object(),
message=message, category=category)
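A minimal Flask usage sketch with the standard flash / get_flashed_messages API; the app, secret key and route names here are made up for illustration.
from flask import Flask, flash, get_flashed_messages

app = Flask(__name__)
app.secret_key = "dev"  # flashing stores messages in the session, so a key is required

@app.route("/save")
def save():
    flash("Settings saved", "info")
    return "ok"

@app.route("/messages")
def messages():
    # normally called from a template; returns and clears the pending flashes
    return {"messages": get_flashed_messages(with_categories=True)}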
def artist_related_artists(self, spotify_id):
"""Get related artists for an artist by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
"""
route = Route('GET', '/artists/{spotify_id}/related-artists', spotify_id=spotify_id)
return self.request(route)
def find_non_contiguous(all_items):
"""Find any items that have slots that aren't contiguous"""
non_contiguous = []
for item in all_items:
if item.slots.count() < 2:
# No point in checking
continue
last_slot = None
for slot in item.slots.all().order_by('end_time'):
if last_slot:
if last_slot.end_time != slot.get_start_time():
non_contiguous.append(item)
break
last_slot = slot
return non_contiguous
def get(self, queue='', no_ack=False, to_dict=False, auto_decode=True):
"""Fetch a single message.
:param str queue: Queue name
:param bool no_ack: No acknowledgement needed
:param bool to_dict: Should incoming messages be converted to a
dictionary before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:returns: Returns a single message, as long as there is a message in
the queue. If no message is available, returns None.
:rtype: dict|Message|None
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not isinstance(no_ack, bool):
raise AMQPInvalidArgument('no_ack should be a boolean')
elif self._channel.consumer_tags:
raise AMQPChannelError("Cannot call 'get' when channel is "
"set to consume")
get_frame = specification.Basic.Get(queue=queue,
no_ack=no_ack)
with self._channel.lock and self._channel.rpc.lock:
message = self._get_message(get_frame, auto_decode=auto_decode)
if message and to_dict:
return message.to_dict()
return message
def addcomment(self, comment, private=False):
"""
Add the given comment to this bug. Set private to True to mark this
comment as private.
"""
# Note: fedora bodhi uses this function
vals = self.bugzilla.build_update(comment=comment,
comment_private=private)
log.debug("addcomment: update=%s", vals)
return self.bugzilla.update_bugs(self.bug_id, vals)
def _receive_data(self):
"""Gets data from queue"""
result = self.queue.get(block=True)
if hasattr(self.queue, 'task_done'):
self.queue.task_done()
return result
def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2017.7.0
Adds or deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
.. warning::
Use :mod:`selinux.fcontext_add_policy()<salt.modules.selinux.fcontext_add_policy>`,
or :mod:`selinux.fcontext_delete_policy()<salt.modules.selinux.fcontext_delete_policy>`.
.. deprecated:: 2019.2.0
action
The action to perform. Either ``add`` or ``delete``.
name
filespec of the file or directory. Regex syntax is allowed.
filetype
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_or_delete_policy add my-policy
'''
salt.utils.versions.warn_until(
'Sodium',
'The \'selinux.fcontext_add_or_delete_policy\' module has been deprecated. Please use the '
'\'selinux.fcontext_add_policy\' and \'selinux.fcontext_delete_policy\' modules instead. '
'Support for the \'selinux.fcontext_add_or_delete_policy\' module will be removed in Salt '
'{version}.'
)
return _fcontext_add_or_delete_policy(action, name, filetype, sel_type, sel_user, sel_level)
def _pys_assert_version(self, line):
"""Asserts pys file version"""
if float(line.strip()) > 1.0:
# Abort if file version not supported
msg = _("File version {version} unsupported (>1.0).").format(
version=line.strip())
raise ValueError(msg)
def _from_dict(cls, _dict):
"""Initialize a SentimentResult object from a json dictionary."""
args = {}
if 'document' in _dict:
args['document'] = DocumentSentimentResults._from_dict(
_dict.get('document'))
if 'targets' in _dict:
args['targets'] = [
TargetedSentimentResults._from_dict(x)
for x in (_dict.get('targets'))
]
return cls(**args)
def update_username(
self,
username: Union[str, None]
) -> bool:
"""Use this method to update your own username.
This method only works for users, not bots. Bot usernames must be changed via Bot Support or by recreating
them from scratch using BotFather. To update a channel or supergroup username you can use
:meth:`update_chat_username`.
Args:
username (``str`` | ``None``):
Username to set. "" (empty string) or None to remove the username.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
return bool(
self.send(
functions.account.UpdateUsername(
username=username or ""
)
)
)
def pass_rate(self, include_skips=False, include_inconclusive=False, include_retries=True):
"""
Calculate pass rate for tests in this list.
:param include_skips: Boolean, if True skipped tc:s will be included. Default is False
:param include_inconclusive: Boolean, if True inconclusive tc:s will be included.
Default is False.
:param include_retries: Boolean, if True retried tc:s will be included in percentages.
:return: Percentage in format .2f %
"""
total = self.count()
success = self.success_count()
retries = self.retry_count()
try:
if include_inconclusive and include_skips and include_retries:
val = 100.0*success/total
elif include_inconclusive and include_skips and not include_retries:
val = 100.0 * success / (total - retries)
elif include_skips and include_retries and not include_inconclusive:
inconcs = self.inconclusive_count()
val = 100.0 * success / (total - inconcs)
elif include_skips and not include_retries and not include_inconclusive:
inconcs = self.inconclusive_count()
val = 100.0 * success / (total - inconcs - retries)
elif include_inconclusive and include_retries and not include_skips:
skipped = self.skip_count()
val = 100.0 * success / (total - skipped)
elif include_inconclusive and not include_retries and not include_skips:
skipped = self.skip_count()
val = 100.0 * success / (total - skipped - retries)
elif not include_inconclusive and not include_skips and include_retries:
failures = self.failure_count()
val = 100.0 * success / (failures + success)
else:
failures = self.clean_fails()
val = 100.0 * success / (failures + success)
except ZeroDivisionError:
val = 0
return format(val, '.2f') + " %"
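A sketch of the default case (skips and inconclusives excluded, retries included): the pass rate is successes over clean failures plus successes, formatted to two decimals; the helper name and numbers are made up.
def pass_rate_sketch(successes, clean_failures):
    try:
        val = 100.0 * successes / (clean_failures + successes)
    except ZeroDivisionError:
        val = 0
    return format(val, '.2f') + " %"

print(pass_rate_sketch(18, 2))  # "90.00 %"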
def get_object_methods(obj):
"""
Returns all methods belonging to an object instance, as listed by the
__dir__ function.
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> obj = ut.NiceRepr()
>>> methods1 = ut.get_object_methods(obj)
>>> ut.inject_func_as_method(obj, ut.get_object_methods)
>>> methods2 = ut.get_object_methods(obj)
>>> assert ut.get_object_methods in methods2
"""
import utool as ut
attr_list = (getattr(obj, attrname) for attrname in dir(obj))
methods = [attr for attr in attr_list if ut.is_method(attr)]
return methods
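A standalone sketch of the same idea with only the standard library (the original relies on utool's is_method helper); the helper name is hypothetical.
import inspect

def object_methods_sketch(obj):
    # collect every attribute reported by dir() and keep the callable methods
    attrs = (getattr(obj, name) for name in dir(obj))
    return [a for a in attrs if inspect.ismethod(a) or inspect.isbuiltin(a)]

print(len(object_methods_sketch([])) > 0)  # True: lists expose built-in methods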
def delete(self):
'''
delete - Delete all objects in this list.
@return <int> - Number of objects deleted
'''
if len(self) == 0:
return 0
mdl = self.getModel()
return mdl.deleter.deleteMultiple(self)
def is_muted(what):
"""
Checks if a logged event is to be muted for debugging purposes.
Also goes through the solo list - only items in there will be logged!
:param what:
:return:
"""
state = False
for item in solo:
if item not in what:
state = True
else:
state = False
break
for item in mute:
if item in what:
state = True
break
return state
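A compact sketch of the mute/solo filtering rule above: with a non-empty solo list only matching events pass, and anything matching the mute list is always dropped; the example lists are made up.
solo = ["network"]
mute = ["heartbeat"]

def is_muted_sketch(what):
    if solo and not any(item in what for item in solo):
        return True
    return any(item in what for item in mute)

print(is_muted_sketch("network.connect"))    # False - matches the solo list
print(is_muted_sketch("ui.render"))          # True  - not in the solo list
print(is_muted_sketch("network.heartbeat"))  # True  - explicitly muted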
def get_email(self, email_id):
"""
Get a specific email
"""
connection = Connection(self.token)
connection.set_url(self.production, self.EMAILS_ID_URL % email_id)
return connection.get_request()
def colstack(seq, mode='abort',returnnaming=False):
"""
Horizontally stack a sequence of numpy ndarrays with structured dtypes
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack horizontally.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_.
"""
assert mode in ['first','drop','abort','rename'], \
'mode argument must take on value "first","drop", "rename", or "abort".'
AllNames = utils.uniqify(utils.listunion(
[list(l.dtype.names) for l in seq]))
NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
for x in AllNames]
Commons = [x[0] for x in NameList if len(x[1]) > 1]
if len(Commons) > 0 or mode == 'first':
if mode == 'abort':
raise ValueError('There are common column names with differing ' +
'values in the columns')
elif mode == 'drop':
Names = [(L[0], x,x) for (x, L) in NameList if x not in Commons]
elif mode == 'rename':
NameDict = dict(NameList)
Names = utils.listunion([[(i,n,n) if len(NameDict[n]) == 1 else \
(i,n,n + '_' + str(i)) for n in s.dtype.names] \
for (i,s) in enumerate(seq)])
else:
Names = [(L[0], x,x) for (x, L) in NameList]
if returnnaming:
return utils.fromarrays([seq[i][x] for (i, x, y) in Names],
type=np.ndarray, names=list(zip(*Names))[2]), Names
else:
return utils.fromarrays([seq[i][x] for (i, x, y) in Names],
type=np.ndarray, names=list(zip(*Names))[2])
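A small illustration of the underlying operation with plain numpy, where np.rec.fromarrays plays the role of utils.fromarrays; the arrays and column names are made up.
import numpy as np

a = np.array([(1, 2.0), (3, 4.0)], dtype=[('id', int), ('x', float)])
b = np.array([('u',), ('v',)], dtype=[('label', 'U1')])

# stack the columns of two structured arrays side by side
stacked = np.rec.fromarrays([a['id'], a['x'], b['label']],
                            names=['id', 'x', 'label'])
print(stacked.dtype.names)  # ('id', 'x', 'label')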
def get_weld_obj_id(weld_obj, data):
"""Helper method to update WeldObject with some data.
Parameters
----------
weld_obj : WeldObject
WeldObject to update.
data : numpy.ndarray or WeldObject or str
Data for which to get an id. If str, it is a placeholder or 'str' literal.
Returns
-------
str
The id of the data, e.g. _inp0 for raw data, obj101 for WeldObject
"""
obj_id = weld_obj.update(data)
if isinstance(data, WeldObject):
obj_id = data.obj_id
weld_obj.dependencies[obj_id] = data
return obj_id
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not os.path.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
def get_mesures(self, mes, debut=None, fin=None, freq='H', format=None,
dayfirst=False, brut=False):
"""
Retrieve measurement data.
Parameters:
mes: A measurement name, several names separated by commas, or a list
(list, tuple, pandas.Series) of names
debut: String or datetime object describing the start date.
Default=today's date
fin: String or datetime object describing the end date.
Default=start date
freq: time frequency. '15T' | 'H' | 'D' | 'M' | 'A' (15T for quarter-hourly)
format: string describing the date format (e.g. "%Y-%m-%d"
for debut/fin="2013-01-28"). Call pyair.date.strtime_help() to
get the list of possible codes.
Default="%Y-%m-%d"
dayfirst: If no format is given and the dates are strings,
helps the parser turn the date into a datetime object
by specifying that dates start with the day (e.g. 11/09/2012
could be read as November 9th if dayfirst=False)
brut: whether or not to return the raw, non-invalidated dataframe and the
measurement state codes
Default=False
Returns:
A dataframe containing all the requested measurements.
If brut=True, returns the dataframe of raw non-invalidated measurements and
the dataframe of state codes.
The valid (net) dataframe can then be recomputed with:
brut, etats = xr.get_mesure(..., brut=True)
invalides = etats_to_invalid(etats)
net = brut.mask(invalides)
"""
def create_index(index, freq):
"""
New [id, date] index with dates formatted according to the requested time step
index: index of the old dataframe, as [date at midnight, offset to add]
"""
decalage = 1 # compensates for the convention of stamping a period at its end (e.g. at 24h, the pollution from 23h to midnight)
if freq == 'T' or freq == '15T':
f = pd.tseries.offsets.Minute
decalage = 15
if freq == 'H':
f = pd.tseries.offsets.Hour
if freq == 'D':
f = pd.tseries.offsets.Day
if freq == 'M':
f = pd.tseries.offsets.MonthBegin
if freq == 'A':
f = pd.tseries.offsets.YearBegin
else:
f = pd.tseries.offsets.Hour
new_index = [date + f(int(delta) - decalage) for date, delta in index]
return new_index
# Reformat the measurement-name field
mes = _format(mes)
# Parse the date fields
debut = to_date(debut, dayfirst, format)
if not fin:
fin = debut
else:
fin = to_date(fin, dayfirst, format)
# The 'Q' time frequency does not exist; go through a 15-minute frequency first
if freq in ('Q', 'T'):
freq = '15T'
# Select the fields and the table according to the requested time frequency
if freq == '15T':
diviseur = 96
champ_val = ','.join(['Q_M%02i AS "%i"' % (x, x * 15) for x in range(1, diviseur + 1)])
champ_code = 'Q_ETATV'
table = 'JOURNALIER'
elif freq == 'H':
diviseur = 24
champ_val = ','.join(['H_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
champ_code = 'H_ETAT'
table = 'JOURNALIER'
elif freq == 'D':
diviseur = 1
champ_val = 'J_M01 AS "1"'
champ_code = 'J_ETAT'
table = 'JOURNALIER'
elif freq == 'M':
diviseur = 12
champ_val = ','.join(['M_M%02i AS "%i"' % (x, x) for x in range(1, diviseur + 1)])
champ_code = 'M_ETAT'
table = 'MOIS'
elif freq == 'A':
diviseur = 1
champ_val = 'A_M01 AS "1"'
champ_code = 'A_ETAT'
table = 'MOIS'
else:
raise ValueError("freq must be T, H, D, M or A")
if table == 'JOURNALIER':
champ_date = 'J_DATE'
debut_db = debut
fin_db = fin
else:
champ_date = 'M_DATE'
# For freq='M' and 'A', the table holds all the values for a whole year.
# To avoid disturbing the retrieval when mid-year dates are passed, the
# dates are aligned to the beginning and end of the year. The re-cropping
# happens further down in the code, during the reindex.
debut_db = debut.replace(month=1, day=1, hour=0, minute=0)
fin_db = fin.replace(month=12, day=31, hour=23, minute=0)
debut_db = debut_db.strftime("%Y-%m-%d")
fin_db = fin_db.strftime("%Y-%m-%d")
# Retrieve the values and the associated state codes
_sql = """SELECT
IDENTIFIANT as "id",
{champ_date} as "date",
{champ_code} as "etat",
{champ_val}
FROM {table}
INNER JOIN MESURE USING (NOM_COURT_MES)
WHERE IDENTIFIANT IN ('{mes}')
AND {champ_date} BETWEEN TO_DATE('{debut}', 'YYYY-MM-DD') AND TO_DATE('{fin}', 'YYYY-MM-DD')
ORDER BY IDENTIFIANT, {champ_date} ASC""".format(champ_date=champ_date,
table=table,
champ_code=champ_code,
mes=mes,
champ_val=champ_val,
debut=debut_db,
fin=fin_db)
## TODO: try this once the database is on version 11g
# _sql = """SELECT *
# FROM ({selection})
# UNPIVOT (IDENTIFIANT FOR VAL IN ({champ_as}))""".format(selection=_sql,
# champ_date=champ_date,
# champ_as=champ_as)
# Retrieve the values for the requested freq into a dataframe
rep = psql.read_sql(_sql, self.conn)
# Build a multiindex to manipulate the dataframe more easily
df = rep.set_index(['id', 'date'])
# Stack the dataframe to turn the columns into rows, dropping the state column,
# then unstack by id to get the pollutants as columns
etats = df['etat']
df = df.drop('etat', axis=1)
df_stack = df.stack(dropna=False)
df = df_stack.unstack('id')
# Compute a new index with the proper dates. The df index is made of the
# date field at midnight plus the names of the value fields, which are
# aliased from 1 to 24 for the hours, ... see champ_val.
# These two values are combined to get aligned dates used as the final index
index = create_index(df.index, freq)
df.reset_index(inplace=True, drop=True)
df['date'] = index
df = df.set_index(['date'])
# Process the state codes
# Concatenate the state codes for each pollutant
# etats = etats.sum(level=0)
# etats = pd.DataFrame(zip(*etats.apply(list)))
etats = etats.unstack('id')
etats.fillna(value=MISSING_CODE * diviseur, inplace=True)
etats = etats.sum(axis=0)
etats = pd.DataFrame(list(zip(*etats.apply(list))))
etats.index = df.index
etats.columns = df.columns
# Replace the values at missing dates with NaN
dates_completes = date_range(debut, fin, freq)
df = df.reindex(dates_completes)
etats = etats.reindex(dates_completes)
# Invalidation based on state codes
# For each state code, decide whether it is invalidating by replacing it with a boolean
invalid = etats_to_invalid(etats)
if not brut:
# in the dataframe, mask every invalid value with NaN
dfn = df.mask(invalid) # net (cleaned) DataFrame
return dfn
else:
return df, etats
def infer_trading_calendar(factor_idx, prices_idx):
"""
Infer the trading calendar from factor and price information.
Parameters
----------
factor_idx : pd.DatetimeIndex
The factor datetimes for which we are computing the forward returns
prices_idx : pd.DatetimeIndex
The prices datetimes associated with the factor data
Returns
-------
calendar : pd.DateOffset
"""
full_idx = factor_idx.union(prices_idx)
traded_weekdays = []
holidays = []
days_of_the_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for day, day_str in enumerate(days_of_the_week):
weekday_mask = (full_idx.dayofweek == day)
# drop days of the week that are not traded at all
if not weekday_mask.any():
continue
traded_weekdays.append(day_str)
# look for holidays
used_weekdays = full_idx[weekday_mask].normalize()
all_weekdays = pd.date_range(full_idx.min(), full_idx.max(),
freq=CustomBusinessDay(weekmask=day_str)
).normalize()
_holidays = all_weekdays.difference(used_weekdays)
_holidays = [timestamp.date() for timestamp in _holidays]
holidays.extend(_holidays)
traded_weekdays = ' '.join(traded_weekdays)
return CustomBusinessDay(weekmask=traded_weekdays, holidays=holidays)
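For reference, a sketch of the kind of offset this returns: a pandas CustomBusinessDay built from a weekmask and a holiday list; the dates below are made up.
import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay

cal = CustomBusinessDay(weekmask='Mon Tue Wed Thu Fri',
                        holidays=[pd.Timestamp('2023-12-25')])
days = pd.date_range('2023-12-22', periods=3, freq=cal)
print(days)  # skips the weekend and the Dec 25 holiday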
def optimisation_plot(d, overlay_alpha=0.5, **kwargs):
"""
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
d : latools.D object
A latools data object.
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `tplot`
"""
if not hasattr(d, 'opt'):
raise ValueError('Please run `signal_optimiser` before trying to plot its results.')
out = []
for n, opt in d.opt.items():
if not opt['optimisation_success']:
out.append((None, None))
else:
# unpack variables
means = opt['means']
stds = opt['stds']
min_points = opt['min_points']
mean_threshold = opt['mean_threshold']
std_threshold = opt['std_threshold']
opt_centre = opt['opt_centre']
opt_n_points = opt['opt_n_points']
centres, npoints = np.meshgrid(np.arange(means.shape[1]), np.arange(min_points, min_points + means.shape[0]))
rind = (stds < std_threshold)
mind = (means < mean_threshold)
# color scale and histogram limits
mlim = np.percentile(means.flatten()[~np.isnan(means.flatten())], (0, 99))
rlim = np.percentile(stds.flatten()[~np.isnan(stds.flatten())], (0, 99))
cmr = plt.cm.Blues
cmr.set_bad((0,0,0,0.3))
cmm = plt.cm.Reds
cmm.set_bad((0,0,0,0.3))
# create figure
fig = plt.figure(figsize=[7,7])
ma = fig.add_subplot(3, 2, 1)
ra = fig.add_subplot(3, 2, 2)
# work out image limits
nonan = np.argwhere(~np.isnan(means))
xdif = np.ptp(nonan[:, 1])
ydif = np.ptp(nonan[:, 0])
extent = (nonan[:, 1].min() - np.ceil(0.1 * xdif), # x min
nonan[:, 1].max() + np.ceil(0.1 * xdif), # x max
nonan[:, 0].min() + min_points, # y min
nonan[:, 0].max() + np.ceil(0.1 * ydif) + min_points) # y max
mm = ma.imshow(means, origin='bottomleft', cmap=cmm, vmin=mlim[0], vmax=mlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.set_ylabel('N points')
ma.set_xlabel('Center')
fig.colorbar(mm, ax=ma, label='Amplitude')
mr = ra.imshow(stds, origin='bottomleft', cmap=cmr, vmin=rlim[0], vmax=rlim[1],
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ra.set_xlabel('Center')
fig.colorbar(mr, ax=ra, label='std')
# view limits
ra.imshow(~rind, origin='bottomleft', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
ma.imshow(~mind, origin='bottomleft', cmap=plt.cm.Greys, alpha=overlay_alpha,
extent=(centres.min(), centres.max(), npoints.min(), npoints.max()))
for ax in [ma, ra]:
ax.scatter(opt_centre, opt_n_points, c=(1,1,1,0.7), edgecolor='k',marker='o')
ax.set_xlim(extent[:2])
ax.set_ylim(extent[-2:])
# draw histograms
mah = fig.add_subplot(3, 2, 3)
rah = fig.add_subplot(3, 2, 4)
mah.set_xlim(mlim)
mbin = np.linspace(*mah.get_xlim(), 50)
mah.hist(means.flatten()[~np.isnan(means.flatten())], mbin)
mah.axvspan(mean_threshold, mah.get_xlim()[1], color=(0,0,0,overlay_alpha))
mah.axvline(mean_threshold, c='r')
mah.set_xlabel('Scaled Mean Analyte Conc')
mah.set_ylabel('N')
rah.set_xlim(rlim)
rbin = np.linspace(*rah.get_xlim(), 50)
rah.hist(stds.flatten()[~np.isnan(stds.flatten())], rbin)
rah.axvspan(std_threshold, rah.get_xlim()[1], color=(0,0,0,0.4))
rah.axvline(std_threshold, c='r')
rah.set_xlabel('std')
tax = fig.add_subplot(3,1,3)
tplot(d, opt.analytes, ax=tax, **kwargs)
tax.axvspan(*d.Time[[opt.lims[0], opt.lims[1]]], alpha=0.2)
tax.set_xlim(d.Time[d.ns == n].min() - 3, d.Time[d.ns == n].max() + 3)
fig.tight_layout()
out.append((fig, (ma, ra, mah, rah, tax)))
return out
def _set_sharing_keys(self, keys):
"""
Set the keys to share or unshare
Parameters
----------
keys: string or iterable of strings
The iterable may contain formatoptions that shall be shared (or
unshared), or group names of formatoptions to share all
formatoptions of that group (see the :attr:`fmt_groups` property).
If None, all formatoptions of this plotter are inserted.
Returns
-------
set
The set of formatoptions to share (or unshare)"""
if isinstance(keys, str):
keys = {keys}
keys = set(self) if keys is None else set(keys)
fmto_groups = self._fmto_groups
keys.update(chain(*(map(lambda fmto: fmto.key, fmto_groups[key])
for key in keys.intersection(fmto_groups))))
keys.difference_update(fmto_groups)
return keys
def _valcache_lookup(self, cache, branch, turn, tick):
"""Return the value at the given time in ``cache``"""
if branch in cache:
branc = cache[branch]
try:
if turn in branc and branc[turn].rev_gettable(tick):
return branc[turn][tick]
elif branc.rev_gettable(turn-1):
turnd = branc[turn-1]
return turnd[turnd.end]
except HistoryError as ex:
# probably shouldn't ever happen, empty branches shouldn't be kept in the cache at all...
# but it's easy to handle
if ex.deleted:
raise
for b, r, t in self.db._iter_parent_btt(branch, turn, tick):
if b in cache:
if r in cache[b] and cache[b][r].rev_gettable(t):
try:
return cache[b][r][t]
except HistoryError as ex:
if ex.deleted:
raise
elif cache[b].rev_gettable(r-1):
cbr = cache[b][r-1]
try:
return cbr[cbr.end]
except HistoryError as ex:
if ex.deleted:
raise
def class_method(cls, f):
"""Decorator which dynamically binds class methods to the model for later use."""
setattr(cls, f.__name__, classmethod(f))
return f
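A usage sketch for the decorator above, with a stand-in model class; the class and function names are made up.
class Model:
    registry = []

    @classmethod
    def class_method(cls, f):
        # same idea as above: bind f to the class as a classmethod
        setattr(cls, f.__name__, classmethod(f))
        return f

@Model.class_method
def count(cls):
    return len(cls.registry)

print(Model.count())  # 0 - `count` is now callable on the class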
def monitor(result_queue, broker=None):
"""
Gets finished tasks from the result queue and saves them to Django
:type result_queue: multiprocessing.Queue
"""
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_("{} monitoring at {}").format(name, current_process().pid))
for task in iter(result_queue.get, 'STOP'):
# save the result
if task.get('cached', False):
save_cached(task, broker)
else:
save_task(task, broker)
# acknowledge result
ack_id = task.pop('ack_id', False)
if ack_id and (task['success'] or task.get('ack_failure', False)):
broker.acknowledge(ack_id)
# log the result
if task['success']:
# log success
logger.info(_("Processed [{}]").format(task['name']))
else:
# log failure
logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
logger.info(_("{} stopped monitoring results").format(name))
def push_plugin(self, name):
"""
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/push', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True)
def preprocess_incoming_content(content, encrypt_func, max_size_bytes):
"""
Apply preprocessing steps to file/notebook content that we're going to
write to the database.
Applies ``encrypt_func`` to ``content`` and checks that the result is
smaller than ``max_size_bytes``.
"""
encrypted = encrypt_func(content)
if max_size_bytes != UNLIMITED and len(encrypted) > max_size_bytes:
raise FileTooLarge()
return encrypted
def write_short_ascii(s):
"""
Encode a Kafka short string which represents text.
:param str s:
Text string (`str` on Python 3, `str` or `unicode` on Python 2) or
``None``. The string will be ASCII-encoded.
:returns: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
"""
if s is None:
return _NULL_SHORT_STRING
if not isinstance(s, string_types):
raise TypeError('{!r} is not text'.format(s))
return write_short_bytes(s.encode('ascii'))
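A self-contained sketch of the wire format this produces: a signed 16-bit big-endian length prefix followed by the ASCII bytes, with -1 encoding a null string; the helper name is hypothetical and write_short_bytes is the library's own prefixing routine.
import struct

def write_short_ascii_sketch(s):
    if s is None:
        return struct.pack('>h', -1)  # null string marker
    data = s.encode('ascii')
    return struct.pack('>h', len(data)) + data

print(write_short_ascii_sketch('topic'))  # b'\x00\x05topic'
print(write_short_ascii_sketch(None))     # b'\xff\xff'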
def addEqLink(self, link):
'''Appends EqLink
'''
if isinstance(link, EqLink):
self.eqLinks.append(link)
else:
raise TypeError(
'link should be of type EqLink, not %s' % type(link))
def retrieve_value(self, name, default_value=None):
"""Retrieve a value from DB"""
value = self.spine.send_query("retrieveSetting", self.group, name, processes=["kervi-main"])
if value is None:
return default_value
elif isinstance(value, list) and len(value) == 0:
return default_value
elif isinstance(default_value, int):
return int(value)
elif isinstance(default_value, float):
return float(value)
else:
return value
def set_yaxis(self, param, unit=None, label=None):
""" Sets the value of use on the yaxis
:param param: value to use on the yaxis, should be a variable or function of the objects in objectList. ie 'R'
for the radius variable and 'calcDensity()' for the calcDensity function
:param unit: the unit to scale the values to
:type unit: quantities unit or None
:param label: axis label to use, if None "Parameter (Unit)" is generated here and used
:type label: str
"""
if unit is None:
unit = self._getParLabelAndUnit(param)[1] # use the default unit defined in this class
self._yaxis_unit = unit
self._yaxis = self._set_axis(param, unit)
if label is None:
self.ylabel = self._gen_label(param, unit)
else:
self.ylabel = label
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def __get_nondirect_init(self, init):
"""
return the non-direct init if the direct algorithm has been selected.
"""
crc = init
for i in range(self.Width):
bit = crc & 0x01
if bit:
crc^= self.Poly
crc >>= 1
if bit:
crc |= self.MSB_Mask
return crc & self.Mask
def process_quote(self, data):
"""报价推送"""
for ix, row in data.iterrows():
symbol = row['code']
tick = self._tick_dict.get(symbol, None)
if not tick:
tick = TinyQuoteData()
tick.symbol = symbol
self._tick_dict[symbol] = tick
tick.date = row['data_date'].replace('-', '')
tick.time = row['data_time']
# with GLOBAL.dt_lock:
if tick.date and tick.time:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S')
else:
return
tick.openPrice = row['open_price']
tick.highPrice = row['high_price']
tick.lowPrice = row['low_price']
tick.preClosePrice = row['prev_close_price']
# 1.25 added the order book price spread to make it easier to compute the correct order submission price; requires Futu client version v3.42.4961.125 or later
if 'price_spread' in row:
tick.priceSpread = row['price_spread']
tick.lastPrice = row['last_price']
tick.volume = row['volume']
new_tick = copy(tick)
self._notify_new_tick_event(new_tick)
def call_remoteckan(self, *args, **kwargs):
# type: (Any, Any) -> Dict
"""
Calls the remote CKAN
Args:
*args: Arguments to pass to remote CKAN call_action method
**kwargs: Keyword arguments to pass to remote CKAN call_action method
Returns:
Dict: The response from the remote CKAN call_action method
"""
requests_kwargs = kwargs.get('requests_kwargs', dict())
credentials = self._get_credentials()
if credentials:
requests_kwargs['auth'] = credentials
kwargs['requests_kwargs'] = requests_kwargs
apikey = kwargs.get('apikey', self.get_api_key())
kwargs['apikey'] = apikey
return self.remoteckan().call_action(*args, **kwargs)
def copy(self, graph):
""" Returns a copy of the event handler, remembering the last node clicked.
"""
e = events(graph, self._ctx)
e.clicked = self.clicked
return e
def stop(self):
"""
Stops this VirtualBox VM.
"""
self._hw_virtualization = False
yield from self._stop_ubridge()
yield from self._stop_remote_console()
vm_state = yield from self._get_vm_state()
if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
if self.acpi_shutdown:
# use ACPI to shutdown the VM
result = yield from self._control_vm("acpipowerbutton")
trial = 0
while True:
vm_state = yield from self._get_vm_state()
if vm_state == "poweroff":
break
yield from asyncio.sleep(1)
trial += 1
if trial >= 120:
yield from self._control_vm("poweroff")
break
self.status = "stopped"
log.debug("ACPI shutdown result: {}".format(result))
else:
# power off the VM
result = yield from self._control_vm("poweroff")
self.status = "stopped"
log.debug("Stop result: {}".format(result))
log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
try:
# deactivate the first serial port
yield from self._modify_vm("--uart1 off")
except VirtualBoxError as e:
log.warn("Could not deactivate the first serial port: {}".format(e))
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1))
yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
yield from super().stop() | Stops this VirtualBox VM. |
def tool(self):
"""The tool that was in use during this event.
If the caller keeps a reference to a tool, the tool object will
compare equal to the previously obtained tool object.
Note:
Physical tool tracking requires hardware support. If unavailable,
libinput creates one tool per type per tablet. See
`Tracking unique tools`_ for more details.
Returns:
~libinput.define.TabletTool: The new tool triggering this event.
"""
htablettool = self._libinput.libinput_event_tablet_tool_get_tool(
self._handle)
return TabletTool(htablettool, self._libinput) | The tool that was in use during this event.
If the caller keeps a reference to a tool, the tool object will
compare equal to the previously obtained tool object.
Note:
Physical tool tracking requires hardware support. If unavailable,
libinput creates one tool per type per tablet. See
`Tracking unique tools`_ for more details.
Returns:
~libinput.define.TabletTool: The new tool triggering this event. |
def _sample_batch():
"""Determine if a batch should be processed and if not, pop off all of
the pending metrics for that batch.
:rtype: bool
"""
if _sample_probability == 1.0 or random.random() < _sample_probability:
return True
# Pop off all the metrics for the batch
for database in _measurements:
_measurements[database] = _measurements[database][_max_batch_size:]
return False | Determine if a batch should be processed and if not, pop off all of
the pending metrics for that batch.
:rtype: bool |
def return_markers(self, state='MicromedCode'):
"""Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
Raises
------
FileNotFoundError
when it cannot read the events for some reason (don't use other
exceptions).
"""
markers = []
try:
all_states = self._read_states()
except ValueError: # cryptic error when reading states
return markers
try:
x = all_states[state]
except KeyError:
return markers
markers = []
i_mrk = hstack((0, where(diff(x))[0] + 1, len(x)))
for i0, i1 in zip(i_mrk[:-1], i_mrk[1:]):
marker = {'name': str(x[i0]),
'start': (i0) / self.s_freq,
'end': i1 / self.s_freq,
}
markers.append(marker)
return markers | Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
Raises
------
FileNotFoundError
when it cannot read the events for some reason (don't use other
exceptions). |
def from_Composition(composition):
"""Return the LilyPond equivalent of a Composition in a string."""
# TODO: raise an exception here instead of returning False
if not hasattr(composition, 'tracks'):
return False
result = '\\header { title = "%s" composer = "%s" opus = "%s" } '\
% (composition.title, composition.author, composition.subtitle)
for track in composition.tracks:
result += from_Track(track) + ' '
return result[:-1] | Return the LilyPond equivalent of a Composition in a string. |
def dict_factory(cursor, row):
"""
Converts the cursor information from a SQLite query to a dictionary.
:param cursor | <sqlite3.Cursor>
row | <sqlite3.Row>
:return {<str> column: <variant> value, ..}
"""
out = {}
for i, col in enumerate(cursor.description):
out[col[0]] = row[i]
return out | Converts the cursor information from a SQLite query to a dictionary.
:param cursor | <sqlite3.Cursor>
row | <sqlite3.Row>
:return {<str> column: <variant> value, ..} |
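A minimal usage sketch with the standard library sqlite3 module, using an in-memory database:
import sqlite3
con = sqlite3.connect(':memory:')
con.row_factory = dict_factory
con.execute('CREATE TABLE users (id INTEGER, name TEXT)')
con.execute("INSERT INTO users VALUES (1, 'alice')")
row = con.execute('SELECT id, name FROM users').fetchone()
# row == {'id': 1, 'name': 'alice'}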
def _render_select(selections):
"""Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting
"""
if not selections:
return 'SELECT *'
rendered_selections = []
for name, options in selections.items():
if not isinstance(options, list):
options = [options]
original_name = name
for options_dict in options:
name = original_name
alias = options_dict.get('alias')
alias = "as %s" % alias if alias else ""
formatter = options_dict.get('format')
if formatter:
name = _format_select(formatter, name)
rendered_selections.append("%s %s" % (name, alias))
return "SELECT " + ", ".join(rendered_selections) | Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting |
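Two hypothetical inputs and the strings the code above would produce (the 'format' branch is left out because _format_select is not shown here):
_render_select({})
# -> 'SELECT *'
_render_select({'city': {'alias': 'town'}})
# -> 'SELECT city as town'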
def entrance_beveled(Di, l, angle, method='Rennels'):
r'''Returns loss coefficient for a beveled or chamfered entrance to a pipe
flush with the wall of a reservoir. This calculation has two methods
available.
The 'Rennels' and 'Idelchik' methods have similar trends, but the 'Rennels'
formulation is centered around a straight loss coefficient of 0.57, so it
is normally at least 0.07 higher.
The Rennels [1]_ formulas are:
.. math::
K = 0.0696\left(1 - C_b\frac{l}{d}\right)\lambda^2 + (\lambda-1)^2
.. math::
\lambda = 1 + 0.622\left[1-1.5C_b\left(\frac{l}{d}
\right)^{\frac{1-(l/d)^{1/4}}{2}}\right]
.. math::
C_b = \left(1 - \frac{\theta}{90}\right)\left(\frac{\theta}{90}
\right)^{\frac{1}{1+l/d}}
.. figure:: fittings/flush_mounted_beveled_entrance.png
:scale: 30 %
:alt: Beveled entrance mounted straight; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
l : float
Length of bevel measured parallel to the pipe length, [m]
angle : float
Angle of bevel with respect to the pipe length, [degrees]
method : str, optional
One of 'Rennels', or 'Idelchik', [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
A cheap way of getting a lower pressure drop.
Little credible data is available.
The table of data in [2]_ uses the angle for both bevels, so it runs from 0
to 180 degrees; this function follows the convention in [1]_ which uses
only one angle, with the angle varying from 0 to 90 degrees.
.. plot:: plots/entrance_beveled.py
Examples
--------
>>> entrance_beveled(Di=0.1, l=0.003, angle=45)
0.45086864221916984
>>> entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
0.3995000000000001
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
Treniya). National technical information Service, 1966.
'''
if method is None:
method = 'Rennels'
if method == 'Rennels':
Cb = (1-angle/90.)*(angle/90.)**(1./(1 + l/Di ))
lbd = 1 + 0.622*(1 - 1.5*Cb*(l/Di)**((1 - (l/Di)**0.25)/2.))
return 0.0696*(1 - Cb*l/Di)*lbd**2 + (lbd - 1.)**2
elif method == 'Idelchik':
return float(entrance_beveled_Idelchik_obj(angle*2.0, l/Di))
else:
raise ValueError('Specified method not recognized; methods are %s'
%(entrance_beveled_methods)) | r'''Returns loss coefficient for a beveled or chamfered entrance to a pipe
flush with the wall of a reservoir. This calculation has two methods
available.
The 'Rennels' and 'Idelchik' methods have similar trends, but the 'Rennels'
formulation is centered around a straight loss coefficient of 0.57, so it
is normally at least 0.07 higher.
The Rennels [1]_ formulas are:
.. math::
K = 0.0696\left(1 - C_b\frac{l}{d}\right)\lambda^2 + (\lambda-1)^2
.. math::
\lambda = 1 + 0.622\left[1-1.5C_b\left(\frac{l}{d}
\right)^{\frac{1-(l/d)^{1/4}}{2}}\right]
.. math::
C_b = \left(1 - \frac{\theta}{90}\right)\left(\frac{\theta}{90}
\right)^{\frac{1}{1+l/d}}
.. figure:: fittings/flush_mounted_beveled_entrance.png
:scale: 30 %
:alt: Beveled entrance mounted straight; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
l : float
Length of bevel measured parallel to the pipe length, [m]
angle : float
Angle of bevel with respect to the pipe length, [degrees]
method : str, optional
One of 'Rennels', or 'Idelchik', [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
A cheap way of getting a lower pressure drop.
Little credible data is available.
The table of data in [2]_ uses the angle for both bevels, so it runs from 0
to 180 degrees; this function follows the convention in [1]_ which uses
only one angle, with the angle varying from 0 to 90 degrees.
.. plot:: plots/entrance_beveled.py
Examples
--------
>>> entrance_beveled(Di=0.1, l=0.003, angle=45)
0.45086864221916984
>>> entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
0.3995000000000001
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
Treniya). National technical information Service, 1966. |
def main(
output_file: str,
entry_point: Optional[str],
console_script: Optional[str],
python: Optional[str],
site_packages: Optional[str],
compressed: bool,
compile_pyc: bool,
extend_pythonpath: bool,
pip_args: List[str],
) -> None:
"""
Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included!
"""
if not pip_args and not site_packages:
sys.exit(NO_PIP_ARGS_OR_SITE_PACKAGES)
if output_file is None:
sys.exit(NO_OUTFILE)
# check for disallowed pip arguments
for disallowed in DISALLOWED_ARGS:
for supplied_arg in pip_args:
if supplied_arg in disallowed:
sys.exit(
DISALLOWED_PIP_ARGS.format(
arg=supplied_arg, reason=DISALLOWED_ARGS[disallowed]
)
)
with TemporaryDirectory() as working_path:
tmp_site_packages = Path(working_path, "site-packages")
if site_packages:
shutil.copytree(site_packages, tmp_site_packages)
if pip_args:
# install deps into staged site-packages
pip.install(["--target", str(tmp_site_packages)] + list(pip_args))
# if entry_point is a console script, get the callable
if entry_point is None and console_script is not None:
try:
entry_point = find_entry_point(tmp_site_packages, console_script)
except KeyError:
if not Path(tmp_site_packages, "bin", console_script).exists():
sys.exit(NO_ENTRY_POINT.format(entry_point=console_script))
# create runtime environment metadata
env = Environment(
build_id=str(uuid.uuid4()),
entry_point=entry_point,
script=console_script,
compile_pyc=compile_pyc,
extend_pythonpath=extend_pythonpath,
)
Path(working_path, "environment.json").write_text(env.to_json())
# create bootstrapping directory in working path
bootstrap_target = Path(working_path, "_bootstrap")
bootstrap_target.mkdir(parents=True, exist_ok=True)
# copy bootstrap code
copy_bootstrap(bootstrap_target)
# create the zip
builder.create_archive(
Path(working_path),
target=Path(output_file).expanduser(),
interpreter=python or _interpreter_path(),
main="_bootstrap:bootstrap",
compressed=compressed,
) | Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included! |
def _update_rr_ce_entry(self, rec):
# type: (dr.DirectoryRecord) -> int
'''
An internal method to update the Rock Ridge CE entry for the given
record.
Parameters:
rec - The record to update the Rock Ridge CE entry for (if it exists).
Returns:
The number of additional bytes needed for this Rock Ridge CE entry.
'''
if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None:
celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area
added_block, block, offset = self.pvd.add_rr_ce_entry(celen)
rec.rock_ridge.update_ce_block(block)
rec.rock_ridge.dr_entries.ce_record.update_offset(offset)
if added_block:
return self.pvd.logical_block_size()
return 0 | An internal method to update the Rock Ridge CE entry for the given
record.
Parameters:
rec - The record to update the Rock Ridge CE entry for (if it exists).
Returns:
The number of additional bytes needed for this Rock Ridge CE entry. |
def str_strip(arr, to_strip=None, side='both'):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index
"""
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr) | Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index |
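A short usage sketch of the public pandas API this helper backs (Series.str.strip and friends); the sample values are made up:
import pandas as pd
s = pd.Series([' spam ', 'eggs\n'])
s.str.strip()   # strips whitespace on both sides
s.str.lstrip()  # left side only
s.str.rstrip()  # right side only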
def _build(self, build_method):
"""
build image from provided build_args
:return: BuildResults
"""
logger.info("building image '%s'", self.image)
self.ensure_not_built()
self.temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(self.temp_dir, BUILD_JSON)
try:
with open(temp_path, 'w') as build_json:
json.dump(self.build_args, build_json)
self.build_container_id = build_method(self.build_image, self.temp_dir)
try:
logs_gen = self.dt.logs(self.build_container_id, stream=True)
wait_for_command(logs_gen)
return_code = self.dt.wait(self.build_container_id)
except KeyboardInterrupt:
logger.info("killing build container on user's request")
self.dt.remove_container(self.build_container_id, force=True)
results = BuildResults()
results.return_code = 1
return results
else:
results = self._load_results(self.build_container_id)
results.return_code = return_code
return results
finally:
shutil.rmtree(self.temp_dir) | build image from provided build_args
:return: BuildResults |
def continue_login(self, login_token, **params):
"""
Continues a login that requires an additional step. This is common
for when login requires completing a captcha or supplying a two-factor
authentication token.
:Parameters:
login_token : `str`
A login token generated by the MediaWiki API (and used in a
previous call to login())
params : `mixed`
A set of parameters to include with the request. This depends
on what "requests" for additional information were made by the
MediaWiki API.
"""
login_params = {
'action': "clientlogin",
'logintoken': login_token,
'logincontinue': 1
}
login_params.update(params)
login_doc = self.post(**login_params)
if login_doc['clientlogin']['status'] != 'PASS':
raise LoginError.from_doc(login_doc['clientlogin'])
return login_doc['clientlogin'] | Continues a login that requires an additional step. This is common
for when login requires completing a captcha or supplying a two-factor
authentication token.
:Parameters:
login_token : `str`
A login token generated by the MediaWiki API (and used in a
previous call to login())
params : `mixed`
A set of parameters to include with the request. This depends
on what "requests" for additional information were made by the
MediaWiki API. |
def get_requirements():
"""Extract the list of requirements from our requirements.txt.
:rtype: 2-tuple
:returns: Two lists, the first is a list of requirements in the form of
pkgname==version. The second is a list of URIs or VCS checkout strings
which specify the dependency links for obtaining a copy of the
requirement.
"""
requirements_file = os.path.join(os.getcwd(), 'requirements.txt')
requirements = []
links = []
try:
with open(requirements_file) as reqfile:
for line in reqfile.readlines():
line = line.strip()
if line.startswith('#'):
continue
elif line.startswith(
('https://', 'git://', 'hg://', 'svn://')):
links.append(line)
else:
requirements.append(line)
except (IOError, OSError) as error:
print(error)
if python26():
# Required to make `collections.OrderedDict` available on Python<=2.6
requirements.append('ordereddict==1.1#a0ed854ee442051b249bfad0f638bbec')
# Don't try to install psutil on PyPy:
if _isPyPy:
for line in requirements[:]:
if line.startswith('psutil'):
print("Not installing %s on PyPy..." % line)
requirements.remove(line)
return requirements, links | Extract the list of requirements from our requirements.txt.
:rtype: 2-tuple
:returns: Two lists, the first is a list of requirements in the form of
pkgname==version. The second is a list of URIs or VCS checkout strings
which specify the dependency links for obtaining a copy of the
requirement. |
def ask_captcha(length=4):
"""Prompts the user for a random string."""
captcha = "".join(random.choice(string.ascii_lowercase) for _ in range(length))
ask_str('Enter the following letters, "%s"' % (captcha), vld=[captcha, captcha.upper()], blk=False) | Prompts the user to retype a randomly generated string (a simple captcha).
def get_activities(self, before=None, after=None, limit=None):
"""
Get activities for authenticated user sorted by newest first.
http://strava.github.io/api/v3/activities/
:param before: Result will start with activities whose start date is
before specified date. (UTC)
:type before: datetime.datetime or str or None
:param after: Result will start with activities whose start date is after
specified value. (UTC)
:type after: datetime.datetime or str or None
:param limit: How many maximum activities to return.
:type limit: int or None
:return: An iterator of :class:`stravalib.model.Activity` objects.
:rtype: :class:`BatchedResultsIterator`
"""
if before:
before = self._utc_datetime_to_epoch(before)
if after:
after = self._utc_datetime_to_epoch(after)
params = dict(before=before, after=after)
result_fetcher = functools.partial(self.protocol.get,
'/athlete/activities',
**params)
return BatchedResultsIterator(entity=model.Activity,
bind_client=self,
result_fetcher=result_fetcher,
limit=limit) | Get activities for authenticated user sorted by newest first.
http://strava.github.io/api/v3/activities/
:param before: Result will start with activities whose start date is
before specified date. (UTC)
:type before: datetime.datetime or str or None
:param after: Result will start with activities whose start date is after
specified value. (UTC)
:type after: datetime.datetime or str or None
:param limit: How many maximum activities to return.
:type limit: int or None
:return: An iterator of :class:`stravalib.model.Activity` objects.
:rtype: :class:`BatchedResultsIterator` |
def id(self, value):
"""Split into server_and_prefix and identifier."""
i = value.rfind('/')
if (i > 0):
self.server_and_prefix = value[:i]
self.identifier = value[(i + 1):]
elif (i == 0):
self.server_and_prefix = ''
self.identifier = value[(i + 1):]
else:
self.server_and_prefix = ''
self.identifier = value | Split into server_and_prefix and identifier. |
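Two hypothetical inputs and the resulting split, following the rfind('/') logic above:
# value = 'http://example.org/prefix/id1'
#   -> server_and_prefix == 'http://example.org/prefix', identifier == 'id1'
# value = 'id1' (no slash)
#   -> server_and_prefix == '', identifier == 'id1'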
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
registered_flag = self.FlagDict().get(flagname)
if registered_flag is None:
return default
for module, flags in six.iteritems(self.FlagsByModuleDict()):
for flag in flags:
# It must compare the flag with the one in FlagDict. This is because a
# flag might be overridden only for its long name (or short name),
# and only its short name (or long name) is considered registered.
if (flag.name == registered_flag.name and
flag.short_name == registered_flag.short_name):
return module
return default | Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default. |
def import_data_to_restful_server(args, content):
'''call restful server to import data to the experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
running, _ = check_rest_server_quick(rest_port)
if running:
response = rest_post(import_data_url(rest_port), content, REST_TIME_OUT)
if response and check_response(response):
return response
else:
print_error('Restful server is not running...')
return None | call restful server to import data to the experiment |
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
winchester_config['database'],
prefix='',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close() | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. |
def timing(self, stat, value, tags=None):
"""Measure a timing for statistical distribution.
Note: timing is a special case of histogram.
"""
self.histogram(stat, value, tags) | Measure a timing for statistical distribution.
Note: timing is a special case of histogram. |
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.append(self)
return result | Activate a patch, returning any created mock. |
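A minimal usage sketch with unittest.mock, starting and stopping the patcher manually instead of using it as a context manager:
from unittest import mock
import os
patcher = mock.patch('os.getcwd', return_value='/tmp')
mocked = patcher.start()   # activates the patch and returns the created mock
assert os.getcwd() == '/tmp'
patcher.stop()             # restores the original attribute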
def _get_flavor():
"""
Download flavor from github
"""
target = op.join("seqcluster", "flavor")
url = "https://github.com/lpantano/seqcluster.git"
if not os.path.exists(target):
# shutil.rmtree("seqcluster")
subprocess.check_call(["git", "clone","-b", "flavor", "--single-branch", url])
return op.abspath(target) | Download flavor from github |
def getService(self, name, auto_execute=True):
"""
Returns a L{ServiceProxy} for the supplied name. Sets up an object that
can have method calls made to it that build the AMF requests.
@rtype: L{ServiceProxy}
"""
if not isinstance(name, basestring):
raise TypeError('string type required')
return ServiceProxy(self, name, auto_execute) | Returns a L{ServiceProxy} for the supplied name. Sets up an object that
can have method calls made to it that build the AMF requests.
@rtype: L{ServiceProxy} |
def _find_match(self, position):
""" Given a valid position in the text document, try to find the
position of the matching bracket. Returns -1 if unsuccessful.
"""
# Decide what character to search for and what direction to search in.
document = self._text_edit.document()
start_char = document.characterAt(position)
search_char = self._opening_map.get(start_char)
if search_char:
increment = 1
else:
search_char = self._closing_map.get(start_char)
if search_char:
increment = -1
else:
return -1
# Search for the character.
char = start_char
depth = 0
while position >= 0 and position < document.characterCount():
if char == start_char:
depth += 1
elif char == search_char:
depth -= 1
if depth == 0:
break
position += increment
char = document.characterAt(position)
else:
position = -1
return position | Given a valid position in the text document, try to find the
position of the matching bracket. Returns -1 if unsuccessful. |
def _split_generators(self, dl_manager):
"""Returns SplitGenerators from the folder names."""
# At data creation time, parse the folder to deduce number of splits,
# labels, image size,
# The splits correspond to the high level folders
split_names = list_folders(dl_manager.manual_dir)
# Extract all label names and associated images
split_label_images = {} # dict[split_name][label_name] = list(img_paths)
for split_name in split_names:
split_dir = os.path.join(dl_manager.manual_dir, split_name)
split_label_images[split_name] = {
label_name: list_imgs(os.path.join(split_dir, label_name))
for label_name in list_folders(split_dir)
}
# Merge all label names from all splits to get the final list of labels
# Sorted list for determinism
labels = [split.keys() for split in split_label_images.values()]
labels = list(sorted(set(itertools.chain(*labels))))
# Could improve the automated encoding format detection
# Extract the list of all image paths
image_paths = [
image_paths
for label_images in split_label_images.values()
for image_paths in label_images.values()
]
if any(f.lower().endswith(".png") for f in itertools.chain(*image_paths)):
encoding_format = "png"
else:
encoding_format = "jpeg"
# Update the info.features. This info will be automatically restored when
# the dataset is re-created
self.info.features["image"].set_encoding_format(encoding_format)
self.info.features["label"].names = labels
def num_examples(label_images):
return sum(len(imgs) for imgs in label_images.values())
# Define the splits
return [
tfds.core.SplitGenerator(
name=split_name,
# The number of shards is a dynamic function of the total
# number of images (between 0-10)
num_shards=min(10, max(num_examples(label_images) // 1000, 1)),
gen_kwargs=dict(label_images=label_images,),
) for split_name, label_images in split_label_images.items()
] | Returns SplitGenerators from the folder names. |
def register(self, event_type, callback,
args=None, kwargs=None, details_filter=None,
weak=False):
"""Register a callback to be called when event of a given type occurs.
Callback will be called with provided ``args`` and ``kwargs`` and
when event type occurs (or on any event if ``event_type`` equals to
:attr:`.ANY`). It will also get additional keyword argument,
``details``, that will hold event details provided to the
:meth:`.notify` method (if a details filter callback is provided then
the target callback will *only* be triggered if the details filter
callback returns a truthy value).
:param event_type: event type to get triggered on
:param callback: function callback to be registered.
:param args: non-keyworded arguments
:type args: list
:param kwargs: key-value pair arguments
:type kwargs: dictionary
:param weak: if the callback retained should be referenced via
a weak reference or a strong reference (defaults to
holding a strong reference)
:type weak: bool
:returns: the listener that was registered
:rtype: :py:class:`~.Listener`
"""
if not six.callable(callback):
raise ValueError("Event callback must be callable")
if details_filter is not None:
if not six.callable(details_filter):
raise ValueError("Details filter must be callable")
if not self.can_be_registered(event_type):
raise ValueError("Disallowed event type '%s' can not have a"
" callback registered" % event_type)
if kwargs:
for k in self.RESERVED_KEYS:
if k in kwargs:
raise KeyError("Reserved key '%s' not allowed in "
"kwargs" % k)
with self._lock:
if self.is_registered(event_type, callback,
details_filter=details_filter):
raise ValueError("Event callback already registered with"
" equivalent details filter")
listener = Listener(_make_ref(callback, weak=weak),
args=args, kwargs=kwargs,
details_filter=details_filter,
weak=weak)
listeners = self._topics.setdefault(event_type, [])
listeners.append(listener)
return listener | Register a callback to be called when event of a given type occurs.
Callback will be called with provided ``args`` and ``kwargs`` and
when event type occurs (or on any event if ``event_type`` equals to
:attr:`.ANY`). It will also get additional keyword argument,
``details``, that will hold event details provided to the
:meth:`.notify` method (if a details filter callback is provided then
the target callback will *only* be triggered if the details filter
callback returns a truthy value).
:param event_type: event type to get triggered on
:param callback: function callback to be registered.
:param args: non-keyworded arguments
:type args: list
:param kwargs: key-value pair arguments
:type kwargs: dictionary
:param weak: if the callback retained should be referenced via
a weak reference or a strong reference (defaults to
holding a strong reference)
:type weak: bool
:returns: the listener that was registered
:rtype: :py:class:`~.Listener` |
def notify_program_learners(cls, enterprise_customer, program_details, users):
"""
Notify learners about a program in which they've been enrolled.
Args:
enterprise_customer: The EnterpriseCustomer being linked to
program_details: Details about the specific program the learners were enrolled in
users: An iterable of the users or pending users who were enrolled
"""
program_name = program_details.get('title')
program_branding = program_details.get('type')
program_uuid = program_details.get('uuid')
lms_root_url = get_configuration_value_for_site(
enterprise_customer.site,
'LMS_ROOT_URL',
settings.LMS_ROOT_URL
)
program_path = urlquote(
'/dashboard/programs/{program_uuid}/?tpa_hint={tpa_hint}'.format(
program_uuid=program_uuid,
tpa_hint=enterprise_customer.identity_provider,
)
)
destination_url = '{site}/{login_or_register}?next={program_path}'.format(
site=lms_root_url,
login_or_register='{login_or_register}',
program_path=program_path
)
program_type = 'program'
program_start = get_earliest_start_date_from_program(program_details)
with mail.get_connection() as email_conn:
for user in users:
login_or_register = 'register' if isinstance(user, PendingEnterpriseCustomerUser) else 'login'
user_destination_url = destination_url.format(login_or_register=login_or_register)
send_email_notification_message(
user=user,
enrolled_in={
'name': program_name,
'url': user_destination_url,
'type': program_type,
'start': program_start,
'branding': program_branding,
},
enterprise_customer=enterprise_customer,
email_connection=email_conn
) | Notify learners about a program in which they've been enrolled.
Args:
enterprise_customer: The EnterpriseCustomer being linked to
program_details: Details about the specific program the learners were enrolled in
users: An iterable of the users or pending users who were enrolled |
def tm(seq, dna_conc=50, salt_conc=50, parameters='cloning'):
'''Calculate nearest-neighbor melting temperature (Tm).
:param seq: Sequence for which to calculate the tm.
:type seq: coral.DNA
:param dna_conc: DNA concentration in nM.
:type dna_conc: float
:param salt_conc: Salt concentration in mM.
:type salt_conc: float
:param parameters: Nearest-neighbor parameter set. Available options:
'breslauer': Breslauer86 parameters
'sugimoto': Sugimoto96 parameters
'santalucia96': SantaLucia96 parameters
'santalucia98': SantaLucia98 parameters
'cloning': breslauer without corrections
'cloning_sl98': santalucia98 fit to 'cloning'
:type parameters: str
:returns: Melting temperature (Tm) in °C.
:rtype: float
:raises: ValueError if parameter argument is invalid.
'''
if parameters == 'breslauer':
params = tm_params.BRESLAUER
elif parameters == 'sugimoto':
params = tm_params.SUGIMOTO
elif parameters == 'santalucia96':
params = tm_params.SANTALUCIA96
elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
params = tm_params.SANTALUCIA98
elif parameters == 'cloning':
params = tm_params.CLONING
else:
raise ValueError('Unsupported parameter set.')
# Thermodynamic parameters
pars = {'delta_h': params['delta_h'], 'delta_s': params['delta_s']}
pars_error = {'delta_h': params['delta_h_err'],
'delta_s': params['delta_s_err']}
# Error corrections - done first for use of reverse_complement parameters
if parameters == 'breslauer':
deltas = breslauer_corrections(seq, pars_error)
elif parameters == 'sugimoto':
deltas = breslauer_corrections(seq, pars_error)
elif parameters == 'santalucia96':
deltas = breslauer_corrections(seq, pars_error)
elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
deltas = santalucia98_corrections(seq, pars_error)
elif parameters == 'cloning':
deltas = breslauer_corrections(seq, pars_error)
deltas[0] += 3.4
deltas[1] += 12.4
# Sum up the nearest-neighbor enthalpy and entropy
seq = str(seq).upper()
# TODO: catch more cases when alphabets expand
if 'N' in seq:
raise ValueError('Can\'t calculate Tm of an N base.')
new_delt = _pair_deltas(seq, pars)
deltas[0] += new_delt[0]
deltas[1] += new_delt[1]
# Unit corrections
salt_conc /= 1e3
dna_conc /= 1e9
deltas[0] *= 1e3
# Universal gas constant (R)
R = 1.9872
# Supposedly this is what dnamate does, but the output doesn't match theirs
# melt = (-deltas[0] / (-deltas[1] + R * log(dna_conc / 4.0))) +
# 16.6 * log(salt_conc) - 273.15
# return melt
# Overall equation is supposedly:
# sum{dH}/(sum{dS} + R ln(dna_conc/b)) - 273.15
# with salt corrections for the whole term (or for santalucia98,
# salt corrections added to the dS term.
# So far, implementing this as described does not give results that match
# any calculator but Biopython's
if parameters == 'breslauer' or parameters == 'cloning':
numerator = -deltas[0]
# Modified dna_conc denominator
denominator = (-deltas[1]) + R * log(dna_conc / 16.0)
# Modified Schildkraut-Lifson equation adjustment
salt_adjustment = 16.6 * log(salt_conc) / log(10.0)
melt = numerator / denominator + salt_adjustment - 273.15
elif parameters == 'santalucia98' or parameters == 'cloning_sl98':
# TODO: dna_conc should be divided by 2.0 when dna_conc >> template
# (like PCR)
numerator = -deltas[0]
# SantaLucia 98 salt correction
salt_adjustment = 0.368 * (len(seq) - 1) * log(salt_conc)
denominator = -deltas[1] + salt_adjustment + R * log(dna_conc / 4.0)
melt = -deltas[0] / denominator - 273.15
elif parameters == 'santalucia96':
# TODO: find a way to test whether the code below matches another
# algorithm. It appears to be correct, but need to test it.
numerator = -deltas[0]
denominator = -deltas[1] + R * log(dna_conc / 4.0)
# SantaLucia 96 salt correction
salt_adjustment = 12.5 * log10(salt_conc)
melt = numerator / denominator + salt_adjustment - 273.15
elif parameters == 'sugimoto':
# TODO: the stuff below is untested and probably wrong
numerator = -deltas[0]
denominator = -deltas[1] + R * log(dna_conc / 4.0)
# Sugimoto parameters were fit holding salt concentration constant
# Salt correction can be chosen / ignored? Remove sugimoto set since
# it's so similar to santalucia98?
salt_correction = 16.6 * log10(salt_conc)
melt = numerator / denominator + salt_correction - 273.15
if parameters == 'cloning_sl98':
# Corrections to make santalucia98 method approximate cloning method.
# May be even better for cloning with Phusion than 'cloning' method
melt *= 1.27329212575
melt += -2.55585450119
return melt | Calculate nearest-neighbor melting temperature (Tm).
:param seq: Sequence for which to calculate the tm.
:type seq: coral.DNA
:param dna_conc: DNA concentration in nM.
:type dna_conc: float
:param salt_conc: Salt concentration in mM.
:type salt_conc: float
:param parameters: Nearest-neighbor parameter set. Available options:
'breslauer': Breslauer86 parameters
'sugimoto': Sugimoto96 parameters
'santalucia96': SantaLucia96 parameters
'santalucia98': SantaLucia98 parameters
'cloning': breslauer without corrections
'cloning_sl98': santalucia98 fit to 'cloning'
:type parameters: str
:returns: Melting temperature (Tm) in °C.
:rtype: float
:raises: ValueError if parameter argument is invalid. |
def on_mouse_wheel(self, event):
'''handle mouse wheel zoom changes'''
rotation = event.GetWheelRotation() / event.GetWheelDelta()
if rotation > 0:
zoom = 1.0/(1.1 * rotation)
elif rotation < 0:
zoom = 1.1 * (-rotation)
self.change_zoom(zoom)
self.redraw_map() | handle mouse wheel zoom changes |
def del_repo(repo, root=None):
'''
Delete a repo.
root
operate on a different root directory.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo alias
'''
repos_cfg = _get_configured_repos(root=root)
for alias in repos_cfg.sections():
if alias == repo:
doc = __zypper__(root=root).xml.call('rr', '--loose-auth', '--loose-query', alias)
msg = doc.getElementsByTagName('message')
if doc.getElementsByTagName('progress') and msg:
return {
repo: True,
'message': msg[0].childNodes[0].nodeValue,
}
raise CommandExecutionError('Repository \'{0}\' not found.'.format(repo)) | Delete a repo.
root
operate on a different root directory.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo alias |
def update_anomalous_score(self):
"""Update anomalous score.
New anomalous score is a weighted average of differences
between current summary and reviews. The weights come from credibilities.
Therefore, the new anomalous score of reviewer :math:`r` is defined as
.. math::
{\\rm anomalous}(r) = \\frac{
\\sum_{p \\in P} {\\rm credibility}(p)|
{\\rm review}(r, p)-{\\rm summary}(p)|
}{
\\sum_{p \\in P} {\\rm credibility}(p)
}
where :math:`P` is the set of products reviewed by reviewer :math:`r`,
review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
summary and credibility of product :math:`p`, respectively.
Returns:
absolute difference between old anomalous score and updated one.
"""
products = self._graph.retrieve_products(self)
diffs = [
p.summary.difference(self._graph.retrieve_review(self, p))
for p in products
]
old = self.anomalous_score
try:
self.anomalous_score = np.average(
diffs, weights=list(map(self._credibility, products)))
except ZeroDivisionError:
self.anomalous_score = np.average(diffs)
return abs(self.anomalous_score - old) | Update anomalous score.
New anomalous score is a weighted average of differences
between current summary and reviews. The weights come from credibilities.
Therefore, the new anomalous score of reviewer :math:`r` is defined as
.. math::
{\\rm anomalous}(r) = \\frac{
\\sum_{p \\in P} {\\rm credibility}(p)|
{\\rm review}(r, p)-{\\rm summary}(p)|
}{
\\sum_{p \\in P} {\\rm credibility}(p)
}
where :math:`P` is the set of products reviewed by reviewer :math:`r`,
review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
summary and credibility of product :math:`p`, respectively.
Returns:
absolute difference between old anomalous score and updated one. |
def write(self, more):
"""Append the Unicode representation of `s` to our output."""
if more:
self.output += str(more).upper()
self.output += '\n' | Append the upper-cased string representation of `more` to our output.
def get_lbaas_agent_hosting_loadbalancer(self, loadbalancer, **_params):
"""Fetches a loadbalancer agent hosting a loadbalancer."""
return self.get((self.lbaas_loadbalancer_path +
self.LOADBALANCER_HOSTING_AGENT) % loadbalancer,
params=_params) | Fetches a loadbalancer agent hosting a loadbalancer. |
def draw_identity_line(ax=None, dynamic=True, **kwargs):
"""
Draws a 45 degree identity line such that y=x for all points within the
given axes x and y limits. This function also registeres a callback so
that as the figure is modified, the axes are updated and the line remains
drawn correctly.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
dynamic : bool, default : True
If the plot is dynamic, callbacks will be registered to update the
identity line as axes are changed.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style the
identity line.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
Notes
-----
.. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_
"""
# Get the current working axes
ax = ax or plt.gca()
# Define the standard line color
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['color'] = LINE_COLOR
# Define the standard opacity
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.5
# Draw the identity line
identity, = ax.plot([],[], **kwargs)
# Define the callback
def callback(ax):
# Get the x and y limits on the axes
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Set the bounding range of the line
data = (
max(xlim[0], ylim[0]), min(xlim[1], ylim[1])
)
identity.set_data(data, data)
# Register the callback and return
callback(ax)
if dynamic:
ax.callbacks.connect('xlim_changed', callback)
ax.callbacks.connect('ylim_changed', callback)
return ax | Draws a 45 degree identity line such that y=x for all points within the
given axes x and y limits. This function also registers a callback so
that as the figure is modified, the axes are updated and the line remains
drawn correctly.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
dynamic : bool, default : True
If the plot is dynamic, callbacks will be registered to update the
identity line as axes are changed.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style the
identity line.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
Notes
-----
.. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_ |
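A minimal usage sketch, assuming matplotlib is installed; an explicit color is passed so the module-level LINE_COLOR default is not needed:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter([1, 2, 3], [1.1, 1.9, 3.2])
draw_identity_line(ax=ax, color='grey', linestyle='--')
plt.show()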
def get_form_language(self, request, obj=None):
"""
Return the current language for the currently displayed object fields.
"""
if self._has_translatable_parent_model():
return super(TranslatableInlineModelAdmin, self).get_form_language(request, obj=obj)
else:
# Follow the ?language parameter
return self._language(request) | Return the current language for the currently displayed object fields. |
def _pname_and_metadata(in_file):
"""Retrieve metadata and project name from the input metadata CSV file.
Uses the input file name for the project name and for back compatibility,
accepts the project name as an input, providing no metadata.
"""
if os.path.isfile(in_file):
with open(in_file) as in_handle:
md, global_vars = _parse_metadata(in_handle)
base = os.path.splitext(os.path.basename(in_file))[0]
md_file = in_file
elif objectstore.is_remote(in_file):
with objectstore.open_file(in_file) as in_handle:
md, global_vars = _parse_metadata(in_handle)
base = os.path.splitext(os.path.basename(in_file))[0]
md_file = None
else:
if in_file.endswith(".csv"):
raise ValueError("Did not find input metadata file: %s" % in_file)
base, md, global_vars = _safe_name(os.path.splitext(os.path.basename(in_file))[0]), {}, {}
md_file = None
return _safe_name(base), md, global_vars, md_file | Retrieve metadata and project name from the input metadata CSV file.
Uses the input file name for the project name and for back compatibility,
accepts the project name as an input, providing no metadata. |
def from_response(raw_response):
"""The Yelp Fusion API returns error messages with a json body
like:
{
'error': {
'code': 'ALL_CAPS_CODE',
'description': 'Human readable description.'
}
}
Some errors may have additional fields. For example, a
validation error:
{
'error': {
'code': 'VALIDATION_ERROR',
'description': "'en_USS' does not match '^[a-z]{2,3}_[A-Z]{2}$'",
'field': 'locale',
'instance': 'en_USS'
}
}
"""
json_response = raw_response.json()
error_info = json_response["error"]
code = error_info["code"]
try:
error_cls = _error_map[code]
except KeyError:
raise NotImplementedError(
"Unknown error code '{}' returned in Yelp API response. "
"This code may have been newly added. Please ensure you are "
"using the latest version of the yelp-python library, and if "
"so, create a new issue at https://github.com/Yelp/yelp-python "
"to add support for this error.".format(code)
)
else:
return error_cls(raw_response, **error_info) | The Yelp Fusion API returns error messages with a json body
like:
{
'error': {
'code': 'ALL_CAPS_CODE',
'description': 'Human readable description.'
}
}
Some errors may have additional fields. For example, a
validation error:
{
'error': {
'code': 'VALIDATION_ERROR',
'description': "'en_USS' does not match '^[a-z]{2,3}_[A-Z]{2}$'",
'field': 'locale',
'instance': 'en_USS'
}
} |
def affine(self, func:AffineFunc, *args, **kwargs)->'Image':
"Equivalent to `image.affine_mat = image.affine_mat @ func()`."
m = tensor(func(*args, **kwargs)).to(self.device)
self.affine_mat = self.affine_mat @ m
return self | Equivalent to `image.affine_mat = image.affine_mat @ func()`. |
def _fasta_slice(fasta, seqid, start, stop, strand):
"""
Return slice of fasta, given (seqid, start, stop, strand)
"""
_strand = 1 if strand == '+' else -1
return fasta.sequence({'chr': seqid, 'start': start, 'stop': stop, \
'strand': _strand}) | Return slice of fasta, given (seqid, start, stop, strand) |
def demacronize(string_matrix: List[List[str]]) -> List[List[str]]:
"""
Transform macronized vowels into normal vowels
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return: string_matrix
>>> demacronize([['ōdī', 'et', 'amō',]])
[['odi', 'et', 'amo']]
"""
scansion = ScansionConstants()
accent_dropper = str.maketrans(scansion.ACCENTED_VOWELS, scansion.VOWELS)
return [[word.translate(accent_dropper)
for word in sentence]
for sentence in string_matrix] | Transform macronized vowels into normal vowels
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return: string_matrix
>>> demacronize([['ōdī', 'et', 'amō',]])
[['odi', 'et', 'amo']] |
def destroy(self):
"""
Delete the document. The *whole* document. There will be no survivors.
"""
logger.info("Destroying doc: %s" % self.path)
self.fs.rm_rf(self.path)
logger.info("Done") | Delete the document. The *whole* document. There will be no survivors. |
def tenengrad(img, ksize=3):
''''TENG' algorithm (Krotkov86)'''
Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
FM = Gx*Gx + Gy*Gy
mn = cv2.mean(FM)[0]
if np.isnan(mn):
return np.nanmean(FM)
return mn | 'TENG' algorithm (Krotkov86)
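A minimal usage sketch with OpenCV, assuming a grayscale image file 'sample.png' exists on disk:
import cv2
img = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)
focus = tenengrad(img, ksize=3)
print(focus)  # larger values indicate a sharper (better focused) image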
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='Feature importance', ylabel='Features',
importance_type='split', max_num_features=None,
ignore_zero=True, figsize=None, grid=True,
precision=None, **kwargs):
"""Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Booster's feature_importance is empty.")
tuples = sorted(zip_(feature_name, importance), key=lambda x: x[1])
if ignore_zero:
tuples = [x for x in tuples if x[1] > 0]
if max_num_features is not None and max_num_features > 0:
tuples = tuples[-max_num_features:]
labels, values = zip_(*tuples)
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
for x, y in zip_(values, ylocs):
ax.text(x + 1, y,
_float2str(x, precision) if importance_type == 'gain' else x,
va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax | Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=None)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances. |
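A minimal usage sketch, assuming a trained LightGBM Booster (or LGBMModel) named bst:
ax = plot_importance(bst, max_num_features=10, importance_type='gain', precision=2)
ax.figure.savefig('importance.png')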
def populate_requirement_set(requirement_set, # type: RequirementSet
args, # type: List[str]
options, # type: Values
finder, # type: PackageFinder
session, # type: PipSession
name, # type: str
wheel_cache # type: Optional[WheelCache]
):
# type: (...) -> None
"""
Marshal cmd line args into a requirement set.
"""
# NOTE: As a side-effect, options.require_hashes and
# requirement_set.require_hashes may be updated
for filename in options.constraints:
for req_to_add in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session, wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req, None, isolated=options.isolated_mode,
use_pep517=options.use_pep517,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for filename in options.requirements:
for req_to_add in parse_requirements(
filename,
finder=finder, options=options, session=session,
wheel_cache=wheel_cache,
use_pep517=options.use_pep517):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
# If --require-hashes was a line in a requirements file, tell
# RequirementSet about it:
requirement_set.require_hashes = options.require_hashes
if not (args or options.editables or options.requirements):
opts = {'name': name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(see "pip help %(name)s")' % opts) | Marshal cmd line args into a requirement set. |
async def register(self):
"""Register library device id and get initial device list. """
url = '{}/Sessions'.format(self.construct_url(API_URL))
params = {'api_key': self._api_key}
reg = await self.api_request(url, params)
if reg is None:
self._registered = False
_LOGGER.error('Unable to register emby client.')
else:
self._registered = True
_LOGGER.info('Emby client registered!, Id: %s', self.unique_id)
self._sessions = reg
# Build initial device list.
self.update_device_list(self._sessions)
asyncio.ensure_future(self.socket_connection(), loop=self._event_loop) | Register library device id and get initial device list. |
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False):
"""Returns a groupby on the schema rdd. This returns a GroupBy object.
Note that grouping by a column name will be faster than most other
options due to implementation."""
from sparklingpandas.groupby import GroupBy
return GroupBy(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze) | Returns a groupby on the schema rdd. This returns a GroupBy object.
Note that grouping by a column name will be faster than most other
options due to implementation. |
def processed(self):
'''Increase the processed task counter and show progress message'''
self.processed_tasks += 1
qsize = self.tasks.qsize()
if qsize > 0:
progress('[%d task(s) completed, %d remaining, %d thread(s)]', self.processed_tasks, qsize, len(self.workers))
else:
progress('[%d task(s) completed, %d thread(s)]', self.processed_tasks, len(self.workers)) | Increase the processed task counter and show progress message |
def get_families_by_ids(self, family_ids=None):
"""Gets a ``FamilyList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the families
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible families may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: family_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``family_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if family_ids is None:
raise NullArgument()
families = []
for i in family_ids:
family = None
url_path = '/handcar/services/relationship/families/' + str(i)
try:
family = self._get_request(url_path)
except (NotFound, OperationFailed):
if self._family_view == PLENARY:
raise
else:
pass
if family:
if not (self._family_view == COMPARATIVE and
family in families):
families.append(family)
return objects.FamilyList(families) | Gets a ``FamilyList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the families
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible families may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: family_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``family_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
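A hedged usage sketch for the lookup above. Here session stands for an object exposing this method, id_list for an osid.id.IdList, and the standard OSID view toggles are assumed to exist on the same session:

session.use_plenary_family_view()        # every Id must resolve, otherwise NotFound propagates
families = session.get_families_by_ids(id_list)
session.use_comparative_family_view()    # inaccessible Ids are silently omitted instead
families = session.get_families_by_ids(id_list)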
async def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
await self.callHandlers(record) | Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied. |
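A small sketch of feeding a record through the coroutine above. It assumes logger is an instance of the async Logger class this method belongs to; the record itself is built with the standard library logging module:

import logging

record = logging.LogRecord('demo', logging.INFO, __file__, 0, 'hello', None, None)
await logger.handle(record)   # must run inside a coroutine; filters at logger level, then dispatches to handlers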
def copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path):
    '''use ssh client to copy data from remote machine to local machine'''
machine_list = nni_config.get_config('experimentConfig').get('machineList')
machine_dict = {}
local_path_list = []
for machine in machine_list:
machine_dict[machine['ip']] = {'port': machine['port'], 'passwd': machine['passwd'], 'username': machine['username']}
for index, host in enumerate(host_list):
local_path = os.path.join(temp_nni_path, trial_content[index].get('id'))
local_path_list.append(local_path)
print_normal('Copying log data from %s to %s' % (host + ':' + path_list[index], local_path))
sftp = create_ssh_sftp_client(host, machine_dict[host]['port'], machine_dict[host]['username'], machine_dict[host]['passwd'])
copy_remote_directory_to_local(sftp, path_list[index], local_path)
print_normal('Copy done!')
    return local_path_list | use ssh client to copy data from remote machine to local machine |
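The helper above goes through nni-internal wrappers; the underlying idea, sketched with plain paramiko (host, credentials, and paths below are placeholders, and a single file is fetched rather than a whole directory):

import paramiko

transport = paramiko.Transport(('192.0.2.10', 22))
transport.connect(username='user', password='passwd')
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get('/remote/nni/trial/trial.log', '/tmp/nni/trial.log')   # copy the remote file to the local path
transport.close()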
def _integrate(self, time_steps, capture_elements, return_timestamps):
"""
        Performs Euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
            which subset of 'timesteps' should values be returned for?
Returns
-------
outputs: list of dictionaries
"""
# Todo: consider adding the timestamp to the return elements, and using that as the index
outputs = []
for t2 in time_steps[1:]:
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
self._euler_step(t2 - self.time())
self.time.update(t2) # this will clear the stepwise caches
# need to add one more time step, because we run only the state updates in the previous
# loop and thus may be one short.
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
        return outputs | Performs Euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
which subset of 'timesteps' should be values be returned?
Returns
-------
outputs: list of dictionaries |
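A stripped-down illustration of the same forward-Euler loop, decoupled from the model object. Here f is a hypothetical derivative function x' = f(x, t); the sample-before-step order and the extra sample after the loop mirror the method above:

def euler(f, x0, time_steps, return_timestamps):
    x, t, outputs = x0, time_steps[0], []
    for t2 in time_steps[1:]:
        if t in return_timestamps:
            outputs.append((t, x))
        x = x + (t2 - t) * f(x, t)   # forward-Euler step: advance the state by dt * derivative
        t = t2
    if t in return_timestamps:
        outputs.append((t, x))       # capture the final timestamp, since the loop above stops one step short
    return outputs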
def focusInEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.focus_changed.emit()
return super(ControlWidget, self).focusInEvent(event) | Reimplement Qt method to send focus change notification |