def _create_client_impl(self, api_version):
"""
        Creates the client implementation corresponding to the specified api_version.
:param api_version:
:return:
"""
if api_version == v7_0_VERSION:
from azure.keyvault.v7_0 import KeyVaultClient as ImplClient
elif api_version == v2016_10_01_VERSION:
from azure.keyvault.v2016_10_01 import KeyVaultClient as ImplClient
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
impl = ImplClient(credentials=self._credentials)
impl.config = self.config
# if __enter__ has previously been called and the impl client has __enter__ defined we need to call it
if self._entered and hasattr(impl, '__enter__'):
impl.__enter__()
self._client_impls[api_version] = impl
        return impl
def to_array(self):
"""
Serializes this InputMediaDocument to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InputMediaDocument, self).to_array()
# 'type' given by superclass
# 'media' given by superclass
if self.thumb is not None:
if isinstance(self.thumb, InputFile):
                array['thumb'] = None  # type InputFile; the file itself is presumably sent as an upload, not serialized here
elif isinstance(self.thumb, str):
array['thumb'] = u(self.thumb) # py2: type unicode, py3: type str
else:
raise TypeError('Unknown type, must be one of InputFile, str.')
# end if
# 'caption' given by superclass
# 'parse_mode' given by superclass
        return array
def vad(self, location=1, normalize=True, activity_threshold=7.0,
min_activity_duration=0.25, initial_search_buffer=1.0,
max_gap=0.25, initial_pad=0.0):
'''Voice Activity Detector. Attempts to trim silence and quiet
background sounds from the ends of recordings of speech. The algorithm
currently uses a simple cepstral power measurement to detect voice, so
may be fooled by other things, especially music.
The effect can trim only from the front of the audio, so in order to
trim from the back, the reverse effect must also be used.
Parameters
----------
location : 1 or -1, default=1
If 1, trims silence from the beginning
If -1, trims silence from the end
normalize : bool, default=True
If true, normalizes audio before processing.
activity_threshold : float, default=7.0
The measurement level used to trigger activity detection. This may
            need to be changed depending on the noise level, signal level, and
other characteristics of the input audio.
min_activity_duration : float, default=0.25
The time constant (in seconds) used to help ignore short bursts of
sound.
initial_search_buffer : float, default=1.0
The amount of audio (in seconds) to search for quieter/shorter
bursts of audio to include prior to the detected trigger point.
max_gap : float, default=0.25
            The allowed gap (in seconds) between quieter/shorter bursts of
            audio to include prior to the detected trigger point.
initial_pad : float, default=0.0
The amount of audio (in seconds) to preserve before the trigger
point and any found quieter/shorter bursts.
See Also
--------
silence
Examples
--------
>>> tfm = sox.Transformer()
Remove silence from the beginning of speech
>>> tfm.vad(initial_pad=0.3)
Remove silence from the end of speech
>>> tfm.vad(location=-1, initial_pad=0.2)
'''
if location not in [-1, 1]:
raise ValueError("location must be -1 or 1.")
if not isinstance(normalize, bool):
raise ValueError("normalize muse be a boolean.")
if not is_number(activity_threshold):
raise ValueError("activity_threshold must be a number.")
        if not is_number(min_activity_duration) or min_activity_duration < 0:
            raise ValueError("min_activity_duration must be a positive number.")
        if not is_number(initial_search_buffer) or initial_search_buffer < 0:
            raise ValueError("initial_search_buffer must be a positive number.")
if not is_number(max_gap) or max_gap < 0:
raise ValueError("max_gap must be a positive number.")
if not is_number(initial_pad) or initial_pad < 0:
raise ValueError("initial_pad must be a positive number.")
effect_args = []
if normalize:
effect_args.append('norm')
if location == -1:
effect_args.append('reverse')
effect_args.extend([
'vad',
'-t', '{:f}'.format(activity_threshold),
'-T', '{:f}'.format(min_activity_duration),
'-s', '{:f}'.format(initial_search_buffer),
'-g', '{:f}'.format(max_gap),
'-p', '{:f}'.format(initial_pad)
])
if location == -1:
effect_args.append('reverse')
self.effects.extend(effect_args)
self.effects_log.append('vad')
        return self
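A full round trip might look like the following sketch (file names are illustrative); chaining works because vad returns self:

>>> tfm = sox.Transformer()
>>> tfm.vad(location=1).vad(location=-1)
>>> tfm.build('speech_raw.wav', 'speech_trimmed.wav')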
def run_helper_process(python_file, metadata_queue, quit_event, options):
"""
:param python_file: The absolute path of a python file containing the helper process that should be run.
It must define a class which is a subclass of BotHelperProcess.
:param metadata_queue: A queue from which the helper process will read AgentMetadata updates.
:param quit_event: An event which should be set when rlbot is shutting down.
:param options: A dict with arbitrary options that will be passed through to the helper process.
"""
class_wrapper = import_class_with_base(python_file, BotHelperProcess)
helper_class = class_wrapper.get_loaded_class()
helper = helper_class(metadata_queue, quit_event, options)
    helper.start()
def read_segment(self, segment):
"""Read one memory segment (128 byte).
"""
log.debug("read segment {0}".format(segment))
if segment < 0 or segment > 15:
raise ValueError("invalid segment number")
cmd = "\x10" + chr(segment << 4) + 8 * chr(0) + self.uid
rsp = self.transceive(cmd)
if len(rsp) < 129:
raise Type1TagCommandError(RESPONSE_ERROR)
        return rsp[1:129]
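As a hedged usage sketch, assuming tag is a connected tag object exposing this method and that the tag actually has 16 segments (note this is Python 2-era code, where byte strings are plain str):

segments = [tag.read_segment(s) for s in range(16)]  # segments 0..15, 128 bytes each
memory = ''.join(segments)                           # 2048 bytes total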
def make_fil_file(filename, out_dir='./', new_filename=None, max_load=None):
    ''' Converts file to Sigproc filterbank (.fil) format. Saves output in the current dir by default.
'''
fil_file = Waterfall(filename, max_load = max_load)
if not new_filename:
new_filename = out_dir+filename.replace('.h5','.fil').split('/')[-1]
if '.fil' not in new_filename:
new_filename = new_filename+'.fil'
    fil_file.write_to_fil(new_filename)
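Two hypothetical calls, assuming voyager.h5 is a file readable by Waterfall:

make_fil_file('voyager.h5', out_dir='./converted/')       # writes ./converted/voyager.fil
make_fil_file('voyager.h5', new_filename='voyager_copy')  # '.fil' is appended automatically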
def show(self):
"""Shows the main window and grabs the focus on it.
"""
self.hidden = False
# setting window in all desktops
window_rect = RectCalculator.set_final_window_rect(self.settings, self.window)
self.window.stick()
# add tab must be called before window.show to avoid a
# blank screen before adding the tab.
if not self.get_notebook().has_page():
self.add_tab()
self.window.set_keep_below(False)
self.window.show_all()
        # this is needed because self.window.show_all() results in showing
        # everything, including the scrollbar
self.settings.general.triggerOnChangedValue(self.settings.general, "use-scrollbar")
# move the window even when in fullscreen-mode
log.debug("Moving window to: %r", window_rect)
self.window.move(window_rect.x, window_rect.y)
# this works around an issue in fluxbox
if not FullscreenManager(self.settings, self.window).is_fullscreen():
self.settings.general.triggerOnChangedValue(self.settings.general, 'window-height')
time = get_server_time(self.window)
# TODO PORT this
        # When minimized, the window manager seems to refuse to resume
# log.debug("self.window: %s. Dir=%s", type(self.window), dir(self.window))
# is_iconified = self.is_iconified()
# if is_iconified:
# log.debug("Is iconified. Ubuntu Trick => "
# "removing skip_taskbar_hint and skip_pager_hint "
# "so deiconify can work!")
# self.get_widget('window-root').set_skip_taskbar_hint(False)
# self.get_widget('window-root').set_skip_pager_hint(False)
# self.get_widget('window-root').set_urgency_hint(False)
# log.debug("get_skip_taskbar_hint: {}".format(
# self.get_widget('window-root').get_skip_taskbar_hint()))
# log.debug("get_skip_pager_hint: {}".format(
# self.get_widget('window-root').get_skip_pager_hint()))
# log.debug("get_urgency_hint: {}".format(
# self.get_widget('window-root').get_urgency_hint()))
# glib.timeout_add_seconds(1, lambda: self.timeout_restore(time))
#
log.debug("order to present and deiconify")
self.window.present()
self.window.deiconify()
self.window.show()
self.window.get_window().focus(time)
self.window.set_type_hint(Gdk.WindowTypeHint.DOCK)
self.window.set_type_hint(Gdk.WindowTypeHint.NORMAL)
# log.debug("Restoring skip_taskbar_hint and skip_pager_hint")
# if is_iconified:
# self.get_widget('window-root').set_skip_taskbar_hint(False)
# self.get_widget('window-root').set_skip_pager_hint(False)
# self.get_widget('window-root').set_urgency_hint(False)
# This is here because vte color configuration works only after the
# widget is shown.
self.settings.styleFont.triggerOnChangedValue(self.settings.styleFont, 'color')
self.settings.styleBackground.triggerOnChangedValue(self.settings.styleBackground, 'color')
log.debug("Current window position: %r", self.window.get_position())
        self.execute_hook('show')
def init_stash(stash_path, passphrase, passphrase_size, backend):
r"""Init a stash
`STASH_PATH` is the path to the storage endpoint. If this isn't supplied,
a default path will be used. In the path, you can specify a name
for the stash (which, if omitted, will default to `ghost`) like so:
`ghost init http://10.10.1.1:8500;stash1`.
After initializing a stash, don't forget you can set environment
variables for both your stash's path and its passphrase.
    On Linux/macOS you can run:
export GHOST_STASH_PATH='http://10.10.1.1:8500;stash1'
export GHOST_PASSPHRASE=$(cat passphrase.ghost)
export GHOST_BACKEND='tinydb'
"""
stash_path = stash_path or STORAGE_DEFAULT_PATH_MAPPING[backend]
click.echo('Stash: {0} at {1}'.format(backend, stash_path))
storage = STORAGE_MAPPING[backend](**_parse_path_string(stash_path))
try:
click.echo('Initializing stash...')
if os.path.isfile(PASSPHRASE_FILENAME):
raise GhostError(
'{0} already exists. Overwriting might prevent you '
'from accessing the stash it was generated for. '
'Please make sure to save and remove the file before '
'initializing another stash.'.format(PASSPHRASE_FILENAME))
stash = Stash(
storage,
passphrase=passphrase,
passphrase_size=passphrase_size)
passphrase = stash.init()
if not passphrase:
click.echo('Stash already initialized.')
sys.exit(0)
_write_passphrase_file(passphrase)
except GhostError as ex:
sys.exit(ex)
except (OSError, IOError) as ex:
click.echo("Seems like we've run into a problem.")
file_path = _parse_path_string(stash_path)['db_path']
click.echo(
'Removing stale stash and passphrase: {0}. Note that any '
'directories created are not removed for safety reasons and you '
'might want to remove them manually.'.format(file_path))
if os.path.isfile(file_path):
os.remove(file_path)
sys.exit(ex)
click.echo('Initialized stash at: {0}'.format(stash_path))
click.echo(
'Your passphrase can be found under the `{0}` file in the '
'current directory.'.format(PASSPHRASE_FILENAME))
click.echo(
'Make sure you save your passphrase somewhere safe. '
        'If lost, you will lose access to your stash.')
def resolve_to_callable(callable_name):
""" Resolve string :callable_name: to a callable.
:param callable_name: String representing callable name as registered
in ramses registry or dotted import path of callable. Can be
wrapped in double curly brackets, e.g. '{{my_callable}}'.
"""
from . import registry
clean_callable_name = callable_name.replace(
'{{', '').replace('}}', '').strip()
try:
return registry.get(clean_callable_name)
except KeyError:
try:
from zope.dottedname.resolve import resolve
return resolve(clean_callable_name)
except ImportError:
raise ImportError(
                'Failed to load callable `{}`'.format(clean_callable_name))
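Both accepted spellings, using os.path.join as a stand-in for any importable dotted path (a registry miss falls through to dotted-name resolution):

join = resolve_to_callable('os.path.join')
join = resolve_to_callable('{{os.path.join}}')  # curly brackets are stripped first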
def features(sentence, i):
"""Features for i'th token in sentence.
Currently baseline named-entity recognition features, but these can
easily be changed to do POS tagging or chunking.
"""
word = sentence[i]
yield "word:{}" + word.lower()
if word[0].isupper():
yield "CAP"
if i > 0:
yield "word-1:{}" + sentence[i - 1].lower()
if i > 1:
yield "word-2:{}" + sentence[i - 2].lower()
if i + 1 < len(sentence):
yield "word+1:{}" + sentence[i + 1].lower()
if i + 2 < len(sentence):
yield "word+2:{}" + sentence[i + 2].lower() | Features for i'th token in sentence.
Currently baseline named-entity recognition features, but these can
easily be changed to do POS tagging or chunking. |
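With the format calls fixed as above, a quick illustration of the emitted feature strings:

sentence = ['John', 'lives', 'in', 'Berlin']
print(list(features(sentence, 0)))
# ['word:john', 'CAP', 'word+1:lives', 'word+2:in']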
def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
'''
Returns the proxy module for this salt-proxy-minion
'''
ret = LazyLoader(
_module_dirs(opts, 'proxy'),
opts,
tag='proxy',
pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
)
ret.pack['__proxy__'] = ret
    return ret
def p_statement_break(p):
'''statement : BREAK SEMI
| BREAK expr SEMI'''
if len(p) == 3:
p[0] = ast.Break(None, lineno=p.lineno(1))
else:
        p[0] = ast.Break(p[2], lineno=p.lineno(1))
def get_top_n_action_types(self, top_n):
"""Returns the top N actions by count."""
# Count action types
action_type_to_counts = dict()
for action in self.actions:
actiontype = action.actiontype
if actiontype not in action_type_to_counts:
action_type_to_counts[actiontype] = 1
else:
action_type_to_counts[actiontype] = \
action_type_to_counts[actiontype] + 1
# Convert the dictionary representation into a pair of lists
action_types = list()
counts = list()
for actiontype in action_type_to_counts.keys():
action_types.append(actiontype)
counts.append(action_type_to_counts[actiontype])
# How many actions in total?
num_actions = len(self.actions)
num_actions2 = 0
for count in counts:
num_actions2 = num_actions2 + count
if num_actions != num_actions2:
            raise Exception('Problem counting everything up!')
# Sort action types by count (lowest to highest)
sorted_inds = np.argsort(counts)
last_ind = len(sorted_inds)-1
# Return the top N actions
top_actions = list()
        if top_n > len(sorted_inds):
            raise Exception('Asked for top %d action types, '
                            'but there are only %d action types'
                            % (top_n, len(sorted_inds)))
for i in range(top_n):
top_actions.append(action_types[sorted_inds[last_ind-i]])
        return top_actions
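The tail-first indexing works because np.argsort sorts ascending; a standalone illustration:

import numpy as np

counts = [3, 10, 1]
order = np.argsort(counts)  # array([2, 0, 1]): indices of counts, ascending
top2 = [counts[order[len(order) - 1 - i]] for i in range(2)]
print(top2)  # [10, 3]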
def create_interface_connection(interface_a, interface_b):
'''
.. versionadded:: 2019.2.0
Create an interface connection between 2 interfaces
interface_a
Interface id for Side A
interface_b
Interface id for Side B
CLI Example:
.. code-block:: bash
salt myminion netbox.create_interface_connection 123 456
'''
payload = {'interface_a': interface_a,
'interface_b': interface_b}
ret = _add('dcim', 'interface-connections', payload)
if ret:
return {'dcim': {'interface-connections': {ret['id']: payload}}}
else:
        return ret
def _children_(self):
""" get children objects
:rtype: a dict of children {child_name: child_object}
"""
ret = {}
names = self._field_names_
def down(name, obj):
if isinstance(obj, BaseObj):
if not isinstance(obj, weakref.ProxyTypes):
ret[name] = obj
elif isinstance(obj, list):
for i, v in zip(range(len(obj)), obj):
down(jp_compose(str(i), name), v)
elif isinstance(obj, dict):
for k, v in six.iteritems(obj):
down(jp_compose(k, name), v)
for n in names:
down(jp_compose(n), getattr(self, n))
        return ret
def decrypt_file(file_path, recipient_key, *, base64=False):
"Returns (filename, file_contents) if successful"
crypto.assert_type_and_length('recipient_key', recipient_key, crypto.UserLock)
with open(file_path, "rb") as I:
contents = I.read()
if base64:
contents = crypto.b64decode(contents)
crypted = crypto.MiniLockFile(contents)
    return crypted.decrypt(recipient_key)
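A hedged usage sketch; the UserLock construction shown is hypothetical and depends on the surrounding crypto module:

key = crypto.UserLock.from_passphrase('...')  # hypothetical constructor
name, contents = decrypt_file('report.pdf.minilock', key, base64=False)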
def prefix(self, name):
"""
:param string name: the name of an attribute to look up.
:return: the prefix component of the named attribute's name,
or None.
"""
a_node = self.adapter.get_node_attribute_node(self.impl_element, name)
if a_node is None:
return None
        return a_node.prefix
def twitter_credential(name):
""" Grab twitter credential from settings """
credential_name = 'TWITTER_' + name.upper()
if hasattr(settings, credential_name):
return getattr(settings, credential_name)
else:
        raise AttributeError('Missing twitter credential in settings: ' + credential_name)
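For example, assuming the Django settings module defines the credential:

# settings.py: TWITTER_CONSUMER_KEY = 'abc123'
consumer_key = twitter_credential('consumer_key')  # returns 'abc123'
twitter_credential('missing')                      # raises AttributeError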
def _channel_loop(detection, template, min_cc, detection_id, interpolate, i,
pre_lag_ccsum=None, detect_chans=0,
horizontal_chans=['E', 'N', '1', '2'], vertical_chans=['Z'],
debug=0):
"""
Inner loop for correlating and assigning picks.
Utility function to take a stream of data for the detected event and write
maximum correlation to absolute time as picks in an obspy.core.event.Event
object.
    Only outputs picks for correlations above min_cc.
:type detection: obspy.core.stream.Stream
:param detection:
Stream of data for the slave event detected using template.
:type template: obspy.core.stream.Stream
:param template: Stream of data as the template for the detection.
:type min_cc: float
:param min_cc: Minimum cross-correlation value to allow a pick to be made.
:type detection_id: str
:param detection_id: Detection ID to associate the event with.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type i: int
:param i:
Used to track which process has occurred when running in parallel.
:type pre_lag_ccsum: float
:param pre_lag_ccsum:
Cross-correlation sum before lag-calc, will check that the
cross-correlation sum is increased by lag-calc (using all channels,
ignoring min_cc)
:type detect_chans: int
:param detect_chans:
Number of channels originally used in detections, must match the number
used here to allow for cccsum checking.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type debug: int
:param debug: Debug output level 0-5.
:returns:
Event object containing network, station, channel and pick information.
:rtype: :class:`obspy.core.event.Event`
"""
from eqcorrscan.core.match_filter import normxcorr2
import math
event = Event()
s_stachans = {}
cccsum = 0
checksum = 0
used_chans = 0
for tr in template:
temp_net = tr.stats.network
temp_sta = tr.stats.station
temp_chan = tr.stats.channel
debug_print('Working on: %s.%s.%s' % (temp_net, temp_sta, temp_chan),
3, debug)
image = detection.select(station=temp_sta, channel=temp_chan)
if len(image) == 0 or sum(image[0].data) == 0:
print('No match in image.')
continue
if interpolate:
try:
ccc = normxcorr2(tr.data, image[0].data)
except Exception:
print('Could not calculate cc')
print('Image is %i long' % len(image[0].data))
print('Template is %i long' % len(tr.data))
continue
try:
shift, cc_max = _xcorr_interp(ccc=ccc, dt=image[0].stats.delta)
except IndexError:
print('Could not interpolate ccc, not smooth')
ccc = normxcorr2(tr.data, image[0].data)
cc_max = np.amax(ccc)
shift = np.argmax(ccc) * image[0].stats.delta
# Convert the maximum cross-correlation time to an actual time
if math.isnan(cc_max):
print('Problematic trace, no cross correlation possible')
continue
else:
picktime = image[0].stats.starttime + shift
else:
# Convert the maximum cross-correlation time to an actual time
try:
ccc = normxcorr2(tr.data, image[0].data)
except Exception:
print('Could not calculate cc')
print('Image is %i long' % len(image[0].data))
print('Template is %i long' % len(tr.data))
continue
cc_max = np.amax(ccc)
if math.isnan(cc_max):
print('Problematic trace, no cross correlation possible')
continue
else:
picktime = image[0].stats.starttime + (
np.argmax(ccc) * image[0].stats.delta)
debug_print('Maximum cross-corr=%s' % cc_max, 3, debug)
checksum += cc_max
used_chans += 1
if cc_max < min_cc:
debug_print('Correlation below threshold, not used', 3, debug)
continue
cccsum += cc_max
# Perhaps weight each pick by the cc val or cc val^2?
# weight = np.amax(ccc) ** 2
if temp_chan[-1] in vertical_chans:
phase = 'P'
# Only take the S-pick with the best correlation
elif temp_chan[-1] in horizontal_chans:
phase = 'S'
debug_print('Making S-pick on: %s.%s.%s' %
(temp_net, temp_sta, temp_chan), 4, debug)
if temp_sta not in s_stachans.keys():
s_stachans[temp_sta] = ((temp_chan, np.amax(ccc),
picktime))
elif temp_sta in s_stachans.keys():
if np.amax(ccc) > s_stachans[temp_sta][1]:
picktime = picktime
else:
continue
else:
phase = None
_waveform_id = WaveformStreamID(
network_code=temp_net, station_code=temp_sta,
channel_code=temp_chan)
event.picks.append(Pick(
waveform_id=_waveform_id, time=picktime,
method_id=ResourceIdentifier('EQcorrscan'), phase_hint=phase,
creation_info='eqcorrscan.core.lag_calc',
evaluation_mode='automatic',
comments=[Comment(text='cc_max=%s' % cc_max)]))
event.resource_id = detection_id
ccc_str = ("detect_val=%s" % cccsum)
event.comments.append(Comment(text=ccc_str))
if used_chans == detect_chans:
if pre_lag_ccsum is not None and\
checksum - pre_lag_ccsum < -(0.3 * pre_lag_ccsum):
msg = ('lag-calc has decreased cccsum from %f to %f - '
% (pre_lag_ccsum, checksum))
raise LagCalcError(msg)
else:
warnings.warn('Cannot check if cccsum is better, used %i channels '
'for detection, but %i are used here'
% (detect_chans, used_chans))
    return i, event
def forgot_password(self):
"""
View function to request a password recovery email with a reset token.
Supports html and json requests.
"""
form = self._get_form('SECURITY_FORGOT_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.send_reset_password_instructions(form.user)
self.flash(_('flask_unchained.bundles.security:flash.password_reset_request',
email=form.user.email),
category='info')
if request.is_json:
return '', HTTPStatus.NO_CONTENT
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('forgot_password',
forgot_password_form=form,
                           **self.security.run_ctx_processor('forgot_password'))
Supports html and json requests. |
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
generated_arr = np.random.normal(loc=self.__mu, scale=self.__sigma, size=self.__output_shape)
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate()
        return generated_arr
def connect_discussion_signals():
"""
Connect all the signals on the Comment model to
    maintain a valid discussion count on each entry
    when an action is performed on the comments.
"""
post_save.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
post_delete.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
comment_was_flagged.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
comment_was_posted.connect(
count_comments_handler, sender=comment_model,
dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
pingback_was_posted.connect(
count_pingbacks_handler, sender=comment_model,
dispatch_uid=PINGBACK_WF_COUNT_PINGBACKS)
trackback_was_posted.connect(
count_trackbacks_handler, sender=comment_model,
        dispatch_uid=TRACKBACK_WF_COUNT_TRACKBACKS)
def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
"""Record a purchase in Sailthru
Arguments:
sailthru_client (object): SailthruClient
email (str): user's email address
item (dict): Sailthru required information about the course
purchase_incomplete (boolean): True if adding item to shopping cart
message_id (str): Cookie used to identify marketing campaign
options (dict): Sailthru purchase API options (e.g. template name)
Returns:
False if retryable error, else True
"""
try:
sailthru_response = sailthru_client.purchase(email, [item],
incomplete=purchase_incomplete, message_id=message_id,
options=options)
if not sailthru_response.is_ok():
error = sailthru_response.get_error()
logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
return not can_retry_sailthru_request(error)
except SailthruClientError as exc:
logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
return False
    return True
def debug_command(self, cmd, args=None, progress_callback=None):
"""Send a debug command to the connected device.
This generic method will send a named debug command with the given
arguments to the connected device. Debug commands are typically used
for things like forcible reflashing of firmware or other, debug-style,
operations. Not all transport protocols support debug commands and
        the supported operations vary depending on the transport protocol.
Args:
cmd (str): The name of the debug command to send.
args (dict): Any arguments required by the given debug command
progress_callback (callable): A function that will be called periodically to
report progress. The signature must be callback(done_count, total_count)
where done_count and total_count will be passed as integers.
Returns:
object: The return value of the debug command, if there is one.
"""
if args is None:
args = {}
try:
self._on_progress = progress_callback
return self._loop.run_coroutine(self.adapter.debug(0, cmd, args))
finally:
            self._on_progress = None
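A hypothetical invocation ('dump_ram' is a made-up command name; supported commands depend on the transport):

def on_progress(done_count, total_count):
    print('{}/{}'.format(done_count, total_count))

result = device.debug_command('dump_ram', progress_callback=on_progress)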
def call_actions_future(
self,
service_name,
actions,
expansions=None,
raise_job_errors=True,
raise_action_errors=True,
timeout=None,
**kwargs
):
"""
This method is identical in signature and behavior to `call_actions`, except that it sends the request and
then immediately returns a `FutureResponse` instead of blocking waiting on a response and returning a
`JobResponse`. Just call `result(timeout=None)` on the future response to block for an available
response. Some of the possible exceptions may be raised when this method is called; others may be raised when
the future is used.
:return: A future from which the job response can later be retrieved
:rtype: Client.FutureResponse
"""
kwargs.pop('suppress_response', None) # If this kwarg is used, this method would always result in a timeout
if timeout:
kwargs['message_expiry_in_seconds'] = timeout
expected_request_id = self.send_request(service_name, actions, **kwargs)
def get_response(_timeout=None):
# Get all responses
responses = list(self.get_all_responses(service_name, receive_timeout_in_seconds=_timeout or timeout))
# Try to find the expected response
found = False
response = None
for request_id, response in responses:
if request_id == expected_request_id:
found = True
break
if not found:
# This error should be impossible if `get_all_responses` is behaving correctly, but let's raise a
# meaningful error just in case.
raise Exception(
'Got unexpected response(s) with ID(s) {} for request with ID {}'.format(
[r[0] for r in responses],
expected_request_id,
)
)
# Process errors at the Job and Action level
if response.errors and raise_job_errors:
raise self.JobError(response.errors)
if raise_action_errors:
error_actions = [action for action in response.actions if action.errors]
if error_actions:
raise self.CallActionError(error_actions)
if expansions:
kwargs.pop('continue_on_error', None)
self._perform_expansion(response.actions, expansions, **kwargs)
return response
        return self.FutureResponse(get_response)
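A hedged sketch of consuming the future (service and action names are illustrative):

future = client.call_actions_future('example_service', actions)
# ... do other work while the request is in flight ...
response = future.result(timeout=10)  # blocks up to 10 s for the JobResponse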
def satisfy_one(self, assumptions=None, **params):
"""
If the input CNF is satisfiable, return a satisfying input point.
A contradiction will return None.
"""
verbosity = params.get('verbosity', 0)
default_phase = params.get('default_phase', 2)
propagation_limit = params.get('propagation_limit', -1)
decision_limit = params.get('decision_limit', -1)
seed = params.get('seed', 1)
return picosat.satisfy_one(self.nvars, self.clauses, assumptions,
verbosity, default_phase, propagation_limit,
                                   decision_limit, seed)
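Assuming cnf is an instance of the class defining this method:

point = cnf.satisfy_one(seed=42)
if point is None:
    print('contradiction: no satisfying assignment')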
A contradiction will return None. |
def _bind(self, _descriptor):
"""
Bind a ResponseObject to a given action descriptor. This
updates the default HTTP response code and selects the
appropriate content type and serializer for the response.
"""
# If the method has a default code, use it
self._defcode = getattr(_descriptor.method, '_wsgi_code', 200)
# Set up content type and serializer
        self.content_type, self.serializer = _descriptor.serializer(self.req)
def get_person_by_nickname(self, nickname):
"""Retrieves a person by nickname"""
return next((person for person in self.get_all_people()
                     if person.nickname.lower() == nickname.lower()), None)
def loadtxt_str(path:PathOrStr)->np.ndarray:
"Return `ndarray` of `str` of lines of text from `path`."
with open(path, 'r') as f: lines = f.readlines()
    return np.array([l.strip() for l in lines])
def create_issues_report(self, timeout=-1):
"""
Creates an unexpected zoning report for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
list: A list of FCIssueResponse dict.
"""
uri = "{}/issues/".format(self.data["uri"])
        return self._helper.create_report(uri, timeout)
def phone_number(self, phone_number):
"""
Sets the phone_number of this OrderFulfillmentRecipient.
The phone number of the fulfillment recipient. If provided, overrides the value from customer profile indicated by customer_id.
:param phone_number: The phone_number of this OrderFulfillmentRecipient.
:type: str
"""
if phone_number is None:
raise ValueError("Invalid value for `phone_number`, must not be `None`")
        if len(phone_number) > 16:
            raise ValueError("Invalid value for `phone_number`, length must be at most `16`")
        self._phone_number = phone_number
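A hypothetical exercise of the validating setter (the no-argument constructor is assumed):

recipient = OrderFulfillmentRecipient()
recipient.phone_number = '+14155550100'  # 12 characters: accepted
recipient.phone_number = None            # raises ValueError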
def errorhandler(self, code_or_exception):
"""
Register a function to handle errors by code or exception class.
A decorator that is used to register a function given an
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
:param code_or_exception: the code as integer for the handler, or
an arbitrary exception
"""
def decorator(fn):
self._defer(lambda app: app.register_error_handler(code_or_exception, fn))
return fn
        return decorator
def lookup(self, auth, type, mapping, defer=False):
""" Look up a Resource ID by alias, owned Resource ID, or share activation code under the
client specified in <ClientID>.
Args:
auth: <cik>
type: Type of resource to lookup (alias | owner | shared)
mapping: Based on resource type defined above.
"""
        return self._call('lookup', auth, [type, mapping], defer)
def getOrderVector(self):
"""
Returns a list of lists. Each list represents tiers of candidates. candidates in earlier
tiers are preferred to candidates appearing in later tiers. Candidates in the same tier
are preferred equally.
"""
# We sort the candidates based on the number of incoming edges they have in the graph. If
# two candidates have the same number, we assume that they are tied.
incEdgesMap = self.getIncEdgesMap()
sortedKeys = sorted(incEdgesMap.keys(), reverse = True)
orderVector = []
        for key in sortedKeys:
            tier = []
            cands = incEdgesMap[key]
            for cand in cands:
                tier.append(cand)
            orderVector.append(tier[0])  # note: appends only the first candidate of each tier
        return orderVector
def defer_function(self, callable):
"""Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred because code later in the file might modify
the global scope. When 'callable' is called, the scope at the time this is called will be restored, however it
will contain any new bindings added to it.
"""
        self._deferred_functions.append((callable, self.scope_stack[:], self.offset))
def compile_migrations(migrator, models, reverse=False):
"""Compile migrations for given models."""
source = migrator.orm.values()
if reverse:
source, models = models, source
migrations = diff_many(models, source, migrator, reverse=reverse)
if not migrations:
return False
migrations = NEWLINE + NEWLINE.join('\n\n'.join(migrations).split('\n'))
    return CLEAN_RE.sub('\n', migrations)
def get_system_uptime_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
input = ET.SubElement(get_system_uptime, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
        return callback(config)
def load_variable(self, var=None, start_date=None, end_date=None,
time_offset=None, grid_attrs=None, **DataAttrs):
"""Load a DataArray for requested variable and time range.
Automatically renames all grid attributes to match aospy conventions.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
**DataAttrs
            Attributes needed to identify a unique set of files to load.
Returns
-------
da : DataArray
            DataArray for the specified variable, date range, and interval.
"""
file_set = self._generate_file_set(var=var, start_date=start_date,
end_date=end_date, **DataAttrs)
ds = _load_data_from_disk(
file_set, self.preprocess_func, data_vars=self.data_vars,
coords=self.coords, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
)
if var.def_time:
ds = _prep_time_data(ds)
start_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], start_date)
end_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], end_date)
ds = set_grid_attrs_as_coords(ds)
da = _sel_var(ds, var, self.upcast_float32)
if var.def_time:
da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
return times.sel_time(da, start_date, end_date).load()
else:
            return da.load()
def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
    return plt.gcf()
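A hedged usage sketch: f must map an (N, 3) array of barycentric points to N values, e.g. the product of the three coordinates:

fig = _contour(lambda p: p[:, 0] * p[:, 1] * p[:, 2], vertexlabels=('a', 'b', 'c'))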
def cnst_A1T(self, Y1):
r"""Compute :math:`A_1^T \mathbf{y}_1` component of
:math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 =
(\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`.
"""
Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN)
return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv,
                         self.cri.axisN)
def _mpda(self, re_grammar, splitstring=0):
"""
Args:
re_grammar (list): A list of grammar rules
splitstring (bool): A boolean for enabling or disabling
the splitting of symbols using a space
Returns:
PDA: The generated PDA
"""
cnfgrammar = CNFGenerator(re_grammar)
if not self.alphabet:
self._extract_alphabet(cnfgrammar)
cnftopda = CnfPda(self.alphabet)
productions = {}
nonterminals = []
nonterminals.append(cnfgrammar.init_symbol)
for key in list(cnfgrammar.grammar_nonterminals):
if key != cnfgrammar.init_symbol:
nonterminals.append(key)
for key in list(cnfgrammar.grammar_nonterminals):
j = 0
productions[key] = {}
for pair in cnfgrammar.grammar_rules:
cnf_form = list(pair)
if cnf_form[0] == key:
productions[key][j] = {}
                    if isinstance(cnf_form[1], tuple):
                        productions[key][j]['b0'] = list(cnf_form[1])[0]
                        productions[key][j]['b1'] = list(cnf_form[1])[1]
                    else:
                        productions[key][j]['a'] = cnf_form[1]
j = j + 1
return cnftopda.initialize(
nonterminals, productions, list(
                cnfgrammar.grammar_terminals), splitstring)
def _exec(self, cmd, url, json_data=None):
"""
execute a command at the device using the RESTful API
:param str cmd: one of the REST commands, e.g. GET or POST
:param str url: URL of the REST API the command should be applied to
:param dict json_data: json data that should be attached to the command
"""
assert(cmd in ("GET", "POST", "PUT", "DELETE"))
assert(self.dev is not None)
if json_data is None:
json_data = {}
# add device address to the URL
url = url.format(self.dev["ipv4_internal"])
# set basic authentication
auth = HTTPBasicAuth("dev", self.dev["api_key"])
# execute HTTP request
res = None
if cmd == "GET":
res = self._local_session.session.get(
url, auth=auth, verify=False
)
elif cmd == "POST":
res = self._local_session.session.post(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "PUT":
res = self._local_session.session.put(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "DELETE":
res = self._local_session.session.delete(
url, auth=auth, verify=False
)
if res is not None:
# raise an exception on error
res.raise_for_status()
        return res.json()
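Illustrative calls (URL patterns are made up; the '{}' placeholder is filled with the device's internal IPv4 address):

info = dev._exec('GET', 'https://{}:4343/api/v2/info')
dev._exec('POST', 'https://{}:4343/api/v2/display', {'brightness': 5})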
def set_user_access(self, uid, channel=None, callback=False,
link_auth=True, ipmi_msg=True, privilege_level='user'):
"""Set user access
:param uid: user number [1:16]
:param channel: number [1:7]
        :param callback: User Restricted to Callback
False = User Privilege Limit is determined by the User Privilege Limit
parameter, below, for both callback and non-callback connections.
True = User Privilege Limit is determined by the User Privilege Limit
parameter for callback connections, but is restricted to Callback
level for non-callback connections. Thus, a user can only initiate
a Callback when they 'call in' to the BMC, but once the callback
connection has been made, the user could potentially establish a
session as an Operator.
:param link_auth: User Link authentication
enable/disable (used to enable whether this
user's name and password information will be used for link
authentication, e.g. PPP CHAP) for the given channel. Link
authentication itself is a global setting for the channel and is
enabled/disabled via the serial/modem configuration parameters.
        :param ipmi_msg: User IPMI Messaging:
(used to enable/disable whether
this user's name and password information will be used for IPMI
Messaging. In this case, 'IPMI Messaging' refers to the ability to
execute generic IPMI commands that are not associated with a
particular payload type. For example, if IPMI Messaging is disabled for
        a user, but that user is enabled for activating the SOL
payload type, then IPMI commands associated with SOL and session
management, such as Get SOL Configuration Parameters and Close Session
are available, but generic IPMI commands such as Get SEL Time are
unavailable.)
:param privilege_level:
User Privilege Limit. (Determines the maximum privilege level that the
user is allowed to switch to on the specified channel.)
* callback
* user
* operator
* administrator
* proprietary
* no_access
"""
if channel is None:
channel = self.get_network_channel()
b = 0b10000000
if callback:
b |= 0b01000000
if link_auth:
b |= 0b00100000
if ipmi_msg:
b |= 0b00010000
b |= channel & 0b00001111
privilege_levels = {
'reserved': 0,
'callback': 1,
'user': 2,
'operator': 3,
'administrator': 4,
'proprietary': 5,
'no_access': 0x0F,
}
self.oem_init()
self._oem.set_user_access(
uid, channel, callback, link_auth, ipmi_msg, privilege_level)
data = [b, uid & 0b00111111,
privilege_levels[privilege_level] & 0b00001111, 0]
response = self.raw_command(netfn=0x06, command=0x43, data=data)
if 'error' in response:
raise Exception(response['error'])
        return True
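Hedged examples, assuming ipmi is an instance of the class defining this method:

ipmi.set_user_access(uid=3, privilege_level='operator')  # channel autodetected
ipmi.set_user_access(uid=5, channel=1, ipmi_msg=False, privilege_level='no_access')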
def submit(jman, command, arguments, deps=[], array=None):
"""An easy submission option for grid-enabled scripts. Create the log
directories using random hash codes. Use the arguments as parsed by the main
script."""
logdir = os.path.join(os.path.realpath(arguments.logdir),
tools.random_logdir())
jobname = os.path.splitext(os.path.basename(command[0]))[0]
cmd = tools.make_shell(sys.executable, command)
if arguments.dryrun:
return DryRunJob(cmd, cwd=arguments.cwd, queue=arguments.queue,
hostname=arguments.hostname, memfree=arguments.memfree,
hvmem=arguments.hvmem, gpumem=arguments.gpumem, pe_opt=arguments.pe_opt,
stdout=logdir, stderr=logdir, name=jobname, deps=deps,
array=array)
# really submit
return jman.submit(cmd, cwd=arguments.cwd, queue=arguments.queue,
hostname=arguments.hostname, memfree=arguments.memfree,
hvmem=arguments.hvmem, gpumem=arguments.gpumem, pe_opt=arguments.pe_opt,
stdout=logdir, stderr=logdir, name=jobname, deps=deps,
      array=array)
def get_code(module):
"""
Compile and return a Module's code object.
"""
fp = open(module.path)
try:
return compile(fp.read(), str(module.name), 'exec')
finally:
        fp.close()
def process_event(self, event_name: str, data: dict):
"""
Process event after epoch
Args:
event_name: whether event is send after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None
"""
if (isinstance(self.opt.get("learning_rate", None), float) and
isinstance(self.opt.get("learning_rate_decay", None), float)):
pass
else:
if event_name == 'after_train_log':
if (self.get_learning_rate_variable() is not None) and ('learning_rate' not in data):
data['learning_rate'] = float(K.get_value(self.get_learning_rate_variable()))
# data['learning_rate'] = self._lr
if (self.get_momentum_variable() is not None) and ('momentum' not in data):
data['momentum'] = float(K.get_value(self.get_momentum_variable()))
# data['momentum'] = self._mom
else:
super().process_event(event_name, data) | Process event after epoch
Args:
event_name: whether event is send after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None |
def parse_args(argv):
"""Parse the command line arguments and return the namespace that was
creates by argparse.ArgumentParser.parse_args().
:returns: the namespace parsed from the command line
:rtype: argparse.Namespace
"""
# Create the base argument parser. It will be reused for the first and
# second round of argument parsing.
base = argparse.ArgumentParser(
description="Khard is a carddav address book for the console",
formatter_class=argparse.RawTextHelpFormatter, add_help=False)
base.add_argument("-c", "--config", default="", help="config file to use")
base.add_argument("--debug", action="store_true",
help="enable debug output")
base.add_argument("--skip-unparsable", action="store_true",
help="skip unparsable vcard files")
base.add_argument("-v", "--version", action="version",
version="Khard version %s" % khard_version)
# Create the first argument parser. Its main job is to set the correct
# config file. The config file is needed to get the default command if no
# subcommand is given on the command line. This parser will ignore most
# arguments, as they will be parsed by the second parser.
first_parser = argparse.ArgumentParser(parents=[base])
first_parser.add_argument('remainder', nargs=argparse.REMAINDER)
# Create the main argument parser. It will handle the complete command
# line only ignoring the config and debug options as these have already
# been set.
parser = argparse.ArgumentParser(parents=[base])
# create address book subparsers with different help texts
default_addressbook_parser = argparse.ArgumentParser(add_help=False)
default_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of contacts")
new_addressbook_parser = argparse.ArgumentParser(add_help=False)
new_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify address book in which to create the new contact")
copy_move_addressbook_parser = argparse.ArgumentParser(add_help=False)
copy_move_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of contacts")
copy_move_addressbook_parser.add_argument(
"-A", "--target-addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify target address book in which to copy / move the "
"selected contact")
merge_addressbook_parser = argparse.ArgumentParser(add_help=False)
merge_addressbook_parser.add_argument(
"-a", "--addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of source contacts")
merge_addressbook_parser.add_argument(
"-A", "--target-addressbook", default=[],
type=lambda x: [y.strip() for y in x.split(",")],
help="Specify one or several comma separated address book names to "
"narrow the list of target contacts")
# create input file subparsers with different help texts
email_header_input_file_parser = argparse.ArgumentParser(add_help=False)
email_header_input_file_parser.add_argument(
"-i", "--input-file", default="-",
help="Specify input email header file name or use stdin by default")
template_input_file_parser = argparse.ArgumentParser(add_help=False)
template_input_file_parser.add_argument(
"-i", "--input-file", default="-",
help="Specify input template file name or use stdin by default")
template_input_file_parser.add_argument(
"--open-editor", action="store_true", help="Open the default text "
"editor after successful creation of new contact")
# create sort subparser
sort_parser = argparse.ArgumentParser(add_help=False)
sort_parser.add_argument(
"-d", "--display", choices=("first_name", "last_name"),
help="Display names in contact table by first or last name")
sort_parser.add_argument(
"-g", "--group-by-addressbook", action="store_true",
help="Group contact table by address book")
sort_parser.add_argument(
"-r", "--reverse", action="store_true",
help="Reverse order of contact table")
sort_parser.add_argument(
"-s", "--sort", choices=("first_name", "last_name"),
help="Sort contact table by first or last name")
# create search subparsers
default_search_parser = argparse.ArgumentParser(add_help=False)
default_search_parser.add_argument(
"-f", "--search-in-source-files", action="store_true",
help="Look into source vcf files to speed up search queries in "
"large address books. Beware that this option could lead "
"to incomplete results.")
default_search_parser.add_argument(
"-e", "--strict-search", action="store_true",
help="narrow contact search to name field")
default_search_parser.add_argument(
"-u", "--uid", default="", help="select contact by uid")
default_search_parser.add_argument(
"search_terms", nargs="*", metavar="search terms",
help="search in all fields to find matching contact")
merge_search_parser = argparse.ArgumentParser(add_help=False)
merge_search_parser.add_argument(
"-f", "--search-in-source-files", action="store_true",
help="Look into source vcf files to speed up search queries in "
"large address books. Beware that this option could lead "
"to incomplete results.")
merge_search_parser.add_argument(
"-e", "--strict-search", action="store_true",
help="narrow contact search to name fields")
merge_search_parser.add_argument(
"-t", "--target-contact", "--target", default="",
help="search in all fields to find matching target contact")
merge_search_parser.add_argument(
"-u", "--uid", default="", help="select source contact by uid")
merge_search_parser.add_argument(
"-U", "--target-uid", default="", help="select target contact by uid")
merge_search_parser.add_argument(
"source_search_terms", nargs="*", metavar="source",
help="search in all fields to find matching source contact")
# create subparsers for actions
subparsers = parser.add_subparsers(dest="action")
list_parser = subparsers.add_parser(
"list",
aliases=Actions.get_aliases("list"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list all (selected) contacts",
help="list all (selected) contacts")
list_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: uid\\tcontact_name\\taddress_book_name")
subparsers.add_parser(
"details",
aliases=Actions.get_aliases("details"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="display detailed information about one contact",
help="display detailed information about one contact")
export_parser = subparsers.add_parser(
"export",
aliases=Actions.get_aliases("export"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="export a contact to the custom yaml format that is "
"also used for editing and creating contacts",
help="export a contact to the custom yaml format that is also "
"used for editing and creating contacts")
export_parser.add_argument(
"--empty-contact-template", action="store_true",
help="Export an empty contact template")
export_parser.add_argument(
"-o", "--output-file", default=sys.stdout,
type=argparse.FileType("w"),
help="Specify output template file name or use stdout by default")
birthdays_parser = subparsers.add_parser(
"birthdays",
aliases=Actions.get_aliases("birthdays"),
parents=[default_addressbook_parser, default_search_parser],
description="list birthdays (sorted by month and day)",
help="list birthdays (sorted by month and day)")
birthdays_parser.add_argument(
"-d", "--display", choices=("first_name", "last_name"),
help="Display names in birthdays table by first or last name")
birthdays_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: name\\tdate")
email_parser = subparsers.add_parser(
"email",
aliases=Actions.get_aliases("email"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list email addresses",
help="list email addresses")
email_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: address\\tname\\ttype")
email_parser.add_argument(
"--remove-first-line", action="store_true",
help="remove \"searching for '' ...\" line from parsable output "
"(that line is required by mutt)")
phone_parser = subparsers.add_parser(
"phone",
aliases=Actions.get_aliases("phone"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list phone numbers",
help="list phone numbers")
phone_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: number\\tname\\ttype")
post_address_parser = subparsers.add_parser(
"postaddress",
aliases=Actions.get_aliases("postaddress"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list postal addresses",
help="list postal addresses")
post_address_parser.add_argument(
"-p", "--parsable", action="store_true",
help="Machine readable format: address\\tname\\ttype")
subparsers.add_parser(
"source",
aliases=Actions.get_aliases("source"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="edit the vcard file of a contact directly",
help="edit the vcard file of a contact directly")
new_parser = subparsers.add_parser(
"new",
aliases=Actions.get_aliases("new"),
parents=[new_addressbook_parser, template_input_file_parser],
description="create a new contact",
help="create a new contact")
new_parser.add_argument(
"--vcard-version", choices=("3.0", "4.0"),
help="Select preferred vcard version for new contact")
add_email_parser = subparsers.add_parser(
"add-email",
aliases=Actions.get_aliases("add-email"),
parents=[default_addressbook_parser, email_header_input_file_parser,
default_search_parser, sort_parser],
description="Extract email address from the \"From:\" field of an "
"email header and add to an existing contact or create a new one",
help="Extract email address from the \"From:\" field of an email "
"header and add to an existing contact or create a new one")
add_email_parser.add_argument(
"--vcard-version", choices=("3.0", "4.0"),
help="Select preferred vcard version for new contact")
subparsers.add_parser(
"merge",
aliases=Actions.get_aliases("merge"),
parents=[merge_addressbook_parser, merge_search_parser, sort_parser],
description="merge two contacts",
help="merge two contacts")
subparsers.add_parser(
"modify",
aliases=Actions.get_aliases("modify"),
parents=[default_addressbook_parser, template_input_file_parser,
default_search_parser, sort_parser],
description="edit the data of a contact",
help="edit the data of a contact")
subparsers.add_parser(
"copy",
aliases=Actions.get_aliases("copy"),
parents=[copy_move_addressbook_parser, default_search_parser,
sort_parser],
description="copy a contact to a different addressbook",
help="copy a contact to a different addressbook")
subparsers.add_parser(
"move",
aliases=Actions.get_aliases("move"),
parents=[copy_move_addressbook_parser, default_search_parser,
sort_parser],
description="move a contact to a different addressbook",
help="move a contact to a different addressbook")
remove_parser = subparsers.add_parser(
"remove",
aliases=Actions.get_aliases("remove"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="remove a contact",
help="remove a contact")
remove_parser.add_argument(
"--force", action="store_true",
help="Remove contact without confirmation")
subparsers.add_parser(
"addressbooks",
aliases=Actions.get_aliases("addressbooks"),
description="list addressbooks",
help="list addressbooks")
subparsers.add_parser(
"filename",
aliases=Actions.get_aliases("filename"),
parents=[default_addressbook_parser, default_search_parser,
sort_parser],
description="list filenames of all matching contacts",
help="list filenames of all matching contacts")
# Replace the print_help method of the first parser with the print_help
# method of the main parser. This makes it possible to have the first
# parser handle the help option so that command line help can be printed
# without parsing the config file first (which is a problem if there are
# errors in the config file). The config file will still be parsed before
# the full command line is parsed so errors in the config file might be
# reported before command line syntax errors.
first_parser.print_help = parser.print_help
    # Parse the command line with the first argument parser. It will handle
# the config option (its main job) and also the help, version and debug
# options as these do not depend on anything else.
args = first_parser.parse_args(argv)
remainder = args.remainder
# Set the loglevel to debug if given on the command line. This is done
# before parsing the config file to make it possible to debug the parsing
# of the config file.
if "debug" in args and args.debug:
logging.basicConfig(level=logging.DEBUG)
# Create the global config instance.
global config
config = Config(args.config)
# Check the log level again and merge the value from the command line with
# the config file.
if ("debug" in args and args.debug) or config.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("first args=%s", args)
logging.debug("remainder=%s", remainder)
# Set the default command from the config file if none was given on the
# command line.
if not remainder or remainder[0] not in Actions.get_all():
remainder.insert(0, config.default_action)
logging.debug("updated remainder=%s", remainder)
# Save the last option that needs to be carried from the first parser run
# to the second.
skip = args.skip_unparsable
# Parse the remainder of the command line. All options from the previous
# run have already been processed and are not needed any more.
args = parser.parse_args(remainder)
# Restore settings that are left from the first parser run.
args.skip_unparsable = skip
logging.debug("second args=%s", args)
# An integrity check for some options.
if "uid" in args and args.uid and (
("search_terms" in args and args.search_terms) or
("source_search_terms" in args and args.source_search_terms)):
        # If a uid was given we require that no search terms were given.
        parser.error("You cannot give arbitrary search terms and --uid at the"
" same time.")
return args | Parse the command line arguments and return the namespace that was
created by argparse.ArgumentParser.parse_args().
:returns: the namespace parsed from the command line
:rtype: argparse.Namespace |
def parent(self):
"""Return parent resource
:rtype: Resource
        :raises ResourceNotFound: parent resource doesn't exist
:raises ResourceMissing: parent resource is not defined
"""
try:
return Resource(self['parent_type'], uuid=self['parent_uuid'], check=True)
except KeyError:
raise ResourceMissing('%s has no parent resource' % self) | Return parent resource
:rtype: Resource
:raises ResourceNotFound: parent resource doesn't exist
:raises ResourceMissing: parent resource is not defined |
def fcoe_get_login_input_fcoe_login_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
input = ET.SubElement(fcoe_get_login, "input")
fcoe_login_rbridge_id = ET.SubElement(input, "fcoe-login-rbridge-id")
fcoe_login_rbridge_id.text = kwargs.pop('fcoe_login_rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def finish(self):
"""Combines coverage data and sets the list of coverage objects to report on."""
# Combine all the suffix files into the data file.
self.cov.stop()
self.cov.combine()
self.cov.save() | Combines coverage data and sets the list of coverage objects to report on. |
def search(self, **kwargs):
"""
        Method to search VIPs based on extended search.
        :param search: Dict containing QuerySets to find VIPs.
        :param include: Array containing fields to include on response.
        :param exclude: Array containing fields to exclude on response.
        :param fields: Array containing fields to override default fields.
        :param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing VIPs
"""
return super(ApiVipRequest, self).get(self.prepare_url('api/v3/vip-request/',
                                                               kwargs)) | Method to search VIPs based on extended search.
:param search: Dict containing QuerySets to find VIPs.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing VIPs
def from_pyfile(self, filename):
"""
        Read configuration from a Python file.
        :param filename: name of the configuration file
        :return: ``True`` if the file was read successfully; otherwise an exception is raised
"""
d = types.ModuleType('config')
d.__file__ = filename
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
self.from_object(d)
        return True | Read configuration from a Python file.
:param filename: name of the configuration file
:return: ``True`` if the file was read successfully; otherwise an exception is raised
def __initialize_ui(self):
"""
Initializes the View ui.
"""
self.viewport().installEventFilter(ReadOnlyFilter(self))
if issubclass(type(self), QListView):
super(type(self), self).setUniformItemSizes(True)
elif issubclass(type(self), QTreeView):
super(type(self), self).setUniformRowHeights(True) | Initializes the View ui. |
def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False,
variables=None):
r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]])
"""
if Ne > 9:
comma = ","
name = r"\rho"
open_brace = "_{"
close_brace = "}"
else:
comma = ""
name = "rho"
open_brace = ""
close_brace = ""
rho = []
for i in range(Ne):
row_rho = []
for j in range(Ne):
if i == j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables,
positive=True)]
elif i > j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
else:
if explicitly_hermitian:
row_rho += [conjugate(define_symbol(name, open_brace,
comma, j, i,
close_brace,
variables))]
else:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
rho += [row_rho]
if normalized:
rho11 = 1-sum([rho[i][i] for i in range(1, Ne)])
rho[0][0] = rho11
rho = Matrix(rho)
return rho | r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]]) |
def createSubtitle(self, fps, section):
"""Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression."""
matched = self._pattern.search(section)
if matched is not None:
matchedDict = matched.groupdict()
return Subtitle(
self.frametime(fps, matchedDict.get("time_from")),
self.frametime(fps, matchedDict.get("time_to")),
self.formatSub(matchedDict.get("text"))
)
return None | Returns a correct 'Subtitle' object from a text given in 'section'. If 'section' cannot
be parsed, None is returned.
By default 'section' is checked against 'subPattern' regular expression. |
def write(nml, nml_path, force=False, sort=False):
"""Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True)
"""
# Promote dicts to Namelists
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort) | Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True) |
def camel_case_to_name(name):
"""
Used to convert a classname to a lowercase name
"""
def convert_func(val):
return "_" + val.group(0).lower()
return name[0].lower() + re.sub(r'([A-Z])', convert_func, name[1:]) | Used to convert a classname to a lowercase name |
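A quick sanity check of the conversion above (a usage sketch; the function is assumed importable as defined). Note that consecutive capitals each receive their own underscore, a consequence of matching single uppercase letters:

>>> camel_case_to_name("InputFileParam")
'input_file_param'
>>> camel_case_to_name("XMLParser")
'x_m_l_parser'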
def is_subsequence(needle, haystack):
"""Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout"""
it = iter(haystack)
for element in needle:
if element not in it:
return False
return True | Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout |
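Because `it` is a single iterator shared across the loop, each `element not in it` test consumes the haystack up to and including the first match, so later needle elements can only match later haystack elements; that is what enforces ordering. A usage sketch under that reading:

>>> is_subsequence([1, 3], [1, 2, 3])
True
>>> is_subsequence([3, 1], [1, 2, 3])
False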
def intcomma(value):
"""
Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = str(value)
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new) | Borrowed from django.contrib.humanize
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'. |
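The recursion terminates once the regex no longer matches, i.e. once no run of four or more digits remains at the start of the string. A usage sketch (assuming the function above is in scope):

>>> intcomma(45000)
'45,000'
>>> intcomma(-1234567)
'-1,234,567'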
def area(poly):
"""Area of a polygon poly"""
if len(poly) < 3: # not a plane - no area
return 0
total = [0, 0, 0]
num = len(poly)
for i in range(num):
vi1 = poly[i]
vi2 = poly[(i+1) % num]
prod = np.cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
if total == [0, 0, 0]: # points are in a straight line - no area
return 0
result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2) | Area of a polygon poly |
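`unit_normal` is referenced but not shown in this row; a plausible stand-in (an assumption, not the original helper) normalizes the cross product of two edge vectors, which is enough to exercise `area`:

import numpy as np

def unit_normal(a, b, c):
    # assumed helper: unit normal of the plane through points a, b, c
    n = np.cross(np.subtract(b, a), np.subtract(c, a))
    return n / np.linalg.norm(n)

square = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
print(area(square))  # 1.0 for the unit square in the XY plane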
def parse_tasks_file_header(header, input_file_param_util,
output_file_param_util):
"""Parse the header from the tasks file into env, input, output definitions.
    Elements are formatted similarly to their equivalent command-line arguments,
but with associated values coming from the data rows.
Environment variables columns are headered as "--env <name>"
Inputs columns are headered as "--input <name>" with the name optional.
Outputs columns are headered as "--output <name>" with the name optional.
For historical reasons, bareword column headers (such as "JOB_ID") are
equivalent to "--env var_name".
Args:
header: Array of header fields
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
job_params: A list of EnvParams and FileParams for the environment
variables, LabelParams, input file parameters, and output file parameters.
Raises:
ValueError: If a header contains a ":" and the prefix is not supported.
"""
job_params = []
for col in header:
# Reserve the "-" and "--" namespace.
# If the column has no leading "-", treat it as an environment variable
col_type = '--env'
col_value = col
if col.startswith('-'):
col_type, col_value = split_pair(col, ' ', 1)
if col_type == '--env':
job_params.append(job_model.EnvParam(col_value))
elif col_type == '--label':
job_params.append(job_model.LabelParam(col_value))
elif col_type == '--input' or col_type == '--input-recursive':
name = input_file_param_util.get_variable_name(col_value)
job_params.append(
job_model.InputFileParam(
name, recursive=(col_type.endswith('recursive'))))
elif col_type == '--output' or col_type == '--output-recursive':
name = output_file_param_util.get_variable_name(col_value)
job_params.append(
job_model.OutputFileParam(
name, recursive=(col_type.endswith('recursive'))))
else:
raise ValueError('Unrecognized column header: %s' % col)
return job_params | Parse the header from the tasks file into env, input, output definitions.
Elements are formatted similarly to their equivalent command-line arguments,
but with associated values coming from the data rows.
Environment variables columns are headered as "--env <name>"
Inputs columns are headered as "--input <name>" with the name optional.
Outputs columns are headered as "--output <name>" with the name optional.
For historical reasons, bareword column headers (such as "JOB_ID") are
equivalent to "--env var_name".
Args:
header: Array of header fields
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
job_params: A list of EnvParams and FileParams for the environment
variables, LabelParams, input file parameters, and output file parameters.
Raises:
ValueError: If a header contains a ":" and the prefix is not supported. |
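`split_pair` is used above but not defined in this row; a minimal sketch consistent with the call site (splitting `--input myname` into type and value, and returning None for the value when no name follows) might be:

def split_pair(pair_string, separator, nullable_idx=1):
    # Assumed helper: split into exactly two fields; when the separator
    # is absent, the field at nullable_idx is padded with None.
    pair = pair_string.split(separator, 1)
    if len(pair) == 1:
        if nullable_idx == 0:
            return [None, pair[0]]
        return [pair[0], None]
    return pair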
def start_module(self):
"""Wrapper for _main function.
Catch and raise any exception occurring in the main function
:return: None
"""
try:
self._main()
except Exception as exp:
logger.exception('%s', traceback.format_exc())
raise Exception(exp) | Wrapper for _main function.
Catch and raise any exception occurring in the main function
:return: None |
def guess_rank(M_E):
'''Guess the rank of the incomplete matrix '''
n, m = M_E.shape
epsilon = np.count_nonzero(M_E) / np.sqrt(m * n)
_, S0, _ = svds_descending(M_E, min(100, max(M_E.shape) - 1))
S0 = np.diag(S0)
S1 = S0[:-1] - S0[1:]
S1_ = S1 / np.mean(S1[-10:])
r1 = 0
lam = 0.05
cost = [None] * len(S1_)
while r1 <= 0:
for idx in range(len(S1_)):
cost[idx] = lam * max(S1_[idx:]) + idx
i2 = np.argmin(cost)
r1 = np.max(i2)
lam += 0.05
cost = [None] * (len(S0) - 1)
for idx in range(len(S0) - 1):
cost[idx] = (S0[idx + 1] + np.sqrt(idx * epsilon)
* S0[0] / epsilon) / S0[idx]
i2 = np.argmin(cost)
r2 = np.max(i2 + 1)
r = max([r1, r2])
return r | Guess the rank of the incomplete matrix |
def rename(old_name, new_name):
"""Renames a snapshot"""
app = get_app()
snapshot = app.get_snapshot(old_name)
if not snapshot:
click.echo("Couldn't find snapshot %s" % old_name)
sys.exit(1)
new_snapshot = app.get_snapshot(new_name)
if new_snapshot:
click.echo("Snapshot with name %s already exists" % new_name)
sys.exit(1)
app.rename_snapshot(snapshot, new_name)
click.echo("Renamed snapshot %s to %s" % (old_name, new_name)) | Renames a snapshot |
def from_seedhex_file(path: str) -> SigningKeyType:
"""
Return SigningKey instance from Seedhex file
:param str path: Hexadecimal seed file path
"""
with open(path, 'r') as fh:
seedhex = fh.read()
return SigningKey.from_seedhex(seedhex) | Return SigningKey instance from Seedhex file
:param str path: Hexadecimal seed file path |
def _as_chunk(self):
"""
A method to return a chunk of data that can be combined for
constructed method values
:return:
A native Python value that can be added together. Examples include
byte strings, unicode strings or tuples.
"""
if self._chunks_offset == 0:
return self.contents
return self.contents[self._chunks_offset:] | A method to return a chunk of data that can be combined for
constructed method values
:return:
A native Python value that can be added together. Examples include
byte strings, unicode strings or tuples. |
def get_worker(self, *queues):
"""
Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues.
"""
if not queues:
queues = self.queues
queues = [self.get_queue(name) for name in queues]
worker_cls = import_attribute(self.worker_class)
worker = worker_cls(
queues,
connection=self.connection,
job_class=self.job_class,
queue_class=self.queue_class,
)
for exception_handler in self._exception_handlers:
worker.push_exc_handler(import_attribute(exception_handler))
return worker | Returns an RQ worker instance for the given queue names, e.g.::
configured_worker = rq.get_worker()
default_worker = rq.get_worker('default')
default_low_worker = rq.get_worker('default', 'low')
:param \\*queues: Names of queues the worker should act on, falls back
to the configured queues. |
def split_query(query):
"""
Handle the query as a WWW HTTP 1630 query, as this is how people
    usually think of URI queries in general. We do not decode anything
in split operations, neither percent nor the terrible plus-to-space
conversion. Return:
>>> split_query("k1=v1&k2=v+2%12&k3=&k4&&&k5==&=k&==")
(('k1', 'v1'), ('k2', 'v+2%12'), ('k3', ''), ('k4', None), ('k5', '='), ('', 'k'), ('', '='))
"""
def split_assignment(a):
sa = a.split('=', 1)
return len(sa) == 2 and tuple(sa) or (sa[0], None)
assignments = query.split('&')
return tuple([split_assignment(a) for a in assignments if a]) | Handle the query as a WWW HTTP 1630 query, as this is how people
usually think of URI queries in general. We do not decode anything
in split operations, neither percent nor the terrible plus-to-space
conversion. Return:
>>> split_query("k1=v1&k2=v+2%12&k3=&k4&&&k5==&=k&==")
(('k1', 'v1'), ('k2', 'v+2%12'), ('k3', ''), ('k4', None), ('k5', '='), ('', 'k'), ('', '=')) |
def getXML(self, CorpNum, NTSConfirmNum, UserID=None):
""" ์ ์์ธ๊ธ๊ณ์ฐ์ ์์ธ์ ๋ณด ํ์ธ - XML
args
CorpNum : ํ๋นํ์ ์ฌ์
์๋ฒํธ
NTSConfirmNum : ๊ตญ์ธ์ฒญ ์น์ธ๋ฒํธ
UserID : ํ๋นํ์ ์์ด๋
return
์ ์์ธ๊ธ๊ณ์ฐ์ ์ ๋ณด๊ฐ์ฒด
raise
PopbillException
"""
        if NTSConfirmNum is None or len(NTSConfirmNum) != 24:
            raise PopbillException(-99999999, "The NTS approval number (NTSConfirmNum) is invalid.")
        return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '?T=xml', CorpNum, UserID) | Get electronic tax invoice detail information - XML
args
    CorpNum : Popbill member business registration number
    NTSConfirmNum : NTS (National Tax Service) approval number
    UserID : Popbill member user ID
return
    electronic tax invoice information object
raise
    PopbillException
def update_ref(profile, ref, sha):
"""Point a ref to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to update, e.g., ``heads/my-feature-branch``.
sha
The SHA of the commit to point the ref to.
Returns
A dict with data about the ref.
"""
resource = "/refs/" + ref
payload = {"sha": sha}
data = api.patch_request(profile, resource, payload)
return prepare(data) | Point a ref to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to update, e.g., ``heads/my-feature-branch``.
sha
The SHA of the commit to point the ref to.
Returns
A dict with data about the ref. |
def heating_level(self):
"""Return heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftHeatingLevel']
elif self.side == 'right':
level = self.device.device_data['rightHeatingLevel']
return level
except TypeError:
return None | Return heating level. |
def reset(self, id=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID is given, only that specific route is affected. '''
if id is None: self.ccache.clear()
else: self.ccache.pop(id, None)
if DEBUG:
for route in self.routes:
if route['id'] not in self.ccache:
self.ccache[route['id']] = self._build_callback(route) | Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID is given, only that specific route is affected. |
def treeplot(self, qlist, credible_interval):
"""Get data for each treeplot for the variable."""
for y, _, label, values, color in self.iterator():
ntiles = np.percentile(values.flatten(), qlist)
ntiles[0], ntiles[-1] = hpd(values.flatten(), credible_interval)
yield y, label, ntiles, color | Get data for each treeplot for the variable. |
def delete_user(self, user_id, **kwargs): # noqa: E501
"""Delete a user. # noqa: E501
An endpoint for deleting a user. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/users/{user-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_user_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.delete_user_with_http_info(user_id, **kwargs) # noqa: E501
return data | Delete a user. # noqa: E501
An endpoint for deleting a user. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/users/{user-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def log_predictive_density(self, x_test, y_test, Y_metadata=None):
"""
Calculation of the log predictive density. Notice we add
the jacobian of the warping function here.
        .. math::
            p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}, \\sigma^{2}_{*})
:param x_test: test locations (x_{*})
:type x_test: (Nx1) array
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param Y_metadata: metadata associated with the test points
"""
mu_star, var_star = self._raw_predict(x_test)
fy = self.warping_function.f(y_test)
ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata)
return ll_lpd + np.log(self.warping_function.fgrad_y(y_test)) | Calculation of the log predictive density. Notice we add
the jacobian of the warping function here.
.. math::
    p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}, \\sigma^{2}_{*})
:param x_test: test locations (x_{*})
:type x_test: (Nx1) array
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param Y_metadata: metadata associated with the test points |
def codebox(msg="", title=" ", text=""):
"""
Display some text in a monospaced font, with no line wrapping.
This function is suitable for displaying code and text that is
formatted using spaces.
The text parameter should be a string, or a list or tuple of lines to be
displayed in the textbox.
:param str msg: the msg to be displayed
:param str title: the window title
:param str text: what to display in the textbox
"""
return tb.textbox(msg, title, text, codebox=1) | Display some text in a monospaced font, with no line wrapping.
This function is suitable for displaying code and text that is
formatted using spaces.
The text parameter should be a string, or a list or tuple of lines to be
displayed in the textbox.
:param str msg: the msg to be displayed
:param str title: the window title
:param str text: what to display in the textbox |
def set_timezone(tz=None, deploy=False):
'''
Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
tz (str): The name of the timezone to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_timezone UTC
salt '*' panos.set_timezone UTC deploy=True
'''
if not tz:
raise CommandExecutionError("Timezone name option must not be none.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone',
'element': '<timezone>{0}</timezone>'.format(tz)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret | Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
tz (str): The name of the timezone to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_timezone UTC
salt '*' panos.set_timezone UTC deploy=True |
def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:
"""Create hash Id from edge record
Args:
edge (Mapping[str, Any]): edge record to create hash from
Returns:
str: Murmur3 128 bit hash
"""
doc_string = json.dumps(doc, sort_keys=True)
    return _create_hash(doc_string) | Create hash Id from a document record
Args:
    doc (Mapping[str, Any]): document record to create hash from
Returns:
str: Murmur3 128 bit hash |
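`_create_hash` is not included in this row; given the docstring's "Murmur3 128 bit hash", a sketch (an assumption, not the original helper) built on the `mmh3` package could be:

import mmh3

def _create_hash(string: str) -> str:
    # assumed helper: 128-bit MurmurHash3 of the canonical JSON string
    return str(mmh3.hash128(string))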
def decode_token(encoded_token, csrf_value=None, allow_expired=False):
"""
Returns the decoded token (python dict) from an encoded JWT. This does all
    the checks to ensure that the decoded token is valid before returning it.
:param encoded_token: The encoded JWT to decode into a python dict.
:param csrf_value: Expected CSRF double submit value (optional)
    :param allow_expired: Option to ignore exp claim validation in token
:return: Dictionary containing contents of the JWT
"""
jwt_manager = _get_jwt_manager()
unverified_claims = jwt.decode(
encoded_token, verify=False, algorithms=config.algorithm
)
unverified_headers = jwt.get_unverified_header(encoded_token)
# Attempt to call callback with both claims and headers, but fallback to just claims
# for backwards compatibility
try:
secret = jwt_manager._decode_key_callback(unverified_claims, unverified_headers)
except TypeError:
        msg = (
            "The single-argument (unverified_claims) form of decode_key_callback "
            "is deprecated. Update your code to use the two-argument form "
            "(unverified_claims, unverified_headers)."
        )
warn(msg, DeprecationWarning)
secret = jwt_manager._decode_key_callback(unverified_claims)
try:
return decode_jwt(
encoded_token=encoded_token,
secret=secret,
algorithm=config.algorithm,
identity_claim_key=config.identity_claim_key,
user_claims_key=config.user_claims_key,
csrf_value=csrf_value,
audience=config.audience,
leeway=config.leeway,
allow_expired=allow_expired
)
except ExpiredSignatureError:
expired_token = decode_jwt(
encoded_token=encoded_token,
secret=secret,
algorithm=config.algorithm,
identity_claim_key=config.identity_claim_key,
user_claims_key=config.user_claims_key,
csrf_value=csrf_value,
audience=config.audience,
leeway=config.leeway,
allow_expired=True
)
ctx_stack.top.expired_jwt = expired_token
raise | Returns the decoded token (python dict) from an encoded JWT. This does all
the checks to ensure that the decoded token is valid before returning it.
:param encoded_token: The encoded JWT to decode into a python dict.
:param csrf_value: Expected CSRF double submit value (optional)
:param allow_expired: Option to ignore exp claim validation in token
:return: Dictionary containing contents of the JWT |
def remove_node(cls, cluster_id_label, private_dns, parameters=None):
"""
        Remove a node from an existing cluster
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {} if not parameters else parameters
data = {"private_dns" : private_dns, "parameters" : parameters}
        return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data) | Remove a node from an existing cluster
def get_iterator_type(script_settings, subscripts={}):
"""
figures out the iterator type based on the script settings and (optionally) subscripts
Args:
script_settings: iterator_type
subscripts: subscripts
    Returns:
        the iterator type, either 'loop' or 'sweep'
"""
if 'iterator_type' in script_settings:
# figure out the iterator type
if script_settings['iterator_type'] == 'Loop':
iterator_type = 'loop'
elif script_settings['iterator_type'] == 'Parameter Sweep':
iterator_type = 'sweep'
else:
raise TypeError('unknown iterator type')
else:
        # assign the correct iterator script type
if 'sweep_param' in script_settings:
iterator_type = 'sweep'
elif 'num_loops' in script_settings:
iterator_type = 'loop'
else:
raise TypeError('unknown iterator type')
return iterator_type | figures out the iterator type based on the script settings and (optionally) subscripts
Args:
script_settings: iterator_type
subscripts: subscripts
Returns:
    the iterator type, either 'loop' or 'sweep'
def get_logs_multipart(
w3,
startBlock,
stopBlock,
address,
topics,
max_blocks):
"""Used to break up requests to ``eth_getLogs``
The getLog request is partitioned into multiple calls of the max number of blocks
``max_blocks``.
"""
_block_ranges = block_ranges(startBlock, stopBlock, max_blocks)
for from_block, to_block in _block_ranges:
params = {
'fromBlock': from_block,
'toBlock': to_block,
'address': address,
'topics': topics
}
yield w3.eth.getLogs(
drop_items_with_none_value(params)) | Used to break up requests to ``eth_getLogs``
The getLog request is partitioned into multiple calls of the max number of blocks
``max_blocks``. |
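`block_ranges` is referenced but not shown; a minimal sketch matching the partitioning described (inclusive windows of at most `max_blocks` blocks; an assumption rather than the library's exact code):

def block_ranges(start_block, stop_block, step):
    # yield inclusive (from_block, to_block) windows covering the range
    for from_block in range(start_block, stop_block + 1, step):
        yield from_block, min(from_block + step - 1, stop_block)

print(list(block_ranges(0, 25, 10)))  # [(0, 9), (10, 19), (20, 25)]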
def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ";"):
self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split("\n")[0]
self.check_lines(line, line_num) | a new line has been encountered, process it if necessary |
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx) | set my state from the passed info |
def _calc_sdof_tf(self, osc_freq, damping=0.05):
"""Compute the transfer function for a single-degree-of-freedom
oscillator.
The transfer function computes the pseudo-spectral acceleration.
Parameters
----------
osc_freq : float
natural frequency of the oscillator [Hz]
damping : float, optional
damping ratio of the oscillator in decimal. Default value is
0.05, or 5%.
Returns
-------
tf : :class:`numpy.ndarray`
Complex-valued transfer function with length equal to `self.freq`.
"""
return (-osc_freq ** 2. / (np.square(self.freqs) - np.square(osc_freq)
- 2.j * damping * osc_freq * self.freqs)) | Compute the transfer function for a single-degree-of-freedom
oscillator.
The transfer function computes the pseudo-spectral acceleration.
Parameters
----------
osc_freq : float
natural frequency of the oscillator [Hz]
damping : float, optional
damping ratio of the oscillator in decimal. Default value is
0.05, or 5%.
Returns
-------
tf : :class:`numpy.ndarray`
Complex-valued transfer function with length equal to `self.freq`. |
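A standalone check of the formula (mirroring the method body with `self.freqs` replaced by a local array): at resonance the transfer-function magnitude should equal 1 / (2 * damping).

import numpy as np

freqs = np.array([5.0])          # evaluate exactly at the natural frequency
osc_freq, damping = 5.0, 0.05
tf = -osc_freq ** 2 / (freqs ** 2 - osc_freq ** 2 - 2j * damping * osc_freq * freqs)
print(np.abs(tf))                # [10.] == 1 / (2 * 0.05)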
def to_file(file, array):
"""Wrapper around ndarray.tofile to support any file-like object"""
try:
array.tofile(file)
except (TypeError, IOError, UnsupportedOperation):
# tostring actually returns bytes
file.write(array.tostring()) | Wrapper around ndarray.tofile to support any file-like object |
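A usage sketch with an in-memory buffer (this assumes `UnsupportedOperation` is imported from `io` in the original module, and an older NumPy where `ndarray.tostring` still exists; it was removed in NumPy 2.0 in favor of `tobytes`):

import io
import numpy as np

buf = io.BytesIO()                          # file-like object with no usable fileno()
to_file(buf, np.arange(4, dtype=np.int32))  # tofile fails, falls back to write()
print(len(buf.getvalue()))                  # 16 bytes (4 * int32)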
def example_lab_to_xyz():
"""
This function shows a simple conversion of an Lab color to an XYZ color.
"""
print("=== Simple Example: Lab->XYZ ===")
# Instantiate an Lab color object with the given values.
lab = LabColor(0.903, 16.296, -2.22)
# Show a string representation.
print(lab)
# Convert to XYZ.
xyz = convert_color(lab, XYZColor)
print(xyz)
print("=== End Example ===\n") | This function shows a simple conversion of an Lab color to an XYZ color. |
def _execute(self, sql, args):
"""ๆง่กsql่ฏญๅฅ
:param sql: sql่ฏญๅฅ
:param args: ๅๆฐ
:return: ่ฟๅ็้ฝๆฏๆฐ็ปๅฏน่ฑก
"""
sql = sql.lower().strip()
args = args or ()
tmp = sql[:6]
with (yield self._pool.Connection()) as conn:
try:
with conn.cursor() as cursor:
yield cursor.execute(sql, args=args)
if tmp == 'select':
datas = cursor.fetchall()
return datas
except Exception as e:
err = traceback.format_exc()
print(err)
if tmp in ['insert', 'update', 'delete']:
yield conn.rollback()
else:
if tmp == 'insert':
insertId = conn.insert_id()
yield conn.commit()
                    # return the inserted primary key id
return insertId
elif tmp in ['update', 'delete']:
                yield conn.commit() | Execute a SQL statement
:param sql: SQL statement
:param args: parameters
:return: results are returned as array objects
def find_related_modules(package, related_name_re='.+',
ignore_exceptions=False):
"""Find matching modules using a package and a module name pattern."""
warnings.warn('find_related_modules has been deprecated.',
DeprecationWarning)
package_elements = package.rsplit(".", 1)
try:
if len(package_elements) == 2:
pkg = __import__(package_elements[0], globals(), locals(), [
package_elements[1]])
pkg = getattr(pkg, package_elements[1])
else:
pkg = __import__(package_elements[0], globals(), locals(), [])
pkg_path = pkg.__path__
except AttributeError:
return []
# Find all modules named according to related_name
p = re.compile(related_name_re)
modules = []
for name in find_modules(package, include_packages=True):
if p.match(name.split('.')[-1]):
try:
modules.append(import_string(name, silent=ignore_exceptions))
except Exception as e:
if not ignore_exceptions:
raise e
return modules | Find matching modules using a package and a module name pattern. |
def cli(ctx, packages, all, list, force, platform):
"""Install packages."""
if packages:
for package in packages:
Installer(package, platform, force).install()
elif all: # pragma: no cover
packages = Resources(platform).packages
for package in packages:
Installer(package, platform, force).install()
elif list:
Resources(platform).list_packages(installed=True, notinstalled=True)
else:
click.secho(ctx.get_help()) | Install packages. |
def _infer_map(node, context):
"""Infer all values based on Dict.items"""
values = {}
for name, value in node.items:
if isinstance(name, nodes.DictUnpack):
double_starred = helpers.safe_infer(value, context)
if not double_starred:
raise exceptions.InferenceError
if not isinstance(double_starred, nodes.Dict):
raise exceptions.InferenceError(node=node, context=context)
unpack_items = _infer_map(double_starred, context)
values = _update_with_replacement(values, unpack_items)
else:
key = helpers.safe_infer(name, context=context)
value = helpers.safe_infer(value, context=context)
if any(not elem for elem in (key, value)):
raise exceptions.InferenceError(node=node, context=context)
values = _update_with_replacement(values, {key: value})
return values | Infer all values based on Dict.items |
def ploidy(args):
"""
%prog ploidy seqids layout
Build a figure that calls graphics.karyotype to illustrate the high ploidy
of B. napus genome.
"""
p = OptionParser(ploidy.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7")
if len(args) != 2:
sys.exit(not p.print_help())
seqidsfile, klayout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
Karyotype(fig, root, seqidsfile, klayout)
fc = "darkslategrey"
radius = .012
ot = -.05 # use this to adjust vertical position of the left panel
TextCircle(root, .1, .9 + ot, r'$\gamma$', radius=radius, fc=fc)
root.text(.1, .88 + ot, r"$\times3$", ha="center", va="top", color=fc)
TextCircle(root, .08, .79 + ot, r'$\alpha$', radius=radius, fc=fc)
TextCircle(root, .12, .79 + ot, r'$\beta$', radius=radius, fc=fc)
root.text(.1, .77 + ot, r"$\times3\times2\times2$", ha="center", va="top", color=fc)
root.text(.1, .67 + ot, r"Brassica triplication", ha="center",
va="top", color=fc, size=11)
root.text(.1, .65 + ot, r"$\times3\times2\times2\times3$", ha="center", va="top", color=fc)
root.text(.1, .42 + ot, r"Allo-tetraploidy", ha="center",
va="top", color=fc, size=11)
root.text(.1, .4 + ot, r"$\times3\times2\times2\times3\times2$", ha="center", va="top", color=fc)
bb = dict(boxstyle="round,pad=.5", fc="w", ec="0.5", alpha=0.5)
root.text(.5, .2 + ot, r"\noindent\textit{Brassica napus}\\"
"(A$\mathsf{_n}$C$\mathsf{_n}$ genome)", ha="center",
size=16, color="k", bbox=bb)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "napus"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog ploidy seqids layout
Build a figure that calls graphics.karyotype to illustrate the high ploidy
of B. napus genome. |
def disconnect(self, connection):
"""
Disconnects the given protocol.
"""
proto = self._protocols.pop(connection)
proto.transport = None
return {} | Disconnects the given protocol. |
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions between
formatters and other components, such as storage and Windows EventLog
resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
page_transition_type = event_values.get('page_transition_type', None)
if page_transition_type is not None:
page_transition, page_transition_long = self._PAGE_TRANSITIONS.get(
page_transition_type, self._UNKNOWN_PAGE_TRANSITION)
if page_transition_long:
event_values['page_transition'] = '{0:s} - {1:s}'.format(
page_transition, page_transition_long)
else:
event_values['page_transition'] = page_transition
visit_source = event_values.get('visit_source', None)
if visit_source is not None:
event_values['visit_source'] = self._VISIT_SOURCE.get(
visit_source, 'UNKNOWN')
extras = []
url_hidden = event_values.get('url_hidden', False)
if url_hidden:
extras.append('(url hidden)')
typed_count = event_values.get('typed_count', 0)
if typed_count == 0:
extras.append('(URL not typed directly - no typed count)')
elif typed_count == 1:
extras.append('(type count {0:d} time)'.format(typed_count))
else:
extras.append('(type count {0:d} times)'.format(typed_count))
event_values['extra'] = ' '.join(extras)
return self._ConditionalFormatMessages(event_values) | Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions between
formatters and other components, such as storage and Windows EventLog
resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. |
def plot(x, y, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
"""
Plots the data in `x` on the X axis and the data in `y` on the Y axis
in a 2d visualization, and shows the resulting visualization.
Uses the following heuristic to choose the visualization:
* If `x` and `y` are both numeric (SArray of int or float), and they contain
fewer than or equal to 5,000 values, show a scatter plot.
* If `x` and `y` are both numeric (SArray of int or float), and they contain
more than 5,000 values, show a heat map.
* If `x` is numeric and `y` is an SArray of string, show a box and whisker
plot for the distribution of numeric values for each categorical (string)
value.
* If `x` and `y` are both SArrays of string, show a categorical heat map.
This show method supports SArrays of dtypes: int, float, str.
Notes
-----
- The plot will be returned as a Plot object, which can then be shown,
saved, etc. and will display automatically in a Jupyter Notebook.
Parameters
----------
x : SArray
The data to plot on the X axis of a 2d visualization.
y : SArray
The data to plot on the Y axis of a 2d visualization. Must be the same
length as `x`.
xlabel : str (optional)
The text label for the X axis. Defaults to "X".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Y".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Examples
--------
Show a categorical heat map of pets and their feelings.
>>> x = turicreate.SArray(['dog', 'cat', 'dog', 'dog', 'cat'])
>>> y = turicreate.SArray(['happy', 'grumpy', 'grumpy', 'happy', 'grumpy'])
>>> turicreate.show(x, y)
Show a scatter plot of the function y = 2x, for x from 0 through 9, labeling
the axes and plot title with custom strings.
>>> x = turicreate.SArray(range(10))
>>> y = x * 2
>>> turicreate.show(x, y,
... xlabel="Custom X label",
... ylabel="Custom Y label",
... title="Custom title")
"""
title = _get_title(title)
plt_ref = tc.extensions.plot(x, y, xlabel, ylabel, title)
return Plot(plt_ref) | Plots the data in `x` on the X axis and the data in `y` on the Y axis
in a 2d visualization, and shows the resulting visualization.
Uses the following heuristic to choose the visualization:
* If `x` and `y` are both numeric (SArray of int or float), and they contain
fewer than or equal to 5,000 values, show a scatter plot.
* If `x` and `y` are both numeric (SArray of int or float), and they contain
more than 5,000 values, show a heat map.
* If `x` is numeric and `y` is an SArray of string, show a box and whisker
plot for the distribution of numeric values for each categorical (string)
value.
* If `x` and `y` are both SArrays of string, show a categorical heat map.
This show method supports SArrays of dtypes: int, float, str.
Notes
-----
- The plot will be returned as a Plot object, which can then be shown,
saved, etc. and will display automatically in a Jupyter Notebook.
Parameters
----------
x : SArray
The data to plot on the X axis of a 2d visualization.
y : SArray
The data to plot on the Y axis of a 2d visualization. Must be the same
length as `x`.
xlabel : str (optional)
The text label for the X axis. Defaults to "X".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Y".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Examples
--------
Show a categorical heat map of pets and their feelings.
>>> x = turicreate.SArray(['dog', 'cat', 'dog', 'dog', 'cat'])
>>> y = turicreate.SArray(['happy', 'grumpy', 'grumpy', 'happy', 'grumpy'])
>>> turicreate.show(x, y)
Show a scatter plot of the function y = 2x, for x from 0 through 9, labeling
the axes and plot title with custom strings.
>>> x = turicreate.SArray(range(10))
>>> y = x * 2
>>> turicreate.show(x, y,
... xlabel="Custom X label",
... ylabel="Custom Y label",
... title="Custom title") |
def inverse_kinematics(self, end_effector_transformation,
q=None,
max_iter=1000, tolerance=0.05,
mask=numpy.ones(6),
use_pinv=False):
""" Computes the joint angles corresponding to the end effector transformation.
:param end_effector_transformation: the end effector homogeneous transformation matrix
:param vector q: initial estimate of the joint angles
        :param int max_iter: maximum number of iterations
:param float tolerance: tolerance before convergence
        :param mask: specify the cartesian DOF that will be ignored (in the case of a chain with fewer than 6 joints).
:rtype: vector of the joint angles (theta 1, theta 2, ..., theta n)
"""
if q is None:
q = numpy.zeros((len(self.links), 1))
q = numpy.matrix(q.reshape(-1, 1))
best_e = numpy.ones(6) * numpy.inf
best_q = None
alpha = 1.0
for _ in range(max_iter):
e = numpy.multiply(transform_difference(self.forward_kinematics(q)[0], end_effector_transformation), mask)
d = numpy.linalg.norm(e)
if d < numpy.linalg.norm(best_e):
best_e = e.copy()
best_q = q.copy()
alpha *= 2.0 ** (1.0 / 8.0)
else:
q = best_q.copy()
e = best_e.copy()
alpha *= 0.5
if use_pinv:
dq = numpy.linalg.pinv(self._jacob0(q)) * e.reshape((-1, 1))
else:
dq = self._jacob0(q).T * e.reshape((-1, 1))
q += alpha * dq
# d = numpy.linalg.norm(dq)
if d < tolerance:
return q
else:
raise ValueError('could not converge d={}'.format(numpy.linalg.norm(best_e))) | Computes the joint angles corresponding to the end effector transformation.
:param end_effector_transformation: the end effector homogeneous transformation matrix
:param vector q: initial estimate of the joint angles
:param int max_iter: maximum number of iterations
:param float tolerance: tolerance before convergence
:param mask: specify the cartesian DOF that will be ignored (in the case of a chain with fewer than 6 joints).
:rtype: vector of the joint angles (theta 1, theta 2, ..., theta n) |
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True | Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist. |
def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if shape1.ndims is None or shape2.ndims is None:
raise ValueError('Shapes must have known rank. Got %s and %s.' %
(shape1.ndims, shape2.ndims))
shape1.assert_same_rank(shape2)
shape1.assert_is_compatible_with(shape2) | Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None) |
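A usage sketch (assuming TensorFlow is importable as `tf`, as in the function body):

import tensorflow as tf

assert_shape_match((64, 64, 3), (None, None, 3))   # passes silently
assert_shape_match((64, 64, 4), (None, None, 3))   # raises ValueError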
def update_artifact_cache(self, vts_artifactfiles_pairs):
"""Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet.
"""
update_artifact_cache_work = self._get_update_artifact_cache_work(vts_artifactfiles_pairs)
if update_artifact_cache_work:
self.context.submit_background_work_chain([update_artifact_cache_work],
parent_workunit_name='cache') | Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet. |
def parse_string_expr(self, string_expr_node):
""" Parse a string node content. """
string_expr_node_value = string_expr_node['value']
string_expr_str = string_expr_node_value[1:-1]
# Care escaped string literals
if string_expr_node_value[0] == "'":
string_expr_str = string_expr_str.replace("''", "'")
else:
string_expr_str = string_expr_str.replace('\\"', '"')
# NOTE: This is a hack to parse expr1. See :help expr1
raw_ast = self.parse_string('echo ' + string_expr_str)
# We need the left node of ECHO node
parsed_string_expr_nodes = raw_ast['body'][0]['list']
start_pos = string_expr_node['pos']
def adjust_position(node):
pos = node['pos']
# Care 1-based index and the length of "echo ".
pos['col'] += start_pos['col'] - 1 - 5
# Care the length of "echo ".
pos['i'] += start_pos['i'] - 5
# Care 1-based index
pos['lnum'] += start_pos['lnum'] - 1
for parsed_string_expr_node in parsed_string_expr_nodes:
traverse(parsed_string_expr_node, on_enter=adjust_position)
return parsed_string_expr_nodes | Parse a string node content. |