def update_metadata(self, metadata):
"""Update cluster state given a MetadataResponse.
Arguments:
metadata (MetadataResponse): broker response to a metadata request
Returns: None
"""
# In the common case where we ask for a single topic and get back an
# error, we should fail the future
if len(metadata.topics) == 1 and metadata.topics[0][0] != 0:
error_code, topic = metadata.topics[0][:2]
error = Errors.for_code(error_code)(topic)
return self.failed_update(error)
if not metadata.brokers:
log.warning("No broker metadata found in MetadataResponse -- ignoring.")
return self.failed_update(Errors.MetadataEmptyBrokerList(metadata))
_new_brokers = {}
for broker in metadata.brokers:
if metadata.API_VERSION == 0:
node_id, host, port = broker
rack = None
else:
node_id, host, port, rack = broker
_new_brokers.update({
node_id: BrokerMetadata(node_id, host, port, rack)
})
if metadata.API_VERSION == 0:
_new_controller = None
else:
_new_controller = _new_brokers.get(metadata.controller_id)
_new_partitions = {}
_new_broker_partitions = collections.defaultdict(set)
_new_unauthorized_topics = set()
_new_internal_topics = set()
for topic_data in metadata.topics:
if metadata.API_VERSION == 0:
error_code, topic, partitions = topic_data
is_internal = False
else:
error_code, topic, is_internal, partitions = topic_data
if is_internal:
_new_internal_topics.add(topic)
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
_new_partitions[topic] = {}
for p_error, partition, leader, replicas, isr in partitions:
_new_partitions[topic][partition] = PartitionMetadata(
topic=topic, partition=partition, leader=leader,
replicas=replicas, isr=isr, error=p_error)
if leader != -1:
_new_broker_partitions[leader].add(
TopicPartition(topic, partition))
elif error_type is Errors.LeaderNotAvailableError:
log.warning("Topic %s is not available during auto-create"
" initialization", topic)
elif error_type is Errors.UnknownTopicOrPartitionError:
log.error("Topic %s not found in cluster metadata", topic)
elif error_type is Errors.TopicAuthorizationFailedError:
log.error("Topic %s is not authorized for this client", topic)
_new_unauthorized_topics.add(topic)
elif error_type is Errors.InvalidTopicError:
log.error("'%s' is not a valid topic name", topic)
else:
log.error("Error fetching metadata for topic %s: %s",
topic, error_type)
with self._lock:
self._brokers = _new_brokers
self.controller = _new_controller
self._partitions = _new_partitions
self._broker_partitions = _new_broker_partitions
self.unauthorized_topics = _new_unauthorized_topics
self.internal_topics = _new_internal_topics
f = None
if self._future:
f = self._future
self._future = None
self._need_update = False
now = time.time() * 1000
self._last_refresh_ms = now
self._last_successful_refresh_ms = now
if f:
f.success(self)
log.debug("Updated cluster metadata to %s", self)
for listener in self._listeners:
listener(self)
if self.need_all_topic_metadata:
# the listener may change the interested topics,
# which could cause another metadata refresh.
# If we have already fetched all topics, however,
# another fetch should be unnecessary.
self._need_update = False
def show_patterned_file(dir_path, pattern=list(), filename_only=True):
"""Print all file that file name contains ``pattern``.
"""
pattern = [i.lower() for i in pattern]
if filename_only:
def filter(winfile):
for p in pattern:
if p in winfile.fname.lower():
return True
return False
else:
def filter(winfile):
for p in pattern:
if p in winfile.abspath.lower():
return True
return False
fc = FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
if filename_only:
fc.sort_by("fname")
else:
fc.sort_by("abspath")
table = {p: "<%s>" % p for p in pattern}
lines = list()
lines.append("Results:")
for winfile in fc.iterfiles():
lines.append(" %s" % winfile)
if filename_only:
lines.append("Above are all files that file name contains %s" % pattern)
else:
lines.append("Above are all files that abspath contains %s" % pattern)
text = "\n".join(lines)
print(text)
with open("__show_patterned_file__.log", "wb") as f:
f.write(text.encode("utf-8"))
def quality_to_apply(self):
"""Value of quality parameter to use in processing request.
Simple substitution of 'native' or 'default' if no quality
parameter is specified.
"""
if (self.request.quality is None):
if (self.api_version <= '1.1'):
return('native')
else:
return('default')
return(self.request.quality)
def color_toggle(self):
"""Toggle between the currently active color scheme and NoColor."""
if self.color_scheme_table.active_scheme_name == 'NoColor':
self.color_scheme_table.set_active_scheme(self.old_scheme)
self.Colors = self.color_scheme_table.active_colors
else:
self.old_scheme = self.color_scheme_table.active_scheme_name
self.color_scheme_table.set_active_scheme('NoColor')
self.Colors = self.color_scheme_table.active_colors
def write_registers(self, registeraddress, values):
"""Write integers to 16-bit registers in the slave.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Uses Modbus function code 16.
The number of registers that will be written is defined by the length of the ``values`` list.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* values (list of int): The values to store in the slave registers.
Any scaling of the register data, or converting it to negative number (two's complement)
must be done manually.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
if not isinstance(values, list):
raise TypeError('The "values parameter" must be a list. Given: {0!r}'.format(values))
_checkInt(len(values), minvalue=1, description='length of input list')
# Note: The content of the list is checked at content conversion.
self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')
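A hedged usage sketch of this method through a minimalmodbus-style Instrument object; the serial port, slave address and register address below are illustrative assumptions, and a real device must be attached for the call to succeed:

import minimalmodbus  # assumes the minimalmodbus package that provides write_registers()

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', slaveaddress=1)  # illustrative port and slave address
instrument.write_registers(registeraddress=100, values=[10, 20, 30])   # writes three registers via function code 16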
def resolve_dist(cls, dist, working_set):
"""Given a local distribution and a working set, returns all dependencies from the set.
:param dist: A single distribution to find the dependencies of
:type dist: :class:`pkg_resources.Distribution`
:param working_set: A working set to search for all packages
:type working_set: :class:`pkg_resources.WorkingSet`
:return: A set of distributions which the package depends on, including the package
:rtype: set(:class:`pkg_resources.Distribution`)
"""
deps = set()
deps.add(dist)
try:
reqs = dist.requires()
except (AttributeError, OSError, IOError): # The METADATA file can't be found
return deps
for req in reqs:
dist = working_set.find(req)
deps |= cls.resolve_dist(dist, working_set)
return deps
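A brief usage sketch; ``Resolver`` is a placeholder for whatever class defines this classmethod, and the package name is arbitrary:

import pkg_resources

working_set = pkg_resources.working_set
dist = pkg_resources.get_distribution('pip')          # any installed distribution
closure = Resolver.resolve_dist(dist, working_set)    # 'Resolver' stands in for the defining class
print(sorted(d.project_name for d in closure if d is not None))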
def edit_scheme(self):
"""Edit current scheme."""
dlg = self.scheme_editor_dialog
dlg.set_scheme(self.current_scheme)
if dlg.exec_():
# Update temp scheme to reflect instant edits on the preview
temporal_color_scheme = dlg.get_edited_color_scheme()
for key in temporal_color_scheme:
option = "temp/{0}".format(key)
value = temporal_color_scheme[key]
self.set_option(option, value)
self.update_preview(scheme_name='temp')
def warning(*args):
"""Display warning message via stderr or GUI."""
if sys.stdin.isatty():
print('WARNING:', *args, file=sys.stderr)
else:
notify_warning(*args)
def no_empty_value(func):
"""Raises an exception if function argument is empty."""
@wraps(func)
def wrapper(value):
if not value:
raise Exception("Empty value not allowed")
return func(value)
return wrapper
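A small sketch of the decorator in use; the decorated function is purely illustrative:

@no_empty_value
def shout(value):
    return value.upper()

shout("hi")   # -> "HI"
shout("")     # raises Exception("Empty value not allowed")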
def get_volume_steps(self):
"""Read the maximum volume level of the device."""
if not self.__volume_steps:
self.__volume_steps = yield from self.handle_int(
self.API.get('volume_steps'))
return self.__volume_steps
async def add_relation(self, relation1, relation2):
"""Add a relation between two applications.
:param str relation1: '<application>[:<relation_name>]'
:param str relation2: '<application>[:<relation_name>]'
"""
connection = self.connection()
app_facade = client.ApplicationFacade.from_connection(connection)
log.debug(
'Adding relation %s <-> %s', relation1, relation2)
def _find_relation(*specs):
for rel in self.relations:
if rel.matches(*specs):
return rel
return None
try:
result = await app_facade.AddRelation([relation1, relation2])
except JujuAPIError as e:
if 'relation already exists' not in e.message:
raise
rel = _find_relation(relation1, relation2)
if rel:
return rel
raise JujuError('Relation {} {} exists but not in model'.format(
relation1, relation2))
specs = ['{}:{}'.format(app, data['name'])
for app, data in result.endpoints.items()]
await self.block_until(lambda: _find_relation(*specs) is not None)
return _find_relation(*specs)
def epochs(steps=None, epoch_steps=1):
"""Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
a tuple (epoch, epoch_steps): the 1-based epoch id (int) and the number of steps in this epoch (int)
"""
try:
iter(epoch_steps)
except TypeError:
epoch_steps = itertools.repeat(epoch_steps)
step = 0
for epoch, epoch_steps in enumerate(epoch_steps):
if steps is not None:  # guard: steps may be None, meaning run indefinitely
    epoch_steps = min(epoch_steps, steps - step)
yield (epoch + 1, epoch_steps)
step += epoch_steps
if steps and step >= steps:
break
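A quick illustration of the generator's behaviour (the final epoch is truncated to the remaining steps):

list(epochs(steps=10, epoch_steps=4))
# -> [(1, 4), (2, 4), (3, 2)]

for epoch_id, n_steps in epochs(steps=6, epoch_steps=[2, 2, 2]):
    print(epoch_id, n_steps)   # prints 1 2, then 2 2, then 3 2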
def depth(self):
"""
Compute the depth of the tree (depth of a leaf=0).
"""
return self.fold_up(lambda n, fl, fg: max(fl + 1, fg + 1), lambda leaf: 0)
def add_item(self, assessment_id, item_id):
"""Adds an existing ``Item`` to an assessment.
arg: assessment_id (osid.id.Id): the ``Id`` of the
``Assessment``
arg: item_id (osid.id.Id): the ``Id`` of the ``Item``
raise: NotFound - ``assessment_id`` or ``item_id`` not found
raise: NullArgument - ``assessment_id`` or ``item_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
if assessment_id.get_identifier_namespace() != 'assessment.Assessment':
raise errors.InvalidArgument
self._part_item_design_session.add_item(item_id, self._get_first_part_id(assessment_id))
def timestamp_filename(basename, ext=None):
"""
Return a string of the form [basename-TIMESTAMP.ext]
where TIMESTAMP is of the form YYYYMMDD-HHMMSS-MILSEC
"""
dt = datetime.now().strftime('%Y%m%d-%H%M%S-%f')
if ext:
return '%s-%s.%s' % (basename, dt, ext)
return '%s-%s' % (basename, dt)
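For example (the timestamp varies; ``%f`` is microseconds):

timestamp_filename('backup', 'tar.gz')   # e.g. 'backup-20240101-120000-123456.tar.gz'
timestamp_filename('report')             # e.g. 'report-20240101-120000-123456'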
def send_one_ping(self, current_socket):
"""
Send one ICMP ECHO_REQUEST.
"""
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
checksum = 0
# Make a dummy header with a 0 checksum.
header = struct.pack(
"!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
)
padBytes = []
startVal = 0x42
for i in range(startVal, startVal + (self.packet_size)):
padBytes += [(i & 0xff)] # Keep chars in the 0-255 range
data = bytes(padBytes)
# Calculate the checksum on the data and the dummy header.
checksum = calculate_checksum(header + data) # Checksum is in network order
# Now that we have the right checksum, we put that in. It's just easier
# to make up a new header than to stuff it into the dummy.
header = struct.pack(
"!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
)
packet = header + data
send_time = default_timer()
try:
current_socket.sendto(packet, (self.destination, 1)) # Port number is irrelevant for ICMP
except socket.error as e:
print("General failure (%s)" % (e.args[1]))
current_socket.close()
return
return send_time
def p_block_statements(self, p):
'block_statements : block_statements block_statement'
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1))
def embeddedFileGet(self, id):
"""Retrieve embedded file content by name or by number."""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document_embeddedFileGet(self, id)
def _get_marX(self, attr_name, default):
"""
Generalized method to get margin values.
"""
if self.tcPr is None:
return Emu(default)
return Emu(int(self.tcPr.get(attr_name, default)))
def prime_check(n):
"""Return True if n is a prime number
Else return False.
"""
if n <= 1:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
j = 5
while j * j <= n:
if n % j == 0 or n % (j + 2) == 0:
return False
j += 6
return True
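For instance:

[n for n in range(2, 20) if prime_check(n)]   # -> [2, 3, 5, 7, 11, 13, 17, 19]
prime_check(1)    # -> False
prime_check(97)   # -> True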
def copy_figure(self):
"""Copy figure to clipboard."""
if self.fmt in ['image/png', 'image/jpeg']:
qpixmap = QPixmap()
qpixmap.loadFromData(self.fig, self.fmt.upper())
QApplication.clipboard().setImage(qpixmap.toImage())
elif self.fmt == 'image/svg+xml':
svg_to_clipboard(self.fig)
else:
return
self.blink_figure()
def p_try_statement_3(self, p):
"""try_statement : TRY block catch finally"""
p[0] = self.asttypes.Try(statements=p[2], catch=p[3], fin=p[4])
p[0].setpos(p)
def update_particle(self, part, chi=0.729843788, c=2.05):
"""Constriction factor update particle method.
Notes
-----
Looks for a list of neighbours attached to a particle and
uses the particle's best position and that of the best
neighbour.
"""
neighbour_pool = [self.population[i] for i in part.neighbours]
best_neighbour = max(neighbour_pool, key=lambda x: x.best.fitness)
ce1 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce2 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce1_p = map(operator.mul, ce1, map(operator.sub, part.best, part))
ce2_g = map(operator.mul, ce2, map(
operator.sub, best_neighbour.best, part))
chi_list = [chi] * len(part)
chi_list2 = [1 - chi] * len(part)
a = map(operator.sub,
map(operator.mul, chi_list, map(operator.add, ce1_p, ce2_g)),
map(operator.mul, chi_list2, part.speed))
part.speed = list(map(operator.add, part.speed, a))
for i, speed in enumerate(part.speed):
if speed < part.smin:
part.speed[i] = part.smin
elif speed > part.smax:
part.speed[i] = part.smax
part[:] = list(map(operator.add, part, part.speed))
return
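The chained map/operator arithmetic above is the element-wise form of the standard Clerc-Kennedy constriction update: because speed + (chi*(ce1_p + ce2_g) - (1 - chi)*speed) simplifies to chi*(speed + ce1_p + ce2_g), each velocity component follows v <- chi*[v + c*r1*(p_best - x) + c*r2*(n_best - x)], after which the velocity is clamped to [smin, smax] and the position is updated with x <- x + v.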
def _represent_match_traversal(match_traversal):
"""Emit MATCH query code for an entire MATCH traversal sequence."""
output = []
output.append(_first_step_to_match(match_traversal[0]))
for step in match_traversal[1:]:
output.append(_subsequent_step_to_match(step))
return u''.join(output)
def _type_insert(self, handle, key, value):
'''
Insert the value into the series.
'''
if value!=0:
if isinstance(value,float):
handle.incrbyfloat(key, value)
else:
handle.incr(key, value)
def get_sites(self):
"""
Returns a list of sites.
http://dev.wheniwork.com/#listing-sites
"""
url = "/2/sites"
data = self._get_resource(url)
sites = []
for entry in data['sites']:
sites.append(self.site_from_json(entry))
return sites
def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None,
object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
allowing the key to access only the specified pools or
object prefixes. object_prefix_permissions should be a dictionary
keyed on the permission with the corresponding value being a list
of prefixes to apply that permission to.
{
'rwx': ['prefix1', 'prefix2'],
'class-read': ['prefix3']}
"""
self.ops.append({
'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace,
'name': key_name or service_name(),
'group-permission': permission,
'object-prefix-permissions': object_prefix_permissions})
def perform_put(self, path, body, x_ms_version=None):
'''
Performs a PUT request and returns the response.
path:
Path to the resource.
Ex: '/<subscription-id>/services/hostedservices/<service-name>'
body:
Body for the PUT request.
x_ms_version:
If specified, this is used for the x-ms-version header.
Otherwise, self.x_ms_version is used.
'''
request = HTTPRequest()
request.method = 'PUT'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = self._httpclient._update_request_uri_query(request)
request.headers = self._update_management_header(request, x_ms_version)
response = self._perform_request(request)
return response
def catch_all(path):
"""Catch all path - return a JSON 404 """
return (dict(error='Invalid URL: /{}'.format(path),
links=dict(root='{}{}'.format(request.url_root, PREFIX[1:]))),
HTTPStatus.NOT_FOUND)
def uninstall(self, updates):
'''
Uninstall the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions.
.. note:: Starting with Windows 10 the Windows Update Agent is unable to
uninstall updates. An ``Uninstall Not Allowed`` error is returned. If
this error is encountered this function will instead attempt to use
``dism.exe`` to perform the uninstallation. ``dism.exe`` may fail to
find the KB number for the package. In that case, removal will fail.
Args:
updates (Updates): An instance of the Updates class containing
the updates to be uninstalled.
Returns:
dict: A dictionary containing the results of the uninstallation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# uninstall KB3195454
updates = wua.search('KB3195454')
results = wua.uninstall(updates)
'''
# This doesn't work with the WUA API since Windows 10. It always returns
# "0x80240028 # Uninstall not allowed". The full message is: "The update
# could not be uninstalled because the request did not originate from a
# Windows Server Update Services (WSUS) server.
# Check for empty list
if updates.count() == 0:
ret = {'Success': False,
'Updates': 'Nothing to uninstall'}
return ret
installer = self._session.CreateUpdateInstaller()
self._session.ClientApplicationID = 'Salt: Install Update'
with salt.utils.winapi.Com():
uninstall_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
ret = {'Updates': {}}
# Check for updates that aren't already installed
for update in updates.updates:
# Define uid to keep the lines shorter
uid = update.Identity.UpdateID
ret['Updates'][uid] = {}
ret['Updates'][uid]['Title'] = update.Title
ret['Updates'][uid]['AlreadyUninstalled'] = \
not bool(update.IsInstalled)
# Make sure the update has actually been Uninstalled
if salt.utils.data.is_true(update.IsInstalled):
log.debug('To Be Uninstalled: %s', uid)
log.debug('\tTitle: %s', update.Title)
uninstall_list.Add(update)
# Check the install list
if uninstall_list.Count == 0:
ret = {'Success': False,
'Updates': 'Nothing to uninstall'}
return ret
# Send the list to the installer
installer.Updates = uninstall_list
# Uninstall the list
try:
log.debug('Uninstalling Updates')
result = installer.Uninstall()
except pywintypes.com_error as error:
# Something happened, return error or try using DISM
hr, msg, exc, arg = error.args # pylint: disable=W0633
try:
failure_code = self.fail_codes[exc[5]]
except KeyError:
failure_code = 'Unknown Failure: {0}'.format(error)
# If "Uninstall Not Allowed" error, try using DISM
if exc[5] == -2145124312:
log.debug('Uninstall Failed with WUA, attempting with DISM')
try:
# Go through each update...
for item in uninstall_list:
# Look for the KB numbers
for kb in item.KBArticleIDs:
# Get the list of packages
cmd = ['dism', '/Online', '/Get-Packages']
pkg_list = self._run(cmd)[0].splitlines()
# Find the KB in the pkg_list
for item in pkg_list:
# Uninstall if found
if 'kb' + kb in item.lower():
pkg = item.split(' : ')[1]
ret['DismPackage'] = pkg
cmd = ['dism',
'/Online',
'/Remove-Package',
'/PackageName:{0}'.format(pkg),
'/Quiet',
'/NoRestart']
self._run(cmd)
except CommandExecutionError as exc:
log.debug('Uninstall using DISM failed')
log.debug('Command: %s', ' '.join(cmd))
log.debug('Error: %s', exc)
raise CommandExecutionError(
'Uninstall using DISM failed: {0}'.format(exc))
# DISM Uninstall Completed Successfully
log.debug('Uninstall Completed using DISM')
# Populate the return dictionary
ret['Success'] = True
ret['Message'] = 'Uninstalled using DISM'
ret['NeedsReboot'] = needs_reboot()
log.debug('NeedsReboot: %s', ret['NeedsReboot'])
# Refresh the Updates Table
self.refresh()
reboot = {0: 'Never Reboot',
1: 'Always Reboot',
2: 'Poss Reboot'}
# Check the status of each update
for update in self._updates:
uid = update.Identity.UpdateID
for item in uninstall_list:
if item.Identity.UpdateID == uid:
if not update.IsInstalled:
ret['Updates'][uid]['Result'] = \
'Uninstallation Succeeded'
else:
ret['Updates'][uid]['Result'] = \
'Uninstallation Failed'
ret['Updates'][uid]['RebootBehavior'] = \
reboot[update.InstallationBehavior.RebootBehavior]
return ret
# Found a different exception, raise the error
log.error('Uninstall Failed: %s', failure_code)
raise CommandExecutionError(failure_code)
# Lookup dictionary
result_code = {0: 'Uninstallation Not Started',
1: 'Uninstallation In Progress',
2: 'Uninstallation Succeeded',
3: 'Uninstallation Succeeded With Errors',
4: 'Uninstallation Failed',
5: 'Uninstallation Aborted'}
log.debug('Uninstall Complete')
log.debug(result_code[result.ResultCode])
ret['Message'] = result_code[result.ResultCode]
if result.ResultCode in [2, 3]:
ret['Success'] = True
ret['NeedsReboot'] = result.RebootRequired
log.debug('NeedsReboot: %s', result.RebootRequired)
else:
log.debug('Uninstall Failed')
ret['Success'] = False
reboot = {0: 'Never Reboot',
1: 'Always Reboot',
2: 'Poss Reboot'}
for i in range(uninstall_list.Count):
uid = uninstall_list.Item(i).Identity.UpdateID
ret['Updates'][uid]['Result'] = \
result_code[result.GetUpdateResult(i).ResultCode]
ret['Updates'][uid]['RebootBehavior'] = reboot[
uninstall_list.Item(i).InstallationBehavior.RebootBehavior]
return ret
def dialectfromstring(s):
"""
Attempts to convert a string representation of a CSV
dialect (as would be read from a file header, for instance)
into an actual csv.Dialect object.
"""
try:
AST = compiler.parse(s)
except SyntaxError:
return
else:
try:
if (len(AST.getChildren()) > 1):
ST = AST.getChildren()[1]
if isinstance(ST, Stmt):
if isinstance(ST.getChildren()[0], Discard):
d = ST.getChildren()[0].asList()[0]
except (TypeError,AttributeError):
pass
else:
if (isinstance(d,Dict) and (len(d.items) > 0)):
if all([isctype(i[0], str) for i in d.items]):
testd = csv.Sniffer().sniff('a,b,c')
if all([n.value in dir(testd) and
isctype(v, type(getattr(testd, n.value))) for (n,v) in
d.items]):
D = eval(s)
for n in D.keys():
setattr(testd, n, D[n])
return testd
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print 'Unable to find an older python frame'
else:
print 'Unable to find a newer python frame'
def _echo_setting(key):
"""Echo a setting to the CLI."""
value = getattr(settings, key)
secho('%s: ' % key, fg='magenta', bold=True, nl=False)
secho(
six.text_type(value),
bold=True,
fg='white' if isinstance(value, six.text_type) else 'cyan',
)
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
distance_scale=1):
"""Make a h(t) strain time-series from an injection object as read from
an hdf file.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
distance_scale: float, optional
Factor to scale the distance of an injection with. The default (=1)
is no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
"""
detector = Detector(detector_name)
# compute the waveform time series
hp, hc = ringdown_td_approximants[inj['approximant']](
inj, delta_t=delta_t, **self.extra_args)
hp._epoch += inj['tc']
hc._epoch += inj['tc']
if distance_scale != 1:
hp /= distance_scale
hc /= distance_scale
# compute the detector response and add it to the strain
signal = detector.project_wave(hp, hc,
inj['ra'], inj['dec'], inj['polarization'])
return signal
def _check_error(self, response, json_response=None):
''' Check for HTTP error code from the response, raise exception if there's any
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code
'''
# If status code is 4xx or 5xx, that should be an error
if response.status_code >= 400:
json_response = json_response or self._get_json_response(response)
err_cls = self._check_http_error_code(response.status_code)
try:
raise err_cls("%s error: %s" % (response.status_code, json_response["error"]["error_msg"]), response.status_code)
# This is to catch error when we post get oauth data
except TypeError:
raise err_cls("%s error: %s" % (response.status_code, json_response["error_description"]), response.status_code)
# Return True if everything is OK
return True
def select_catalogue(self, selector, distance=None):
'''
Selects the catalogue of earthquakes attributable to the source
:param selector:
Populated instance of openquake.hmtk.seismicity.selector.CatalogueSelector
class
:param float distance:
Distance (in km) to extend or contract (if negative) the zone for
selecting events
'''
if selector.catalogue.get_number_events() < 1:
raise ValueError('No events found in catalogue!')
self.catalogue = selector.within_polygon(self.geometry,
distance,
upper_depth=self.upper_depth,
lower_depth=self.lower_depth)
if self.catalogue.get_number_events() < 5:
# Throw a warning regarding the small number of earthquakes in
# the source!
warnings.warn('Source %s (%s) has fewer than 5 events'
% (self.id, self.name))
async def join(self, ctx, *, channel: discord.VoiceChannel):
"""Joins a voice channel"""
if ctx.voice_client is not None:
return await ctx.voice_client.move_to(channel)
await channel.connect()
def read_scanimage_metadata(fh):
"""Read ScanImage BigTIFF v3 static and ROI metadata from open file.
Return non-varying frame data as dict and ROI group data as JSON.
The settings can be used to read image data and metadata without parsing
the TIFF file.
Raise ValueError if file does not contain valid ScanImage v3 metadata.
"""
fh.seek(0)
try:
byteorder, version = struct.unpack('<2sH', fh.read(4))
if byteorder != b'II' or version != 43:
raise Exception
fh.seek(16)
magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
if magic != 117637889 or version != 3:
raise Exception
except Exception:
raise ValueError('not a ScanImage BigTIFF v3 file')
frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
return frame_data, roi_data
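A short usage sketch; the file name is illustrative and the file must be a ScanImage BigTIFF v3 recording:

with open('scanimage_recording.tif', 'rb') as fh:
    frame_data, roi_data = read_scanimage_metadata(fh)
# frame_data: dict of non-varying frame settings; roi_data: parsed ROI-group JSON ({} if absent)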
def get_user(username):
'''
Get username line from switch
.. code-block:: bash
salt '*' onyx.cmd get_user username=admin
'''
try:
enable()
configure_terminal()
cmd_out = sendline('show running-config | include "username {0} password 7"'.format(username))
cmd_out = cmd_out.split('\n')  # split the raw terminal output into lines
user = cmd_out[1:-1]  # drop the first and last lines (command echo / prompt)
configure_terminal_exit()
disable()
return user
except TerminalException as e:
log.error(e)
return 'Failed to get user'
async def entries_exists(self, url, urls=''):
"""
GET /api/entries/exists.{_format}
Check if an entry exists by URL.
:param url string true An url Url to check if it exists
:param urls string false An array of urls
(?urls[]=http...&urls[]=http...) Urls (as an array)
to check if it exists
:return result
"""
params = {'access_token': self.token,
'url': url,
'urls': urls}
path = '/api/entries/exists.{ext}'.format(ext=self.format)
return await self.query(path, "get", **params)
def recommend(self, userid, user_items,
N=10, filter_already_liked_items=True, filter_items=None, recalculate_user=False):
"""
Recommends items for a user
Calculates the N best recommendations for a user, and returns a list of itemids, score.
Parameters
----------
userid : int
The userid to calculate recommendations for
user_items : csr_matrix
A sparse matrix of shape (number_users, number_items). This lets us look
up the liked items and their weights for the user. This is used to filter out
items that have already been liked from the output, and to also potentially
calculate the best items for this user.
N : int, optional
The number of results to return
filter_items : sequence of ints, optional
List of extra item ids to filter out from the output
recalculate_user : bool, optional
When true, don't rely on stored user state and instead recalculate from the
passed in user_items
Returns
-------
list
List of (itemid, score) tuples
"""
pass
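The body here is abstract (``pass``); below is a hedged sketch of how a fitted subclass implementing recommend() is typically called, with toy data. The ``model`` object and the returned scores are assumptions for illustration only:

import numpy as np
from scipy.sparse import csr_matrix

# One row per user, one column per item; values are interaction weights (toy data).
user_items = csr_matrix(np.array([[1.0, 0.0, 2.0],
                                  [0.0, 3.0, 0.0]]))
recommendations = model.recommend(userid=0, user_items=user_items, N=2)  # 'model' is an assumed fitted recommender
# -> a list of (itemid, score) tuples, e.g. [(1, 0.87), (2, 0.31)]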
async def send_venue(self, latitude: base.Float, longitude: base.Float, title: base.String, address: base.String,
foursquare_id: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
"""
Use this method to send information about a venue.
Source: https://core.telegram.org/bots/api#sendvenue
:param latitude: Latitude of the venue
:type latitude: :obj:`base.Float`
:param longitude: Longitude of the venue
:type longitude: :obj:`base.Float`
:param title: Name of the venue
:type title: :obj:`base.String`
:param address: Address of the venue
:type address: :obj:`base.String`
:param foursquare_id: Foursquare identifier of the venue
:type foursquare_id: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
warn_deprecated('"Message.send_venue" method will be removed in 2.2 version.\n'
'Use "Message.reply_venue" instead.',
stacklevel=8)
return await self.bot.send_venue(chat_id=self.chat.id,
latitude=latitude,
longitude=longitude,
title=title,
address=address,
foursquare_id=foursquare_id,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup)
def get_trunk_interfaces(auth, url, devid=None, devip=None):
"""Function takes devId as input to RESTFULL call to HP IMC platform
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param devid: str requires devid of the target device
:param devip: str of ipv4 address of the target device
:return: list of dictionaries where each element of the list represents an interface which
has been configured as a
VLAN trunk port
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> trunk_interfaces = get_trunk_interfaces('10', auth.creds, auth.url)
>>> assert type(trunk_interfaces) is list
>>> assert len(trunk_interfaces[0]) == 3
>>> assert 'allowedVlans' in trunk_interfaces[0]
>>> assert 'ifIndex' in trunk_interfaces[0]
>>> assert 'pvid' in trunk_interfaces[0]
>>> get_trunk_interfaces('350', auth.creds, auth.url)
['No trunk inteface']
"""
if devip is not None:
devid = get_dev_details(devip, auth, url)['id']
get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devid) + \
"&start=1&size=5000&total=false"
f_url = url + get_trunk_interfaces_url
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
dev_trunk_interfaces = (json.loads(response.text))
if len(dev_trunk_interfaces) == 2:
if isinstance(dev_trunk_interfaces['trunkIf'], list):
return dev_trunk_interfaces['trunkIf']
elif isinstance(dev_trunk_interfaces['trunkIf'], dict):
return [dev_trunk_interfaces['trunkIf']]
else:
dev_trunk_interfaces['trunkIf'] = ["No trunk inteface"]
return dev_trunk_interfaces['trunkIf']
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + ' get_trunk_interfaces: An Error has occured' | Function takes devId as input to RESTFULL call to HP IMC platform
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param devid: str requires devid of the target device
:param devip: str of ipv4 address of the target device
:return: list of dictionaries where each element of the list represents an interface which
has been configured as a
VLAN trunk port
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> trunk_interfaces = get_trunk_interfaces('10', auth.creds, auth.url)
>>> assert type(trunk_interfaces) is list
>>> assert len(trunk_interfaces[0]) == 3
>>> assert 'allowedVlans' in trunk_interfaces[0]
>>> assert 'ifIndex' in trunk_interfaces[0]
>>> assert 'pvid' in trunk_interfaces[0]
>>> get_trunk_interfaces('350', auth.creds, auth.url)
['No trunk inteface'] |
def get_timefactor(cls) -> float:
"""Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is preferred:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5
"""
try:
parfactor = hydpy.pub.timegrids.parfactor
except RuntimeError:
if not (cls.parameterstep and cls.simulationstep):
raise RuntimeError(
f'To calculate the conversion factor for adapting '
f'the values of the time-dependent parameters, '
f'you need to define both a parameter and a simulation '
f'time step size first.')
else:
date1 = timetools.Date('2000.01.01')
date2 = date1 + cls.simulationstep
parfactor = timetools.Timegrids(timetools.Timegrid(
date1, date2, cls.simulationstep)).parfactor
return parfactor(cls.parameterstep)
def run_application(component: Union[Component, Dict[str, Any]], *, event_loop_policy: str = None,
max_threads: int = None, logging: Union[Dict[str, Any], int, None] = INFO,
start_timeout: Union[int, float, None] = 10):
"""
Configure logging and start the given root component in the default asyncio event loop.
Assuming the root component was started successfully, the event loop will continue running
until the process is terminated.
Initializes the logging system first based on the value of ``logging``:
* If the value is a dictionary, it is passed to :func:`logging.config.dictConfig` as
argument.
* If the value is an integer, it is passed to :func:`logging.basicConfig` as the logging
level.
* If the value is ``None``, logging setup is skipped entirely.
By default, the logging system is initialized using :func:`~logging.basicConfig` using the
``INFO`` logging level.
The default executor in the event loop is replaced with a new
:class:`~concurrent.futures.ThreadPoolExecutor` where the maximum number of threads is set to
the value of ``max_threads`` or, if omitted, the default value of
:class:`~concurrent.futures.ThreadPoolExecutor`.
:param component: the root component (either a component instance or a configuration dictionary
where the special ``type`` key is either a component class or a ``module:varname``
reference to one)
:param event_loop_policy: entry point name (from the ``asphalt.core.event_loop_policies``
namespace) of an alternate event loop policy (or a module:varname reference to one)
:param max_threads: the maximum number of worker threads in the default thread pool executor
(the default value depends on the event loop implementation)
:param logging: a logging configuration dictionary, :ref:`logging level <python:levels>` or
``None``
:param start_timeout: seconds to wait for the root component (and its subcomponents) to start
up before giving up (``None`` = wait forever)
"""
assert check_argument_types()
# Configure the logging system
if isinstance(logging, dict):
dictConfig(logging)
elif isinstance(logging, int):
basicConfig(level=logging)
# Inform the user whether -O or PYTHONOPTIMIZE was set when Python was launched
logger = getLogger(__name__)
logger.info('Running in %s mode', 'development' if __debug__ else 'production')
# Switch to an alternate event loop policy if one was provided
if event_loop_policy:
create_policy = policies.resolve(event_loop_policy)
policy = create_policy()
asyncio.set_event_loop_policy(policy)
logger.info('Switched event loop policy to %s', qualified_name(policy))
# Assign a new default executor with the given max worker thread limit if one was provided
event_loop = asyncio.get_event_loop()
if max_threads is not None:
event_loop.set_default_executor(ThreadPoolExecutor(max_threads))
logger.info('Installed a new thread pool executor with max_workers=%d', max_threads)
# Instantiate the root component if a dict was given
if isinstance(component, dict):
component = cast(Component, component_types.create_object(**component))
logger.info('Starting application')
context = Context()
exception = None # type: Optional[BaseException]
exit_code = 0
# Start the root component
try:
coro = asyncio.wait_for(component.start(context), start_timeout, loop=event_loop)
event_loop.run_until_complete(coro)
except asyncio.TimeoutError as e:
exception = e
logger.error('Timeout waiting for the root component to start')
exit_code = 1
except Exception as e:
exception = e
logger.exception('Error during application startup')
exit_code = 1
else:
logger.info('Application started')
# Add a signal handler to gracefully deal with SIGTERM
try:
event_loop.add_signal_handler(signal.SIGTERM, sigterm_handler, logger, event_loop)
except NotImplementedError:
pass # Windows does not support signals very well
# Finally, run the event loop until the process is terminated or Ctrl+C is pressed
try:
event_loop.run_forever()
except KeyboardInterrupt:
pass
except SystemExit as e:
exit_code = e.code
# Close the root context
logger.info('Stopping application')
event_loop.run_until_complete(context.close(exception))
# Shut down leftover async generators (requires Python 3.6+)
try:
event_loop.run_until_complete(event_loop.shutdown_asyncgens())
except (AttributeError, NotImplementedError):
pass
# Finally, close the event loop itself
event_loop.close()
logger.info('Application stopped')
# Shut down the logging system
shutdown()
if exit_code:
sys.exit(exit_code)
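A hedged minimal launch example; the import path and component class are assumptions based on the asphalt framework this function appears to come from:

from asphalt.core import Component, run_application  # assumed import location

class HelloComponent(Component):
    async def start(self, ctx):
        print('root component started')

if __name__ == '__main__':
    run_application(HelloComponent(), max_threads=4)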
def read(cls, f):
"""Read header from file. Headers end with length and then 1 blank line."""
url = None
line = f.readline()
if not line:
# EOF
return None
while not line.startswith(cls.LENGTH_HEADER):
if line.startswith(cls.URI_HEADER):
url = line[len(cls.URI_HEADER):].strip()
line = f.readline()
# Consume empty separator
f.readline()
# Read content
length = int(line.split(':')[1])
return cls(url, length)
def import_task_to_graph(diagram_graph, process_id, process_attributes, task_element):
"""
Adds to graph the new element that represents BPMN task.
In our representation tasks have only basic attributes and elements, inherited from Activity type,
so this method only needs to call add_flownode_to_graph.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param task_element: object representing a BPMN XML 'task' element.
"""
BpmnDiagramGraphImport.import_activity_to_graph(diagram_graph, process_id, process_attributes, task_element)
def _map_filtered_clusters_to_full_clusters(self,
clusters,
filter_map):
"""
Input: clusters, a list of cluster lists
filter_map, the seq_id in each clusters
is the key to the filter_map
containing all seq_ids with
duplicate FASTA sequences
Output: an extended list of cluster lists
"""
results = []
for cluster in clusters:
full_cluster = []
for seq_id in cluster:
full_cluster += filter_map[seq_id]
results.append(full_cluster)
return results
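A tiny worked example with made-up sequence ids:

clusters = [['a', 'c'], ['b']]
filter_map = {'a': ['a', 'a_dup1'], 'b': ['b'], 'c': ['c', 'c_dup1']}
# self._map_filtered_clusters_to_full_clusters(clusters, filter_map)
# -> [['a', 'a_dup1', 'c', 'c_dup1'], ['b']]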
def Nu_plate_Muley_Manglik(Re, Pr, chevron_angle, plate_enlargement_factor):
r'''Calculates Nusselt number for single-phase flow in a
Chevron-style plate heat exchanger according to [1]_, also shown in [2]_
and [3]_.
.. math::
Nu = [0.2668 - 0.006967(\beta) + 7.244\times 10^{-5}(\beta)^2]
\times[20.7803 - 50.9372\phi + 41.1585\phi^2 - 10.1507\phi^3]
\times Re^{[0.728 + 0.0543\sin[(2\pi\beta/90) + 3.7]]} Pr^{1/3}
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
plate_enlargement_factor : float
The extra surface area multiplier as compared to a flat plate
caused the corrugations, [-]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
The correlation as presented in [1]_ suffers from a typo, with a
coefficient of 10.51 instead of 10.15. Several more decimal places were
published along with the corrected typo in [2]_. This has a *very large*
difference if not implemented.
The viscosity correction power is recommended to be the blanket
Sieder and Tate (1936) value of 0.14.
The correlation is recommended in the range of Reynolds numbers above
1000, chevron angles between 30 and 60 degrees, and enlargement factors
from 1 to 1.5. Due to its cubic nature it is not likely to give good
results if the chevron angle or enlargement factors are out of those
ranges.
Examples
--------
>>> Nu_plate_Muley_Manglik(Re=2000, Pr=.7, chevron_angle=45,
... plate_enlargement_factor=1.18)
36.49087100602062
References
----------
.. [1] Muley, A., and R. M. Manglik. "Experimental Study of Turbulent Flow
Heat Transfer and Pressure Drop in a Plate Heat Exchanger With Chevron
Plates." Journal of Heat Transfer 121, no. 1 (February 1, 1999): 110-17.
doi:10.1115/1.2825923.
.. [2] Palm, Björn, and Joachim Claesson. "Plate Heat Exchangers:
Calculation Methods for Single- and Two-Phase Flow (Keynote)," January
1, 2005, 103-13. https://doi.org/10.1115/ICMM2005-75092.
'''
beta, phi = chevron_angle, plate_enlargement_factor
t1 = (0.2668 - 0.006967*beta + 7.244E-5*beta**2)
#t2 = (20.78 - 50.94*phi + 41.16*phi**2 - 10.51*phi**3)
# It was the extra decimals which were needed
t2 = (20.7803 - 50.9372*phi + 41.1585*phi**2 - 10.1507*phi**3)
t3 = (0.728 + 0.0543*sin((2*pi*beta/90) + 3.7))
return t1*t2*Re**t3*Pr**(1/3.) | r'''Calculates Nusselt number for single-phase flow in a
Chevron-style plate heat exchanger according to [1]_, also shown in [2]_
and [3]_.
.. math::
Nu = [0.2668 - 0.006967(\beta) + 7.244\times 10^{-5}(\beta)^2]
\times[20.7803 - 50.9372\phi + 41.1585\phi^2 - 10.1507\phi^3]
\times Re^{[0.728 + 0.0543\sin[(2\pi\beta/90) + 3.7]]} Pr^{1/3}
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
plate_enlargement_factor : float
The extra surface area multiplier as compared to a flat plate
caused by the corrugations, [-]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
The correlation as presented in [1]_ suffers from a typo, with a
coefficient of 10.51 instead of 10.15. Several more decimal places were
published along with the corrected typo in [2]_. Using the uncorrected
coefficient makes a *very large* difference in the result.
The viscosity correction power is recommended to be the blanket
Sieder and Tate (1936) value of 0.14.
The correlation is recommended in the range of Reynolds numbers above
1000, chevron angles between 30 and 60 degrees, and enlargement factors
from 1 to 1.5. Due to its cubic nature it is not likely to give good
results if the chevron angle or enlargement factors are out of those
ranges.
Examples
--------
>>> Nu_plate_Muley_Manglik(Re=2000, Pr=.7, chevron_angle=45,
... plate_enlargement_factor=1.18)
36.49087100602062
References
----------
.. [1] Muley, A., and R. M. Manglik. "Experimental Study of Turbulent Flow
Heat Transfer and Pressure Drop in a Plate Heat Exchanger With Chevron
Plates." Journal of Heat Transfer 121, no. 1 (February 1, 1999): 110-17.
doi:10.1115/1.2825923.
.. [2] Palm, Björn, and Joachim Claesson. "Plate Heat Exchangers:
Calculation Methods for Single- and Two-Phase Flow (Keynote)," January
1, 2005, 103-13. https://doi.org/10.1115/ICMM2005-75092. |
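The note about the typo is easy to check numerically. A minimal standalone sketch (not part of the original module; phi = 1.18 is borrowed from the docstring example) compares the enlargement-factor polynomial with the misprinted coefficient 10.51 and with the corrected 10.1507:
phi = 1.18
t2_typo = 20.78 - 50.94*phi + 41.16*phi**2 - 10.51*phi**3            # coefficients as misprinted
t2_fixed = 20.7803 - 50.9372*phi + 41.1585*phi**2 - 10.1507*phi**3   # corrected values with the extra decimals from [2]
print(t2_typo, t2_fixed)   # roughly 0.71 vs 1.31 -- Nu scales by the same ratio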
def __load_file(self, key_list) -> str:
""" Load a translator file """
file = str(key_list[0]) + self.extension
key_list.pop(0)
file_path = os.path.join(self.path, file)
if os.path.exists(file_path):
return Json.from_file(file_path)
else:
raise FileNotFoundError(file_path) | Load a translator file |
def params_as_tensors_for(*objs, convert=True):
"""
Context manager which changes the representation of parameters and data holders
for the specific parameterized object(s).
This can also be used to turn off tensor conversion functions wrapped with
`params_as_tensors`:
```
@gpflow.params_as_tensors
def compute_something(self): # self is parameterized object.
s = tf.reduce_sum(self.a) # self.a is a parameter.
with params_as_tensors_for(self, convert=False):
b = self.c.constrained_tensor
return s + b
```
:param objs: one or more instances of classes deriving from Parameterized
:param convert: Flag which is used for turning the tensor conversion
feature on, `True`, or turning it off, `False`.
"""
objs = set(objs) # remove duplicate objects so the tensor mode won't be changed before saving
prev_values = [_params_as_tensors_enter(o, convert) for o in objs]
try:
yield
finally:
for o, pv in reversed(list(zip(objs, prev_values))):
_params_as_tensors_exit(o, pv) | Context manager which changes the representation of parameters and data holders
for the specific parameterized object(s).
This can also be used to turn off tensor conversion functions wrapped with
`params_as_tensors`:
```
@gpflow.params_as_tensors
def compute_something(self): # self is parameterized object.
s = tf.reduce_sum(self.a) # self.a is a parameter.
with params_as_tensors_for(self, convert=False):
b = self.c.constrained_tensor
return s + b
```
:param objs: one or more instances of classes deriving from Parameterized
:param convert: Flag which is used for turning the tensor conversion
feature on, `True`, or turning it off, `False`. |
def _validate_calibration_params(strategy='accuracy', min_rate=None,
beta=1.):
"""Ensure that calibration parameters have allowed values"""
if strategy not in ('accuracy', 'f_beta', 'max_tpr',
'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or '
'"max_tpr" or "max_tnr". Got "{}" instead.'
.format(strategy))
if strategy == 'max_tpr' or strategy == 'max_tnr':
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in'
'[0, 1]. '
'Got {} instead.'.format(min_rate))
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. '
'Got {} instead.'.format(type(beta))) | Ensure that calibration parameters have allowed values |
def verifydropdown(self, window_name, object_name):
"""
Verify drop down list / menu popped up
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success 0 on failure.
@rtype: integer
"""
try:
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled or not object_handle.AXChildren:
return 0
# Get AXMenu
children = object_handle.AXChildren[0]
if children:
return 1
except LdtpServerException:
pass
return 0 | Verify drop down list / menu popped up
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success 0 on failure.
@rtype: integer |
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> gargoyle.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
self._registry.pop(condition_set.get_id(), None) | Unregisters a condition set with the manager.
>>> gargoyle.unregister(condition_set) #doctest: +SKIP |
def get_event_consumer(config, success_channel, error_channel, metrics,
**kwargs):
"""Get a GPSEventConsumer client.
A factory function that validates configuration, creates schema
validator and parser clients, creates an auth and a pubsub client,
and returns an event consumer (:interface:`gordon.interfaces.
IRunnable` and :interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
success_channel (asyncio.Queue): Queue to place a successfully
consumed message to be further handled by the ``gordon``
core system.
error_channel (asyncio.Queue): Queue to place a message met
with errors to be further handled by the ``gordon`` core
system.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
event consumer.
Returns:
A :class:`GPSEventConsumer` instance.
"""
builder = event_consumer.GPSEventConsumerBuilder(
config, success_channel, error_channel, metrics, **kwargs)
return builder.build_event_consumer() | Get a GPSEventConsumer client.
A factory function that validates configuration, creates schema
validator and parser clients, creates an auth and a pubsub client,
and returns an event consumer (:interface:`gordon.interfaces.
IRunnable` and :interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
success_channel (asyncio.Queue): Queue to place a successfully
consumed message to be further handled by the ``gordon``
core system.
error_channel (asyncio.Queue): Queue to place a message met
with errors to be further handled by the ``gordon`` core
system.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
event consumer.
Returns:
A :class:`GPSEventConsumer` instance. |
def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
args = parser.parse_args()
if args.width <= 0:
raise argparse.ArgumentError(args.width, 'The contour width must be a positive number.')
return args | Provides additional validation of the arguments collected by argparse. |
def adjust_weights_discrepancy(self, resfile=None,original_ceiling=True):
"""adjusts the weights of each non-zero weight observation based
on the residual in the pest residual file so that each observation's contribution
to phi is 1.0
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
if resfile is not None:
self.resfile = resfile
self.__res = None
obs = self.observation_data.loc[self.nnz_obs_names,:]
swr = (self.res.loc[self.nnz_obs_names,:].residual * obs.weight)**2
factors = (1.0/swr).apply(np.sqrt)
if original_ceiling:
factors = factors.apply(lambda x: 1.0 if x > 1.0 else x)
self.observation_data.loc[self.nnz_obs_names,"weight"] *= factors | adjusts the weights of each non-zero weight observation based
on the residual in the pest residual file so that each observation's contribution
to phi is 1.0
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True |
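The rebalancing rule above is compact; a standalone numeric sketch (plain numpy, not the Pst API) makes it explicit: each weight is scaled by sqrt(1 / (residual * weight)**2) so the observation contributes 1.0 to phi, and with original_ceiling=True the factor is capped at 1 so weights never grow.
import numpy as np
residual = np.array([2.0, 0.5])
weight = np.array([1.0, 1.0])
swr = (residual * weight) ** 2           # squared weighted residuals
factor = np.sqrt(1.0 / swr)              # 0.5 and 2.0
factor = np.minimum(factor, 1.0)         # ceiling: never increase a weight
new_weight = weight * factor
print((residual * new_weight) ** 2)      # [1.0, 0.25] -- the capped entry stays below 1.0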
def float_greater_or_equal(threshold: float) -> Callable:
"""
Returns a method that can be used in argument parsing to check that the float argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse.
"""
def check_greater_equal(value: str):
value_to_check = float(value)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %f." % threshold)
return value_to_check
return check_greater_equal | Returns a method that can be used in argument parsing to check that the float argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse. |
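A short usage sketch (the option name is made up) showing the factory as an argparse type; values below the threshold are rejected during parsing.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--learning-rate', type=float_greater_or_equal(0.0))
args = parser.parse_args(['--learning-rate', '0.01'])    # accepted
# parser.parse_args(['--learning-rate', '-1'])           # rejected via ArgumentTypeError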
def mergecn(args):
"""
%prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosome after another.
"""
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [op.join(s + "-cn", "{}.{}.cn".
format(op.basename(s), seqid)) for s in samples]
arrays = [np.fromfile(name, dtype=np.float) for name in names]
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug("K-means with {} c0:{} c1:{}"
.format(seqid, zero_med, one_med))
higher_idx = 1 if one_med > zero_med else 0
# Use the higher mean coverage component
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in xrange(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
ar.tofile("{}.bin".format(seqid)) | %prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosome after another. |
def mechanism(self):
"""tuple[int]: The nodes of the mechanism in the partition."""
return tuple(sorted(
chain.from_iterable(part.mechanism for part in self))) | tuple[int]: The nodes of the mechanism in the partition. |
def notify(title,
message,
api_key=NTFY_API_KEY,
provider_key=None,
priority=0,
url=None,
retcode=None):
"""
Optional parameters:
* ``api_key`` - use your own application token
* ``provider_key`` - if you are whitelisted
* ``priority``
* ``url``
"""
data = {
'apikey': api_key,
'application': 'ntfy',
'event': title,
'description': message,
}
if MIN_PRIORITY <= priority <= MAX_PRIORITY:
data['priority'] = priority
else:
raise ValueError('priority must be an integer from {:d} to {:d}'
.format(MIN_PRIORITY, MAX_PRIORITY))
if url is not None:
data['url'] = url
if provider_key is not None:
data['providerkey'] = provider_key
resp = requests.post(
API_URL, data=data, headers={
'User-Agent': USER_AGENT,
})
resp.raise_for_status() | Optional parameters:
* ``api_key`` - use your own application token
* ``provider_key`` - if you are whitelisted
* ``priority``
* ``url`` |
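A one-line usage sketch (the title and message are made up; api_key and url fall back to the defaults in the signature):
notify('Nightly backup', 'finished without errors', priority=1)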
def load(self):
"""
Loads updated attributes for a LoadBalancer object.
Requires self.id to be set.
"""
data = self.get_data('load_balancers/%s' % self.id, type=GET)
load_balancer = data['load_balancer']
# Setting the attribute values
for attr in load_balancer.keys():
if attr == 'health_check':
health_check = HealthCheck(**load_balancer['health_check'])
setattr(self, attr, health_check)
elif attr == 'sticky_sessions':
sticky_ses = StickySesions(**load_balancer['sticky_sessions'])
setattr(self, attr, sticky_ses)
elif attr == 'forwarding_rules':
rules = list()
for rule in load_balancer['forwarding_rules']:
rules.append(ForwardingRule(**rule))
setattr(self, attr, rules)
else:
setattr(self, attr, load_balancer[attr])
return self | Loads updated attributes for a LoadBalancer object.
Requires self.id to be set. |
def verifySignature(ecPublicSigningKey, message, signature):
"""
:type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray
"""
if ecPublicSigningKey.getType() == Curve.DJB_TYPE:
result = _curve.verifySignature(ecPublicSigningKey.getPublicKey(), message, signature)
return result == 0
else:
raise InvalidKeyException("Unknown type: %s" % ecPublicSigningKey.getType()) | :type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray |
def get_login_theme():
"""Load a custom login theme (e.g. snow)"""
today = datetime.now().date()
if today.month == 12 or today.month == 1:
# Snow
return {"js": "themes/snow/snow.js", "css": "themes/snow/snow.css"}
if today.month == 3 and (14 <= today.day <= 16):
return {"js": "themes/piday/piday.js", "css": "themes/piday/piday.css"}
return {} | Load a custom login theme (e.g. snow) |
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope("unpack"):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv | Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction. |
def compile(self, module):
'''compile
High-level api: Compile a module.
Parameters
----------
module : `str`
Module name that is inquired about.
Returns
-------
Model
A Model object.
'''
imports, depends = self.get_dependencies(module)
file_list = list(imports | depends) + [module]
cmd_list = ['pyang', '-f', 'cxml', '--plugindir', self.pyang_plugins]
cmd_list += ['-p', self.dir_yang]
cmd_list += [self.dir_yang + '/' + f + '.yang' for f in file_list]
logger.info('Compiling {}.yang: {}'.format(module,
' '.join(cmd_list)))
p = Popen(' '.join(cmd_list), shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
logger.info('pyang return code is {}'.format(p.returncode))
if p.returncode == 0:
logger.debug(stderr.decode())
else:
logger.error(stderr.decode())
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.XML(stdout.decode(), parser)
return Model(tree) | compile
High-level api: Compile a module.
Parameters
----------
module : `str`
Module name that is inquired about.
Returns
-------
Model
A Model object. |
def get_new(mserver_url, token, board):
'''get node sn and key'''
thread = termui.waiting_echo("Getting message from Server...")
thread.daemon = True
thread.start()
try:
params = {"name":"node000", "board":board, "access_token":token}
r = requests.post("%s%s" %(mserver_url, nodes_create_endpoint), params=params, timeout=10, verify=verify)
r.raise_for_status()
json_response = r.json()
except requests.exceptions.HTTPError as e:
thread.stop('')
thread.join()
if r.status_code == 400:
error = r.json().get("error", None)
click.secho(">> %s" %error, fg='red')
else:
click.secho(">> %s" %e, fg='red')
return None
except Exception as e:
thread.stop('')
thread.join()
click.secho(">> %s" %e, fg='red')
return None
thread.stop('')
thread.join()
return json_response | get node sn and key |
def _select_Generic_superclass_parameters(subclass, superclass_origin):
"""Helper for _issubclass_Generic.
"""
subclass = _find_base_with_origin(subclass, superclass_origin)
if subclass is None:
return None
if subclass.__origin__ is superclass_origin:
return subclass.__args__
prms = _find_Generic_super_origin(subclass, superclass_origin)
res = []
for prm in prms:
sub_search = subclass
while sub_search is not None:
try:
res.append(sub_search.__args__[sub_search.__origin__.__parameters__.index(prm)])
break
except ValueError:
# We search the closest base that actually contains the parameter
sub_search = _find_base_with_origin(
sub_search.__origin__, superclass_origin)
else:
return None
return res | Helper for _issubclass_Generic. |
def _update_centers(X, membs, n_clusters):
""" Update Cluster Centers:
calculate the mean of feature vectors for each cluster
"""
centers = np.empty(shape=(n_clusters, X.shape[1]), dtype=float)
sse = np.empty(shape=n_clusters, dtype=float)
for clust_id in range(n_clusters):
memb_ids = np.where(membs == clust_id)[0]
if memb_ids.shape[0] == 0:
memb_ids = np.random.choice(X.shape[0], size=1)
#print("Empty cluster replaced with ", memb_ids)
centers[clust_id,:] = np.mean(X[memb_ids,:], axis=0)
sse[clust_id] = _cal_dist2center(X[memb_ids,:], centers[clust_id,:])
return(centers, sse) | Update Cluster Centers:
calculate the mean of feature vectors for each cluster |
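Written out, this is the usual k-means centroid update; assuming _cal_dist2center returns the summed squared distances to the center, for cluster k with member set C_k:
\mu_k = \frac{1}{|C_k|} \sum_{i \in C_k} x_i,
\qquad
\mathrm{SSE}_k = \sum_{i \in C_k} \lVert x_i - \mu_k \rVert^2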
def _download_query(self, as_of):
"""Formulate the specific query needed for download
Not intended to be called by developers directly.
:param as_of: Date in 'YYYYMMDD' format
:type as_of: string
"""
c = self.institution.client()
q = c.bank_account_query(
number=self.number,
date=as_of,
account_type=self.account_type,
bank_id=self.routing_number)
return q | Formulate the specific query needed for download
Not intended to be called by developers directly.
:param as_of: Date in 'YYYYMMDD' format
:type as_of: string |
def spec(self) -> list:
"""Returns prefix unary operators list.
Sets only one regex for all items in the dict."""
spec = [item
for op, pat in self.ops.items()
for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}),
('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})]
]
spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys()))
return spec | Returns prefix unary operators list.
Sets only one regex for all items in the dict. |
async def ListModels(self, tag):
'''
tag : str
Returns -> typing.Sequence[~UserModel]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='ModelManager',
request='ListModels',
version=5,
params=_params)
_params['tag'] = tag
reply = await self.rpc(msg)
return reply | tag : str
Returns -> typing.Sequence[~UserModel] |
def mt_fields(fields, nomaster=False, onlydefaultlang=False):
"""
Returns list of fields for multilanguage fields of model.
Examples:
print(mt_fields('name', 'desc'))
['name', 'name_en', 'name_uk', 'desc', 'desc_en', 'desc_uk']
MyModel.objects.only(*mt_fields('name', 'desc', 'content'))
If nomaster, the master field will not be appended.
F.e.: ['name_en', 'name_uk'] -- without master 'name'.
If onlydefaultlang, only the default language will be searched:
F.e.: ['name', 'name_en'] -- without additional 'name_uk'.
If nomaster and onlydefaultlang, both rules will be applied.
F.e.: ['name_en'] -- without master 'name' and additional 'name_uk'.
"""
assert isinstance(fields, (list, tuple))
fl = []
for field in fields:
if not nomaster:
fl.append(field)
if onlydefaultlang:
fl.append('{}_{}'.format(field, DEFAULT_LANGUAGE))
else:
for lang in AVAILABLE_LANGUAGES:
fl.append('{}_{}'.format(field, lang))
return fl | Returns list of fields for multilanguage fields of model.
Examples:
print(mt_fields('name', 'desc'))
['name', 'name_en', 'name_uk', 'desc', 'desc_en', 'desc_uk']
MyModel.objects.only(*mt_fields('name', 'desc', 'content'))
If nomaster, the master field will not be appended.
F.e.: ['name_en', 'name_uk'] -- without master 'name'.
If onlydefaultlang, only the default language will be searched:
F.e.: ['name', 'name_en'] -- without additional 'name_uk'.
If nomaster and onlydefaultlang, both rules will be applied.
F.e.: ['name_en'] -- without master 'name' and additional 'name_uk'. |
def generate_lines(input_file,
start=0,
stop=float('inf')):
"""Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time"""
with gzip.GzipFile(input_file, 'rU') as f:
for i, line in enumerate(f):
if i < start:
continue
if i >= stop:
break
yield line.rstrip() | Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time |
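A small usage sketch (the file name is hypothetical): stream ten lines from the middle of a gzipped text file without reading the whole file into memory.
for line in generate_lines('corpus.txt.gz', start=100, stop=110):
    print(line)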
def check_site_enabled(site):
'''
Checks to see if the specific site symlink is in /etc/apache2/sites-enabled.
This will only be functional on Debian-based operating systems (Ubuntu,
Mint, etc).
CLI Examples:
.. code-block:: bash
salt '*' apache.check_site_enabled example.com
salt '*' apache.check_site_enabled example.com.conf
'''
if site.endswith('.conf'):
site_file = site
else:
site_file = '{0}.conf'.format(site)
if os.path.islink('{0}/{1}'.format(SITE_ENABLED_DIR, site_file)):
return True
elif site == 'default' and \
os.path.islink('{0}/000-{1}'.format(SITE_ENABLED_DIR, site_file)):
return True
else:
return False | Checks to see if the specific site symlink is in /etc/apache2/sites-enabled.
This will only be functional on Debian-based operating systems (Ubuntu,
Mint, etc).
CLI Examples:
.. code-block:: bash
salt '*' apache.check_site_enabled example.com
salt '*' apache.check_site_enabled example.com.conf |
def parse_argument(string: str) -> Union[str, Tuple[str, str]]:
"""Return a single value for a string understood as a positional
argument or a |tuple| containing a keyword and its value for a
string understood as a keyword argument.
|parse_argument| is intended to be used as a helper function for
function |execute_scriptfunction| only. See the following
examples to see which types of keyword arguments |execute_scriptfunction|
covers:
>>> from hydpy.exe.commandtools import parse_argument
>>> parse_argument('x=3')
('x', '3')
>>> parse_argument('"x=3"')
'"x=3"'
>>> parse_argument("'x=3'")
"'x=3'"
>>> parse_argument('x="3==3"')
('x', '"3==3"')
>>> parse_argument("x='3==3'")
('x', "'3==3'")
"""
idx_equal = string.find('=')
if idx_equal == -1:
return string
idx_quote = idx_equal+1
for quote in ('"', "'"):
idx = string.find(quote)
if -1 < idx < idx_quote:
idx_quote = idx
if idx_equal < idx_quote:
return string[:idx_equal], string[idx_equal+1:]
return string | Return a single value for a string understood as a positional
argument or a |tuple| containing a keyword and its value for a
string understood as a keyword argument.
|parse_argument| is intended to be used as a helper function for
function |execute_scriptfunction| only. See the following
examples to see which types of keyword arguments |execute_scriptfunction|
covers:
>>> from hydpy.exe.commandtools import parse_argument
>>> parse_argument('x=3')
('x', '3')
>>> parse_argument('"x=3"')
'"x=3"'
>>> parse_argument("'x=3'")
"'x=3'"
>>> parse_argument('x="3==3"')
('x', '"3==3"')
>>> parse_argument("x='3==3'")
('x', "'3==3'") |
def query_nexus(query_url, timeout_sec, basic_auth=None):
"""Queries Nexus for an artifact
:param query_url: (str) Query URL
:param timeout_sec: (int) query timeout
:param basic_auth (HTTPBasicAuth) object or none
:return: requests.Response object
:raises: RuntimeError
"""
log = logging.getLogger(mod_logger + '.query_nexus')
# Attempt to query Nexus
retry_sec = 5
max_retries = 6
try_num = 1
query_success = False
nexus_response = None
while try_num <= max_retries:
if query_success:
break
log.debug('Attempt # {n} of {m} to query the Nexus URL: {u}'.format(n=try_num, u=query_url, m=max_retries))
try:
nexus_response = requests.get(query_url, auth=basic_auth, stream=True, timeout=timeout_sec)
except requests.exceptions.Timeout:
_, ex, trace = sys.exc_info()
msg = '{n}: Nexus initial query timed out after {t} seconds:\n{e}'.format(
n=ex.__class__.__name__, t=timeout_sec, r=retry_sec, e=str(ex))
log.warn(msg)
if try_num < max_retries:
log.info('Retrying query in {t} sec...'.format(t=retry_sec))
time.sleep(retry_sec)
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
_, ex, trace = sys.exc_info()
msg = '{n}: Nexus initial query failed with the following exception:\n{e}'.format(
n=ex.__class__.__name__, r=retry_sec, e=str(ex))
log.warn(msg)
if try_num < max_retries:
log.info('Retrying query in {t} sec...'.format(t=retry_sec))
time.sleep(retry_sec)
else:
query_success = True
try_num += 1
if not query_success:
msg = 'Unable to query Nexus after {m} attempts using URL: {u}'.format(
u=query_url, m=max_retries)
log.error(msg)
raise RuntimeError(msg)
if nexus_response.status_code != 200:
msg = 'Nexus request returned code {c}, unable to query Nexus using URL: {u}'.format(
u=query_url, c=nexus_response.status_code)
log.error(msg)
raise RuntimeError(msg)
return nexus_response | Queries Nexus for an artifact
:param query_url: (str) Query URL
:param timeout_sec: (int) query timeout
:param basic_auth (HTTPBasicAuth) object or none
:return: requests.Response object
:raises: RuntimeError |
def p2pkh_input_and_witness(outpoint, sig, pubkey, sequence=0xFFFFFFFE):
'''
OutPoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts
'''
stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey)
return tb.make_legacy_input_and_empty_witness(
outpoint=outpoint,
stack_script=script_ser.serialize(stack_script),
redeem_script=b'',
sequence=sequence) | OutPoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts |
def send_invite_email(application, link, is_secret):
""" Sends an email inviting someone to create an account"""
if not application.applicant.email:
return
context = CONTEXT.copy()
context['receiver'] = application.applicant
context['application'] = application
context['link'] = link
context['is_secret'] = is_secret
to_email = application.applicant.email
subject, body = render_email('common_invite', context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email]) | Sends an email inviting someone to create an account |
def map_entity(self, entity: dal.Price) -> PriceModel:
""" Map the price entity """
if not entity:
return None
result = PriceModel()
result.currency = entity.currency
# date/time
dt_string = entity.date
format_string = "%Y-%m-%d"
if entity.time:
dt_string += f"T{entity.time}"
format_string += "T%H:%M:%S"
price_datetime = datetime.strptime(dt_string, format_string)
result.datum = Datum()
result.datum.from_datetime(price_datetime)
assert isinstance(result.datum, Datum)
#result.namespace = entity.namespace
#result.symbol = entity.symbol
result.symbol = SecuritySymbol(entity.namespace, entity.symbol)
# Value
value = Decimal(entity.value) / Decimal(entity.denom)
result.value = Decimal(value)
return result | Map the price entity |
def replace_between_tags(text, repl_, start_tag, end_tag=None):
r"""
Replaces text between sentinel lines in a block of text.
Args:
text (str):
repl_ (str):
start_tag (str):
end_tag (str): (default=None)
Returns:
str: new_text
CommandLine:
python -m utool.util_str --exec-replace_between_tags
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> text = ut.codeblock(
'''
class:
# <FOO>
bar
# </FOO>
baz
''')
>>> repl_ = 'spam'
>>> start_tag = '# <FOO>'
>>> end_tag = '# </FOO>'
>>> new_text = replace_between_tags(text, repl_, start_tag, end_tag)
>>> result = ('new_text =\n%s' % (str(new_text),))
>>> print(result)
new_text =
class:
# <FOO>
spam
# </FOO>
baz
"""
new_lines = []
editing = False
lines = text.split('\n')
for line in lines:
if not editing:
new_lines.append(line)
if line.strip().startswith(start_tag):
new_lines.append(repl_)
editing = True
if end_tag is not None and line.strip().startswith(end_tag):
editing = False
new_lines.append(line)
new_text = '\n'.join(new_lines)
return new_text | r"""
Replaces text between sentinel lines in a block of text.
Args:
text (str):
repl_ (str):
start_tag (str):
end_tag (str): (default=None)
Returns:
str: new_text
CommandLine:
python -m utool.util_str --exec-replace_between_tags
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> text = ut.codeblock(
'''
class:
# <FOO>
bar
# </FOO>
baz
''')
>>> repl_ = 'spam'
>>> start_tag = '# <FOO>'
>>> end_tag = '# </FOO>'
>>> new_text = replace_between_tags(text, repl_, start_tag, end_tag)
>>> result = ('new_text =\n%s' % (str(new_text),))
>>> print(result)
new_text =
class:
# <FOO>
spam
# </FOO>
baz |
def _get_error_code(self, e):
"""Extract error code from ftp exception"""
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except ValueError:
return e | Extract error code from ftp exception |
def get_host(environ):
# type: (Dict[str, str]) -> str
"""Return the host for the given WSGI environment. Yanked from Werkzeug."""
if environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv | Return the host for the given WSGI environment. Yanked from Werkzeug. |
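A minimal sketch of the port-stripping behaviour with a hand-built WSGI environ (the values are illustrative):
environ = {'wsgi.url_scheme': 'https', 'HTTP_HOST': 'example.com:443'}
print(get_host(environ))   # 'example.com' -- the default port for the scheme is dropped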
def insert_object_into_db_pk_unknown(self,
obj: Any,
table: str,
fieldlist: Sequence[str]) -> None:
"""Inserts object into database table, with PK (first field) initially
unknown (and subsequently set in the object from the database)."""
self.ensure_db_open()
valuelist = []
for f in fieldlist[1:]:
valuelist.append(getattr(obj, f))
cursor = self.db.cursor()
self.db_exec_with_cursor(
cursor,
get_sql_insert_without_first_field(table, fieldlist,
self.get_delims()),
*valuelist
)
pkvalue = get_pk_of_last_insert(cursor)
setattr(obj, fieldlist[0], pkvalue) | Inserts object into database table, with PK (first field) initially
unknown (and subsequently set in the object from the database). |
def toggle_exclusivity(self,override=None):
"""
Toggles mouse exclusivity via pyglet's :py:meth:`set_exclusive_mouse()` method.
If ``override`` is given, it will be used instead.
You may also read the current exclusivity state via :py:attr:`exclusive`\ .
"""
if override is not None:
new = override
else:
new = not self.exclusive
self.exclusive = new
self.set_exclusive_mouse(self.exclusive)
self.peng.sendEvent("peng3d:window.toggle_exclusive",{"peng":self.peng,"window":self,"exclusive":self.exclusive}) | Toggles mouse exclusivity via pyglet's :py:meth:`set_exclusive_mouse()` method.
If ``override`` is given, it will be used instead.
You may also read the current exclusivity state via :py:attr:`exclusive`\ . |
def crop(self, vector, resolution=None, masked=None,
bands=None, resampling=Resampling.cubic):
"""
crops raster outside vector (convex hull)
:param vector: GeoVector, GeoFeature, FeatureCollection
:param resolution: output resolution, None for full resolution
:param resampling: reprojection resampling method, default `cubic`
:return: GeoRaster
"""
bounds, window = self._vector_to_raster_bounds(vector.envelope, boundless=self._image is None)
if resolution:
xsize, ysize = self._resolution_to_output_shape(bounds, resolution)
else:
xsize, ysize = (None, None)
return self.pixel_crop(bounds, xsize, ysize, window=window,
masked=masked, bands=bands, resampling=resampling) | crops raster outside vector (convex hull)
:param vector: GeoVector, GeoFeature, FeatureCollection
:param resolution: output resolution, None for full resolution
:param resampling: reprojection resampling method, default `cubic`
:return: GeoRaster |
def alerts(self, alert_level='High'):
"""Get a filtered list of alerts at the given alert level, and sorted by alert level."""
alerts = self.zap.core.alerts()
alert_level_value = self.alert_levels[alert_level]
alerts = sorted((a for a in alerts if self.alert_levels[a['risk']] >= alert_level_value),
key=lambda k: self.alert_levels[k['risk']], reverse=True)
return alerts | Get a filtered list of alerts at the given alert level, and sorted by alert level. |
def get_rich_menu_image(self, rich_menu_id, timeout=None):
"""Call download rich menu image API.
https://developers.line.me/en/docs/messaging-api/reference/#download-rich-menu-image
:param str rich_menu_id: ID of the rich menu with the image to be downloaded
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Content`
:return: Content instance
"""
response = self._get(
'/v2/bot/richmenu/{rich_menu_id}/content'.format(rich_menu_id=rich_menu_id),
timeout=timeout
)
return Content(response) | Call download rich menu image API.
https://developers.line.me/en/docs/messaging-api/reference/#download-rich-menu-image
:param str rich_menu_id: ID of the rich menu with the image to be downloaded
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Content`
:return: Content instance |
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst | Generate a list of all recovered basic blocks. |
def write_data(self, data, dstart=None, swap_axes=True):
"""Write ``data`` to `file`.
Parameters
----------
data : `array-like`
Data that should be written to `file`.
dstart : non-negative int, optional
Offset in bytes of the start position of the written data.
If provided, reshaping and axis swapping of ``data`` is
skipped.
For ``None``, `header_size` is used.
swap_axes : bool, optional
If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries
to swap the axes in the ``data`` before writing. Use ``False``
only if the data is already consistent with the final axis
order.
"""
if dstart is None:
shape = self.data_shape
dstart = int(self.header_size)
elif dstart < 0:
raise ValueError('`dstart` must be non-negative, got {}'
''.format(dstart))
else:
shape = -1
dstart = int(dstart)
if dstart < self.header_size:
raise ValueError('invalid `dstart`, resulting in absolute '
'`dstart` < `header_size` ({} < {})'
''.format(dstart, self.header_size))
data = np.asarray(data, dtype=self.data_dtype).reshape(shape)
if swap_axes:
# Need to argsort here since `data_axis_order` tells
# "which axis comes from where", which is the inverse of what the
# `transpose` function needs.
data = np.transpose(data, axes=np.argsort(self.data_axis_order))
assert data.shape == self.data_storage_shape
data = data.reshape(-1, order='F')
self.file.seek(dstart)
data.tofile(self.file) | Write ``data`` to `file`.
Parameters
----------
data : `array-like`
Data that should be written to `file`.
dstart : non-negative int, optional
Offset in bytes of the start position of the written data.
If provided, reshaping and axis swapping of ``data`` is
skipped.
For ``None``, `header_size` is used.
swap_axes : bool, optional
If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries
to swap the axes in the ``data`` before writing. Use ``False``
only if the data is already consistent with the final axis
order. |
def update(self, campaign_id, area, nick=None):
'''xxxxx.xxxxx.campaign.area.update
===================================
Update the targeting area of a promotion campaign'''
request = TOPRequest('xxxxx.xxxxx.campaign.area.update')
request['campaign_id'] = campaign_id
request['area'] = area
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignArea})
return self.result | xxxxx.xxxxx.campaign.area.update
===================================
Update the targeting area of a promotion campaign |
def create(cls, options, session, build_root=None, exclude_patterns=None, tags=None):
"""
:param Options options: An `Options` instance to use.
:param session: The Scheduler session
:param string build_root: The build root.
"""
# Determine the literal target roots.
spec_roots = cls.parse_specs(
target_specs=options.target_specs,
build_root=build_root,
exclude_patterns=exclude_patterns,
tags=tags)
# Determine `Changed` arguments directly from options to support pre-`Subsystem`
# initialization paths.
changed_options = options.for_scope('changed')
changed_request = ChangedRequest.from_options(changed_options)
# Determine the `--owner-of=` arguments provided from the global options
owned_files = options.for_global_scope().owner_of
logger.debug('spec_roots are: %s', spec_roots)
logger.debug('changed_request is: %s', changed_request)
logger.debug('owned_files are: %s', owned_files)
targets_specified = sum(1 for item
in (changed_request.is_actionable(), owned_files, spec_roots.dependencies)
if item)
if targets_specified > 1:
# We've been provided more than one of: a change request, an owner request, or spec roots.
raise InvalidSpecConstraint(
'Multiple target selection methods provided. Please use only one of '
'--changed-*, --owner-of, or target specs'
)
if changed_request.is_actionable():
scm = get_scm()
if not scm:
raise InvalidSpecConstraint(
'The --changed-* options are not available without a recognized SCM (usually git).'
)
changed_files = cls.changed_files(
scm,
changes_since=changed_request.changes_since,
diffspec=changed_request.diffspec)
# We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
# alternate target roots.
request = OwnersRequest(sources=tuple(changed_files),
include_dependees=str(changed_request.include_dependees))
changed_addresses, = session.product_request(BuildFileAddresses, [request])
logger.debug('changed addresses: %s', changed_addresses)
dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in changed_addresses)
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
if owned_files:
# We've been provided no spec roots (e.g. `./pants list`) AND a owner request. Compute
# alternate target roots.
request = OwnersRequest(sources=tuple(owned_files), include_dependees=str('none'))
owner_addresses, = session.product_request(BuildFileAddresses, [request])
logger.debug('owner addresses: %s', owner_addresses)
dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in owner_addresses)
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
return TargetRoots(spec_roots) | :param Options options: An `Options` instance to use.
:param session: The Scheduler session
:param string build_root: The build root. |
def actor2ImageData(actor, spacing=(1, 1, 1)):
"""
Convert a mesh into a volume representation as ``vtkImageData``
where the foreground (exterior) voxels are 1 and the background
(interior) voxels are 0.
Internally the ``vtkPolyDataToImageStencil`` class is used.
.. hint:: |mesh2volume| |mesh2volume.py|_
"""
# https://vtk.org/Wiki/VTK/Examples/Cxx/PolyData/PolyDataToImageData
pd = actor.polydata()
whiteImage = vtk.vtkImageData()
bounds = pd.GetBounds()
whiteImage.SetSpacing(spacing)
# compute dimensions
dim = [0, 0, 0]
for i in [0, 1, 2]:
dim[i] = int(np.ceil((bounds[i * 2 + 1] - bounds[i * 2]) / spacing[i]))
whiteImage.SetDimensions(dim)
whiteImage.SetExtent(0, dim[0] - 1, 0, dim[1] - 1, 0, dim[2] - 1)
origin = [0, 0, 0]
origin[0] = bounds[0] + spacing[0] / 2
origin[1] = bounds[2] + spacing[1] / 2
origin[2] = bounds[4] + spacing[2] / 2
whiteImage.SetOrigin(origin)
whiteImage.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
# fill the image with foreground voxels:
inval = 255
count = whiteImage.GetNumberOfPoints()
for i in range(count):
whiteImage.GetPointData().GetScalars().SetTuple1(i, inval)
# polygonal data --> image stencil:
pol2stenc = vtk.vtkPolyDataToImageStencil()
pol2stenc.SetInputData(pd)
pol2stenc.SetOutputOrigin(origin)
pol2stenc.SetOutputSpacing(spacing)
pol2stenc.SetOutputWholeExtent(whiteImage.GetExtent())
pol2stenc.Update()
# cut the corresponding white image and set the background:
outval = 0
imgstenc = vtk.vtkImageStencil()
imgstenc.SetInputData(whiteImage)
imgstenc.SetStencilConnection(pol2stenc.GetOutputPort())
imgstenc.ReverseStencilOff()
imgstenc.SetBackgroundValue(outval)
imgstenc.Update()
return imgstenc.GetOutput() | Convert a mesh into a volume representation as ``vtkImageData``
where the foreground (exterior) voxels are 1 and the background
(interior) voxels are 0.
Internally the ``vtkPolyDataToImageStencil`` class is used.
.. hint:: |mesh2volume| |mesh2volume.py|_ |
def control_gate(control: Qubit, gate: Gate) -> Gate:
"""Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit. """
if control in gate.qubits:
raise ValueError('Gate and control qubits overlap')
qubits = [control, *gate.qubits]
gate_tensor = join_gates(P0(control), identity_gate(gate.qubits)).tensor \
+ join_gates(P1(control), gate).tensor
controlled_gate = Gate(qubits=qubits, tensor=gate_tensor)
return controlled_gate | Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit. |
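In operator form, the tensor assembled above is the standard projector decomposition of a controlled unitary,
\mathrm{C}U = |0\rangle\langle 0| \otimes I + |1\rangle\langle 1| \otimes U,
i.e. the target qubits see the identity when the control is |0> and U when it is |1>.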
def _add_logo(fig, x=10, y=25, zorder=100, which='metpy', size='small', **kwargs):
"""Add the MetPy or Unidata logo to a figure.
Adds an image to the figure.
Parameters
----------
fig : `matplotlib.figure`
The `figure` instance used for plotting
x : int
x position padding in pixels
y : float
y position padding in pixels
zorder : int
The zorder of the logo
which : str
Which logo to plot 'metpy' or 'unidata'
size : str
Size of logo to be used. Can be 'small' for 75 px square or 'large' for
150 px square.
Returns
-------
`matplotlib.image.FigureImage`
The `matplotlib.image.FigureImage` instance created
"""
fname_suffix = {'small': '_75x75.png',
'large': '_150x150.png'}
fname_prefix = {'unidata': 'unidata',
'metpy': 'metpy'}
try:
fname = fname_prefix[which] + fname_suffix[size]
fpath = posixpath.join('_static', fname)
except KeyError:
raise ValueError('Unknown logo size or selection')
logo = imread(pkg_resources.resource_stream('metpy.plots', fpath))
return fig.figimage(logo, x, y, zorder=zorder, **kwargs) | Add the MetPy or Unidata logo to a figure.
Adds an image to the figure.
Parameters
----------
fig : `matplotlib.figure`
The `figure` instance used for plotting
x : int
x position padding in pixels
y : float
y position padding in pixels
zorder : int
The zorder of the logo
which : str
Which logo to plot 'metpy' or 'unidata'
size : str
Size of logo to be used. Can be 'small' for 75 px square or 'large' for
150 px square.
Returns
-------
`matplotlib.image.FigureImage`
The `matplotlib.image.FigureImage` instance created |
def as_html(self, max_rows=0):
"""Format table as HTML."""
if not max_rows or max_rows > self.num_rows:
max_rows = self.num_rows
omitted = max(0, self.num_rows - max_rows)
labels = self.labels
lines = [
(0, '<table border="1" class="dataframe">'),
(1, '<thead>'),
(2, '<tr>'),
(3, ' '.join('<th>' + label + '</th>' for label in labels)),
(2, '</tr>'),
(1, '</thead>'),
(1, '<tbody>'),
]
fmts = self._get_column_formatters(max_rows, True)
for row in itertools.islice(self.rows, max_rows):
lines += [
(2, '<tr>'),
(3, ' '.join('<td>' + fmt(v, label=False) + '</td>' for
v, fmt in zip(row, fmts))),
(2, '</tr>'),
]
lines.append((1, '</tbody>'))
lines.append((0, '</table>'))
if omitted:
lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted)))
return '\n'.join(4 * indent * ' ' + text for indent, text in lines) | Format table as HTML. |
def _learning_rate_decay(hparams, warmup_steps=0):
"""Learning rate decay multiplier."""
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = _global_step(hparams)
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
# Cycle the rate linearly by 10x every warmup_steps, up and down.
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float( # Normalize to the interval [-1, 1].
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme) | Learning rate decay multiplier. |
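For reference, the "cosine" branch above folds the step counter into a triangle wave and passes it through a cosine: with global step t and c = learning_rate_cosine_cycle_steps,
p = c - \bigl|\, c - (t \bmod 2c) \bigr|,
\qquad
\mathrm{decay}(t) = \tfrac{1}{2}\Bigl(1 + \cos\frac{\pi p}{c}\Bigr),
so the multiplier oscillates between 1 (at the start of each cycle) and 0 (at the midpoint) with period 2c.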
def clean(self):
"""Remove internal fields"""
doc = self._resource
result = {k: v for k, v in doc.iteritems() if k not in
self.internal_fields}
if '_id' in doc and 'id' not in result:
result['id'] = doc['_id']
return result | Remove internal fields |
def lcsr(s1, s2):
'''longest common sequence ratio
>>> lcsr('ab', 'abcd')
0.5
'''
if s1 == s2:
return 1.0
return llcs(s1, s2) / max(1, len(s1), len(s2)) | longest common sequence ratio
>>> lcsr('ab', 'abcd')
0.5 |
def use(ctx, shortcut):
"""Use a shortcut."""
git_dir = current_git_dir()
if git_dir is None:
output(NOT_GIT_REPO_MSG)
exit(1)
repo_root = os.path.dirname(git_dir)
config = get_config(repo_root)
try:
use_shortcut = config.shortcuts.get(shortcut)
while use_shortcut.extends is not None:
base = config.shortcuts.get(use_shortcut.extends)
use_shortcut = base.extend(use_shortcut)
except config.shortcuts.DoesNotExist as err:
output('{}\nAvailable shortcuts:'.format(err.message))
for s in config.shortcuts:
output(s.name)
exit(1)
else:
options = use_shortcut.options
for flag in use_shortcut.flags:
options[flag.replace('-', '_')] = True
options_string = ''
for k, v in sorted(iteritems(options)):
options_string += ' --{}'.format(k.replace('_', '-'))
if v is not True:
options_string += ' {}'.format(v)
output('#{{dim}}$ therapist run{}\n'.format(options_string))
ctx.invoke(run, **options) | Use a shortcut. |