def build_command_tree(pattern, cmd_params):
"""
Recursively fill in a command tree in cmd_params according to a docopt-parsed "pattern" object.
"""
from docopt import Either, Optional, OneOrMore, Required, Option, Command, Argument
if type(pattern) in [Either, Optional, OneOrMore]:
for child in pattern.children:
build_command_tree(child, cmd_params)
elif type(pattern) in [Required]:
for child in pattern.children:
cmd_params = build_command_tree(child, cmd_params)
elif type(pattern) in [Option]:
suffix = "=" if pattern.argcount else ""
if pattern.short:
cmd_params.options.append(pattern.short + suffix)
if pattern.long:
cmd_params.options.append(pattern.long + suffix)
elif type(pattern) in [Command]:
cmd_params = cmd_params.get_subcommand(pattern.name)
elif type(pattern) in [Argument]:
cmd_params.arguments.append(pattern.name)
return cmd_params
def append_columns(self, colnames, values, **kwargs):
"""Append new columns to the table.
When appending a single column, ``values`` can be a scalar or an
array of either length 1 or the same length as this array (the one
it's appended to). In case of multiple columns, values must have
the shape ``list(arrays)``, and the dimension of each array
has to match the length of this array.
See the docs for ``numpy.lib.recfunctions.append_fields`` for an
explanation of the remaining options.
"""
n = len(self)
if np.isscalar(values):
values = np.full(n, values)
values = np.atleast_1d(values)
if not isinstance(colnames, str) and len(colnames) > 1:
values = np.atleast_2d(values)
self._check_column_length(values, n)
if values.ndim == 1:
if len(values) > n:
raise ValueError("New Column is longer than existing table!")
elif len(values) > 1 and len(values) < n:
raise ValueError(
"New Column is shorter than existing table, "
"but not just one element!"
)
elif len(values) == 1:
values = np.full(n, values[0])
new_arr = rfn.append_fields(
self, colnames, values, usemask=False, asrecarray=True, **kwargs
)
return self.__class__(
new_arr,
h5loc=self.h5loc,
split_h5=self.split_h5,
name=self.name,
h5singleton=self.h5singleton
)
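# --- Hedged usage sketch (not from the original source) ---------------------------
# The docstring above defers to numpy.lib.recfunctions.append_fields, so this shows
# that call directly on a plain structured array; the Table subclass and its h5loc/
# split_h5/name/h5singleton attributes are assumed and omitted here.
import numpy as np
from numpy.lib import recfunctions as rfn

base = np.array([(1, 2.0), (3, 4.0)], dtype=[("a", "i4"), ("b", "f8")])
# Append a single column "c"; a scalar value would first be broadcast to len(base),
# mirroring the np.full(n, values) branch in append_columns above.
extended = rfn.append_fields(base, "c", np.array([10.0, 20.0]), usemask=False, asrecarray=True)
print(extended.dtype.names)  # ('a', 'b', 'c')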
def getPeer(self, url):
"""
Finds a peer by URL and returns the first peer record with that URL.
"""
peers = list(models.Peer.select().where(models.Peer.url == url))
if len(peers) == 0:
raise exceptions.PeerNotFoundException(url)
return peers[0]
def _parse_table(
self, parent_name=None
): # type: (Optional[str]) -> Tuple[Key, Union[Table, AoT]]
"""
Parses a table element.
"""
if self._current != "[":
raise self.parse_error(
InternalParserError, "_parse_table() called on non-bracket character."
)
indent = self.extract()
self.inc() # Skip opening bracket
if self.end():
raise self.parse_error(UnexpectedEofError)
is_aot = False
if self._current == "[":
if not self.inc():
raise self.parse_error(UnexpectedEofError)
is_aot = True
# Key
self.mark()
while self._current != "]" and self.inc():
if self.end():
raise self.parse_error(UnexpectedEofError)
pass
name = self.extract()
if not name.strip():
raise self.parse_error(EmptyTableNameError)
key = Key(name, sep="")
name_parts = tuple(self._split_table_name(name))
missing_table = False
if parent_name:
parent_name_parts = tuple(self._split_table_name(parent_name))
else:
parent_name_parts = tuple()
if len(name_parts) > len(parent_name_parts) + 1:
missing_table = True
name_parts = name_parts[len(parent_name_parts) :]
values = Container(True)
self.inc() # Skip closing bracket
if is_aot:
# TODO: Verify close bracket
self.inc()
cws, comment, trail = self._parse_comment_trail()
result = Null()
if len(name_parts) > 1:
if missing_table:
# Missing super table
# i.e. a table initialized like this: [foo.bar]
# without initializing [foo]
#
# So we have to create the parent tables
table = Table(
Container(True),
Trivia(indent, cws, comment, trail),
is_aot and name_parts[0].key in self._aot_stack,
is_super_table=True,
name=name_parts[0].key,
)
result = table
key = name_parts[0]
for i, _name in enumerate(name_parts[1:]):
if _name in table:
child = table[_name]
else:
child = Table(
Container(True),
Trivia(indent, cws, comment, trail),
is_aot and i == len(name_parts[1:]) - 1,
is_super_table=i < len(name_parts[1:]) - 1,
name=_name.key,
display_name=name if i == len(name_parts[1:]) - 1 else None,
)
if is_aot and i == len(name_parts[1:]) - 1:
table.append(_name, AoT([child], name=table.name, parsed=True))
else:
table.append(_name, child)
table = child
values = table.value
else:
if name_parts:
key = name_parts[0]
while not self.end():
item = self._parse_item()
if item:
_key, item = item
if not self._merge_ws(item, values):
if _key is not None and _key.is_dotted():
self._handle_dotted_key(values, _key, item)
else:
values.append(_key, item)
else:
if self._current == "[":
is_aot_next, name_next = self._peek_table()
if self._is_child(name, name_next):
key_next, table_next = self._parse_table(name)
values.append(key_next, table_next)
# Picking up any sibling
while not self.end():
_, name_next = self._peek_table()
if not self._is_child(name, name_next):
break
key_next, table_next = self._parse_table(name)
values.append(key_next, table_next)
break
else:
raise self.parse_error(
InternalParserError,
"_parse_item() returned None on a non-bracket character.",
)
if isinstance(result, Null):
result = Table(
values,
Trivia(indent, cws, comment, trail),
is_aot,
name=name,
display_name=name,
)
if is_aot and (not self._aot_stack or name != self._aot_stack[-1]):
result = self._parse_aot(result, name)
return key, result
def addresses_for_key(gpg, key):
"""
Takes a key and extracts the email addresses for it.
"""
fingerprint = key["fingerprint"]
addresses = []
for key in gpg.list_keys():
if key["fingerprint"] == fingerprint:
addresses.extend([address.split("<")[-1].strip(">")
for address in key["uids"] if address])
return addresses
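# --- Hedged usage sketch (assumes the python-gnupg package and a non-empty local
# keyring; none of the values below come from the original project) ----------------
import gnupg

gpg = gnupg.GPG()
keys = gpg.list_keys()  # each entry is a dict with 'fingerprint' and 'uids' entries
if keys:
    print(addresses_for_key(gpg, keys[0]))  # e.g. ['user@example.com']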
def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int or datetime
"""
minutes = self._get_minutes(minutes)
if minutes is not None:
return self._store.put(self.tagged_item_key(key), value, minutes)
def execute(self, command):
"""
Executes command on remote hosts
:type command: str
:param command: command to be run on remote host
"""
try:
if self.ssh.get_transport() is not None:
logger.debug('{0}: executing "{1}"'.format(self.target_address,
command))
stdin, stdout, stderr = self.ssh.exec_command(command)
return dict(zip(['stdin', 'stdout', 'stderr'],
[stdin, stdout, stderr]))
else:
raise SSHConnectionError(self.target_address,
"ssh transport is closed")
except (AuthenticationException, SSHException,
ChannelException, SocketError) as ex:
logger.critical(("{0} execution failed on {1} with exception:"
"{2}".format(command, self.target_address,
ex)))
raise SSHCommandError(self.target_address, command, ex)
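# --- Hedged usage sketch (assumes self.ssh is a connected paramiko.SSHClient and
# self.target_address its host; the host, credentials and command below are
# placeholders, not values from the original project) ------------------------------
import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("host.example.com", username="user", password="secret")
# execute() above wraps exactly this call and zips the three streams into a dict.
stdin, stdout, stderr = ssh.exec_command("uname -a")
print(stdout.read().decode())
ssh.close()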
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if 'kernel' in name or 'proj' in name:
array = np.transpose(array)
if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
print("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + '/Adam', None)
tf_weights.pop(name + '/Adam_1', None)
print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
def strains(self):
"""
Create a dictionary of SEQID: OLNID from the supplied strains.csv file.
"""
with open(os.path.join(self.path, 'strains.csv')) as strains:
next(strains)
for line in strains:
oln, seqid = line.split(',')
self.straindict[oln] = seqid.rstrip()
self.strainset.add(oln)
logging.debug(oln)
if self.debug:
break
async def get_messages(self, name):
"""Get stored messages for a service.
Args:
name (string): The name of the service to get messages from.
Returns:
list(ServiceMessage): A list of the messages stored for this service
"""
resp = await self.send_command(OPERATIONS.CMD_QUERY_MESSAGES, {'name': name},
MESSAGES.QueryMessagesResponse, timeout=5.0)
return [states.ServiceMessage.FromDictionary(x) for x in resp]
def encode_request(name, expected, updated):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, expected, updated))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_long(expected)
client_message.append_long(updated)
client_message.update_frame_length()
return client_message
@contextmanager  # from contextlib; needed so the documented 'with' usage works
def defaulted_config(modules, params=None, yaml=None, filename=None,
config=None, validate=True):
"""Context manager version of :func:`set_default_config()`.
Use this with a Python 'with' statement, like
>>> config_yaml = '''
... toplevel:
... param: value
... '''
>>> with yakonfig.defaulted_config([toplevel], yaml=config_yaml) as config:
... assert 'param' in config['toplevel']
... assert yakonfig.get_global_config('toplevel', 'param') == 'value'
On exit the global configuration is restored to its previous state
(if any).
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.Configurable`
:param dict params: dictionary of command-line argument key to values
:param str yaml: global configuration file
:param str filename: location of global configuration file
:param dict config: global configuration object
:param bool validate: check configuration after creating
:return: the new global configuration
"""
with _temporary_config():
set_default_config(modules, params=params, yaml=yaml,
filename=filename, config=config, validate=validate)
yield get_global_config()
def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.):
"""
Initialise from provided coefficients
prune: Whether to prune the negligible coefficients
vscale: the scale to use when pruning
"""
coeffs = np.asarray(chebcoeff)
if prune:
N = self._cutoff(coeffs, vscale)
pruned_coeffs = coeffs[:N]
else:
pruned_coeffs = coeffs
values = self.polyval(pruned_coeffs)
return self(values, domain, vscale)
def get_random(min_pt, max_pt):
"""Returns a random vector in the given range."""
result = Point(random.random(), random.random())
return result.get_component_product(max_pt - min_pt) + min_pt
def list_subcommand(vcard_list, parsable):
"""Print a user friendly contacts table.
:param vcard_list: the vcards to print
:type vcard_list: list of carddav_object.CarddavObject
:param parsable: machine readable output: columns divided by tabulator (\t)
:type parsable: bool
:returns: None
:rtype: None
"""
if not vcard_list:
if not parsable:
print("Found no contacts")
sys.exit(1)
elif parsable:
contact_line_list = []
for vcard in vcard_list:
if config.display_by_name() == "first_name":
name = vcard.get_first_name_last_name()
else:
name = vcard.get_last_name_first_name()
contact_line_list.append('\t'.join([vcard.get_uid(), name,
vcard.address_book.name]))
print('\n'.join(contact_line_list))
else:
list_contacts(vcard_list)
def _init(self, run_conf, run_number=None):
'''Initialization before a new run.
'''
self.stop_run.clear()
self.abort_run.clear()
self._run_status = run_status.running
self._write_run_number(run_number)
self._init_run_conf(run_conf)
def interpolate(self, factor, minKerning, maxKerning, round=True, suppressError=True):
"""
Interpolates all pairs between two :class:`BaseKerning` objects:
**minKerning** and **maxKerning**. The interpolation occurs on a
0 to 1.0 range where **minKerning** is located at 0 and
**maxKerning** is located at 1.0. The kerning data is replaced by
the interpolated kerning.
* **factor** is the interpolation value. It may be less than 0
and greater than 1.0. It may be an :ref:`type-int-float`,
``tuple`` or ``list``. If it is a ``tuple`` or ``list``,
the first number indicates the x factor and the second number
indicates the y factor.
* **round** is a ``bool`` indicating if the result should be rounded to
``int``\s. The default behavior is to round interpolated kerning.
* **suppressError** is a ``bool`` indicating if incompatible data should
be ignored or if an error should be raised when such incompatibilities
are found. The default behavior is to ignore incompatible data.
>>> myKerning.interpolate(kerningOne, kerningTwo)
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minKerning, BaseKerning):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.") % (
self.__class__.__name__, minKerning.__class__.__name__))
if not isinstance(maxKerning, BaseKerning):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.") % (
self.__class__.__name__, maxKerning.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minKerning, maxKerning,
round=round, suppressError=suppressError)
def construct(self, request, service=None, http_args=None, **kwargs):
"""
Constructs a client assertion and signs it with a key.
The request is modified as a side effect.
:param request: The request
:param service: A :py:class:`oidcservice.service.Service` instance
:param http_args: HTTP arguments
:param kwargs: Extra arguments
:return: Constructed HTTP arguments, in this case none
"""
if 'client_assertion' in kwargs:
request["client_assertion"] = kwargs['client_assertion']
if 'client_assertion_type' in kwargs:
request[
'client_assertion_type'] = kwargs['client_assertion_type']
else:
request["client_assertion_type"] = JWT_BEARER
elif 'client_assertion' in request:
if 'client_assertion_type' not in request:
request["client_assertion_type"] = JWT_BEARER
else:
algorithm = None
_context = service.service_context
# audience for the signed JWT depends on which endpoint
# we're talking to.
if kwargs['authn_endpoint'] in ['token_endpoint']:
try:
algorithm = _context.behaviour[
'token_endpoint_auth_signing_alg']
except (KeyError, AttributeError):
pass
audience = _context.provider_info['token_endpoint']
else:
audience = _context.provider_info['issuer']
if not algorithm:
algorithm = self.choose_algorithm(**kwargs)
ktype = alg2keytype(algorithm)
try:
if 'kid' in kwargs:
signing_key = [self.get_key_by_kid(kwargs["kid"], algorithm,
_context)]
elif ktype in _context.kid["sig"]:
try:
signing_key = [self.get_key_by_kid(
_context.kid["sig"][ktype], algorithm, _context)]
except KeyError:
signing_key = self.get_signing_key(algorithm, _context)
else:
signing_key = self.get_signing_key(algorithm, _context)
except NoMatchingKey as err:
logger.error("%s" % sanitize(err))
raise
try:
_args = {'lifetime': kwargs['lifetime']}
except KeyError:
_args = {}
# construct the signed JWT with the assertions and add
# it as value to the 'client_assertion' claim of the request
request["client_assertion"] = assertion_jwt(
_context.client_id, signing_key, audience,
algorithm, **_args)
request["client_assertion_type"] = JWT_BEARER
try:
del request["client_secret"]
except KeyError:
pass
# If client_id is not required to be present, remove it.
if not request.c_param["client_id"][VREQUIRED]:
try:
del request["client_id"]
except KeyError:
pass
return {}
def getAccountInfo(self, fields=None):
"""Use this method to get information about a Telegraph account.
:param fields: List of account fields to return.
Available fields: short_name, author_name, author_url, auth_url, page_count.
:type fields: list
:returns: Account object on success.
"""
return self.make_method("getAccountInfo", {
"access_token": self.access_token,
"fields": json.dumps(fields) if fields else None
})
def path(self, path):
"""
Path of the IOU executable.
:param path: path to the IOU image executable
"""
self._path = self.manager.get_abs_image_path(path)
log.info('IOU "{name}" [{id}]: IOU image updated to "{path}"'.format(name=self._name, id=self._id, path=self._path))
def run(self):
""" Statistics logger job callback.
"""
try:
proxy = config_ini.engine.open()
self.LOG.info("Stats for %s - up %s, %s" % (
config_ini.engine.engine_id,
fmt.human_duration(proxy.system.time() - config_ini.engine.startup, 0, 2, True).strip(),
proxy
))
except (error.LoggableError, xmlrpc.ERRORS) as exc:
self.LOG.warn(str(exc))
async def chat_send(self, message: str, team_only: bool):
""" Writes a message to the chat """
ch = ChatChannel.Team if team_only else ChatChannel.Broadcast
await self._execute(
action=sc_pb.RequestAction(
actions=[sc_pb.Action(action_chat=sc_pb.ActionChat(channel=ch.value, message=message))]
)
)
def _msg(self, label, *msg):
"""
Prints a message with a label
"""
if self.quiet is False:
txt = self._unpack_msg(*msg)
print("[" + label + "] " + txt) | Prints a message with a label |
def random_filtered_sources(sources, srcfilter, seed):
"""
:param sources: a list of sources
:param srcfilter: a SourceFilter instance
:param seed: a random seed
:returns: an empty list or a list with a single filtered source
"""
random.seed(seed)
while sources:
src = random.choice(sources)
if srcfilter.get_close_sites(src) is not None:
return [src]
sources.remove(src)
return []
def _schema(self, path, obj, app):
""" fulfill 'name' field for objects under
'#/definitions' and with 'properties'
"""
if path.startswith('#/definitions'):
last_token = jp_split(path)[-1]
if app.version == '1.2':
obj.update_field('name', scope_split(last_token)[-1])
else:
obj.update_field('name', last_token)
def _get_api_content(self):
"""Updates class api content by calling Github api and storing result"""
if GITHUB_TOKEN is not None:
self.add_params_to_url({
"access_token": GITHUB_TOKEN
})
api_content_response = requests.get(self.api_url)
self.api_content = json.loads(
api_content_response.text
)
def category(self, value):
"""
Setter for **self.__category** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"category", value)
self.__category = value
def character_to_vk(self, character):
""" Returns a tuple of (scan_code, modifiers) where ``scan_code`` is a numeric scan code
and ``modifiers`` is an array of string modifier names (like 'shift') """
for vk in self.non_layout_keys:
if self.non_layout_keys[vk] == character.lower():
return (vk, [])
for vk in self.layout_specific_keys:
if self.layout_specific_keys[vk][0] == character:
return (vk, [])
elif self.layout_specific_keys[vk][1] == character:
return (vk, ['shift'])
raise ValueError("Unrecognized character: {}".format(character)) | Returns a tuple of (scan_code, modifiers) where ``scan_code`` is a numeric scan code
and ``modifiers`` is an array of string modifier names (like 'shift') |
def sky_bbox_ll(self):
"""
The sky coordinates of the lower-left vertex of the minimal
bounding box of the source segment, returned as a
`~astropy.coordinates.SkyCoord` object.
The bounding box encloses all of the source segment pixels in
their entirety, thus the vertices are at the pixel *corners*.
"""
if self._wcs is not None:
return pixel_to_skycoord(self.xmin.value - 0.5,
self.ymin.value - 0.5,
self._wcs, origin=0)
else:
return None
def _refresh_channel(self):
'''
Reset the channel, in the event of an interruption
'''
self.channel = salt.transport.client.ReqChannel.factory(self.opts)
return self.channel
def column_types(self):
"""Return a dict mapping column name to type for all columns in table
"""
column_types = {}
for c in self.sqla_columns:
column_types[c.name] = c.type
return column_types
def delete(method, hmc, uri, uri_parms, logon_required):
"""Operation: Delete Hipersocket (requires DPM mode)."""
try:
adapter = hmc.lookup_by_uri(uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = adapter.manager.parent
assert cpc.dpm_enabled
adapter.manager.remove(adapter.oid)
def convert_descriptor_and_rows(self, descriptor, rows):
"""Convert descriptor and rows to Pandas
"""
# Prepare
primary_key = None
schema = tableschema.Schema(descriptor)
if len(schema.primary_key) == 1:
primary_key = schema.primary_key[0]
elif len(schema.primary_key) > 1:
message = 'Multi-column primary keys are not supported'
raise tableschema.exceptions.StorageError(message)
# Get data/index
data_rows = []
index_rows = []
jtstypes_map = {}
for row in rows:
values = []
index = None
for field, value in zip(schema.fields, row):
try:
if isinstance(value, float) and np.isnan(value):
value = None
if value and field.type == 'integer':
value = int(value)
value = field.cast_value(value)
except tableschema.exceptions.CastError:
value = json.loads(value)
# http://pandas.pydata.org/pandas-docs/stable/gotchas.html#support-for-integer-na
if value is None and field.type in ('number', 'integer'):
jtstypes_map[field.name] = 'number'
value = np.NaN
if field.name == primary_key:
index = value
else:
values.append(value)
data_rows.append(tuple(values))
index_rows.append(index)
# Get dtypes
dtypes = []
for field in schema.fields:
if field.name != primary_key:
field_name = field.name
if six.PY2:
field_name = field.name.encode('utf-8')
dtype = self.convert_type(jtstypes_map.get(field.name, field.type))
dtypes.append((field_name, dtype))
# Create dataframe
index = None
columns = schema.headers
array = np.array(data_rows, dtype=dtypes)
if primary_key:
index_field = schema.get_field(primary_key)
index_dtype = self.convert_type(index_field.type)
index_class = pd.Index
if index_field.type in ['datetime', 'date']:
index_class = pd.DatetimeIndex
index = index_class(index_rows, name=primary_key, dtype=index_dtype)
columns = filter(lambda column: column != primary_key, schema.headers)
dataframe = pd.DataFrame(array, index=index, columns=columns)
return dataframe
def _set_neighbor_route_map_name_direction_out(self, v, load=False):
"""
Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """neighbor_route_map_name_direction_out must be of a type compatible with common-def:name-string64""",
'defined-type': "common-def:name-string64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True)""",
})
self.__neighbor_route_map_name_direction_out = t
if hasattr(self, '_set'):
self._set()
def event(self, event_data, priority="normal", event_method="EVENT"):
"""This function will send event packets to the server. This is the
main method you would use to send data from your application to the
server.
Whenever an event is sent to the server, a universally unique event id
(euuid) is created for each event and stored in the "event_uuids"
dictionary. This dictionary contains a list of all events that are
currently waiting for a response from the server. The event will only
be removed from this dictionary if the server responds with LEGAL or
ILLEGAL or if the request times out.
Args:
event_data (dict): The event data to send to the server. This data
will be passed through the server's middleware to determine if the
event is legal or not, and then processed by the server it is legal
priority (string): The event's priority informs the server of whether
or not the client is going to wait for a confirmation message from
the server indicating whether its event was LEGAL or ILLEGAL.
Setting this to "normal" informs the server that the client will
wait for a response from the server before processing the event.
Setting this to "high" informs the server that the client will NOT
wait for a response. Defaults to "normal".
event_method (string): The type of event to send to the server. Valid
methods are "EVENT", "AUTH". Defaults to "EVENT".
Returns:
A universally unique identifier (uuid) of the event.
Examples:
>>> event_data
>>> priority
"""
logger.debug("event: " + str(event_data))
# Generate an event UUID for this event
euuid = uuid.uuid1()
logger.debug("<%s> <euuid:%s> Sending event data to server: "
"%s" % (str(self.cuuid), str(euuid), str(self.server)))
if not self.listener.listening:
logger.warning("Neteria client is not listening.")
# If we're not even registered, don't even bother.
if not self.registered:
logger.warning("<%s> <euuid:%s> Client is currently not registered. "
"Event not sent." % (str(self.cuuid), str(euuid)))
return False
# Send the event data to the server
packet = {"method": event_method,
"cuuid": str(self.cuuid),
"euuid": str(euuid),
"event_data": event_data,
"timestamp": str(datetime.now()),
"retry": 0,
"priority": priority}
self.listener.send_datagram(
serialize_data(packet, self.compression,
self.encryption, self.server_key),
self.server)
logger.debug("<%s> Sending EVENT Packet: %s" % (str(self.cuuid),
pformat(packet)))
# Set the sent event to our event buffer to see if we need to roll back
# or anything
self.event_uuids[str(euuid)] = packet
# Now we need to reschedule a timeout/retransmit check
logger.debug("<%s> Scheduling retry in %s seconds" % (str(self.cuuid),
str(self.timeout)))
self.listener.call_later(self.timeout, self.retransmit, packet)
return euuid
def filter_kepler_lcdict(lcdict,
filterflags=True,
nanfilter='sap,pdc',
timestoignore=None):
'''This filters the Kepler `lcdict`, removing nans and bad
observations.
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'}
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = lcdict['time'].size
filterind = lcdict['sap_quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
if nanfilter and nanfilter == 'sap,pdc':
notnanind = (
npisfinite(lcdict['sap']['sap_flux']) &
npisfinite(lcdict['pdc']['pdcsap_flux']) &
npisfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'sap':
notnanind = (
npisfinite(lcdict['sap']['sap_flux']) &
npisfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'pdc':
notnanind = (
npisfinite(lcdict['pdc']['pdcsap_flux']) &
npisfinite(lcdict['time'])
)
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = npfull_like(lcdict['time'], True, dtype=np.bool_)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1))
exclind = exclind & thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
return lcdict
def get_selections(pattern=None, state=None):
'''
View package state from the dpkg database.
Returns a dict of dicts containing the state, and package names:
.. code-block:: python
{'<host>':
{'<state>': ['pkg1',
...
]
},
...
}
CLI Example:
.. code-block:: bash
salt '*' pkg.get_selections
salt '*' pkg.get_selections 'python-*'
salt '*' pkg.get_selections state=hold
salt '*' pkg.get_selections 'openssh*' state=hold
'''
ret = {}
cmd = ['dpkg', '--get-selections']
cmd.append(pattern if pattern else '*')
stdout = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
ret = _parse_selections(stdout)
if state:
return {state: ret.get(state, [])}
return ret
def hard_equals(a, b):
"""Implements the '===' operator."""
if type(a) != type(b):
return False
return a == b
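# Minimal illustration (not from the original source): '===' treats equal values of
# different types as unequal, unlike Python's '=='.
assert hard_equals(1, 1) is True
assert hard_equals(1, 1.0) is False  # 1 == 1.0 is True, but int != float
assert hard_equals("1", 1) is False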
def set_parameter(self, name, value):
"""Sets a parameter for the BaseForecastingMethod.
:param string name: Name of the parameter.
:param numeric value: Value of the parameter.
"""
# set the forecast-until variable to None if necessary
if name == "valuesToForecast":
self._forecastUntil = None
# continue with the parents implementation
return super(BaseForecastingMethod, self).set_parameter(name, value)
def from_bma_history(cls: Type[TransactionType], currency: str, tx_data: Dict) -> TransactionType:
"""
Get the transaction instance from json
:param currency: the currency of the tx
:param tx_data: json data of the transaction
:return:
"""
tx_data = tx_data.copy()
tx_data["currency"] = currency
for data_list in ('issuers', 'outputs', 'inputs', 'unlocks', 'signatures'):
tx_data['multiline_{0}'.format(data_list)] = '\n'.join(tx_data[data_list])
if tx_data["version"] >= 3:
signed_raw = """Version: {version}
Type: Transaction
Currency: {currency}
Blockstamp: {blockstamp}
Locktime: {locktime}
Issuers:
{multiline_issuers}
Inputs:
{multiline_inputs}
Unlocks:
{multiline_unlocks}
Outputs:
{multiline_outputs}
Comment: {comment}
{multiline_signatures}
""".format(**tx_data)
else:
signed_raw = """Version: {version}
Type: Transaction
Currency: {currency}
Locktime: {locktime}
Issuers:
{multiline_issuers}
Inputs:
{multiline_inputs}
Unlocks:
{multiline_unlocks}
Outputs:
{multiline_outputs}
Comment: {comment}
{multiline_signatures}
""".format(**tx_data)
return cls.from_signed_raw(signed_raw)
def authorized_tenants(self):
"""Returns a memoized list of tenants this user may access."""
if self.is_authenticated and self._authorized_tenants is None:
endpoint = self.endpoint
try:
self._authorized_tenants = utils.get_project_list(
user_id=self.id,
auth_url=endpoint,
token=self.unscoped_token,
is_federated=self.is_federated)
except (keystone_exceptions.ClientException,
keystone_exceptions.AuthorizationFailure):
LOG.exception('Unable to retrieve project list.')
return self._authorized_tenants or []
def img(self):
'''return a cv image for the icon'''
SlipThumbnail.img(self)
if self.rotation:
# rotate the image
mat = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.GetRotationMatrix2D((self.width/2,self.height/2),
-self.rotation, 1.0, mat)
self._rotated = cv.CloneImage(self._img)
cv.WarpAffine(self._img, self._rotated, mat)
else:
self._rotated = self._img
return self._rotated
def point_in_circle(pt, center, radius):
'''
Returns true if a given point is located inside (or on the border) of a circle.
>>> point_in_circle((0, 0), (0, 0), 1)
True
>>> point_in_circle((1, 0), (0, 0), 1)
True
>>> point_in_circle((1, 1), (0, 0), 1)
False
'''
d = np.linalg.norm(np.asarray(pt) - np.asarray(center))
return d <= radius
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files
def argdistincts(self, nested=False):
"""
Return all unique combinations (up to permutation) of two elements,
taken without replacement from the indices of the jagged dimension.
Combinations are ordered lexicographically.
nested: Return a doubly-jagged array where the first jagged dimension
matches the shape of this array
"""
out = self._argdistincts(absolute=False)
if nested:
out = self.JaggedArray.fromcounts(self.numpy.maximum(0, self.counts - 1), self.JaggedArray.fromcounts(self.index[:, :0:-1].flatten(), out._content))
return out
def write_defaults(self):
"""Create default config file and reload.
"""
self.defaults.write()
self.reset_defaults(self.defaults.filename)
def term_symbols(self):
"""
All possible Russell-Saunders term symbol of the Element
eg. L = 1, n_e = 2 (s2)
returns
[['1D2'], ['3P0', '3P1', '3P2'], ['1S0']]
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
L, v_e = self.valence
# for one electron in subshell L
ml = list(range(-L, L + 1))
ms = [1 / 2, -1 / 2]
# all possible configurations of ml,ms for one e in subshell L
ml_ms = list(product(ml, ms))
# Number of possible configurations for r electrons in subshell L.
n = (2 * L + 1) * 2
# the combination of n_e electrons configurations
# C^{n}_{n_e}
e_config_combs = list(combinations(range(n), v_e))
# Total ML = sum(ml1, ml2), Total MS = sum(ms1, ms2)
TL = [sum([ml_ms[comb[e]][0] for e in range(v_e)])
for comb in e_config_combs]
TS = [sum([ml_ms[comb[e]][1] for e in range(v_e)])
for comb in e_config_combs]
comb_counter = Counter([r for r in zip(TL, TS)])
term_symbols = []
while sum(comb_counter.values()) > 0:
# Start from the lowest freq combination,
# which corresponds to largest abs(L) and smallest abs(S)
L, S = min(comb_counter)
J = list(np.arange(abs(L - S), abs(L) + abs(S) + 1))
term_symbols.append([str(int(2 * (abs(S)) + 1))
+ L_symbols[abs(L)]
+ str(j) for j in J])
# Without J
# term_symbols.append(str(int(2 * (abs(S)) + 1)) \
# + L_symbols[abs(L)])
# Delete all configurations included in this term
for ML in range(-L, L - 1, -1):
for MS in np.arange(S, -S + 1, 1):
if (ML, MS) in comb_counter:
comb_counter[(ML, MS)] -= 1
if comb_counter[(ML, MS)] == 0:
del comb_counter[(ML, MS)]
return term_symbols
def parse_results(fields, data):
"""
Traverses ordered dictionary, calls _traverse_results() to recursively read into the dictionary depth of data
"""
master = []
for record in data['records']: # for each 'record' in response
row = [None] * len(fields) # create null list the length of number of columns
for obj, value in record.iteritems(): # for each obj in record
if not isinstance(value, (dict, list, tuple)): # if not data structure
if obj in fields:
row[fields.index(obj)] = ensure_utf(value)
elif isinstance(value, dict) and obj != 'attributes': # traverse down into object
path = obj
_traverse_results(value, fields, row, path)
master.append(row)
return master
def deprecated(message=None):
"""Decorator to mark functions as deprecated. A warning will be emitted when the function is used."""
def deco(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
warnings.warn(
message or 'Call to deprecated function {}'.format(func.__name__),
category=PubChemPyDeprecationWarning,
stacklevel=2
)
return func(*args, **kwargs)
return wrapped
return deco
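# --- Hedged usage sketch (assumes PubChemPyDeprecationWarning is defined elsewhere
# in the package, as the decorator above expects; old_search is a hypothetical
# function used only for illustration) ----------------------------------------------
import warnings

@deprecated("use new_search() instead")
def old_search(name):
    return name.upper()

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_search("aspirin")
    print(caught[0].message)  # use new_search() instead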
def np2model_tensor(a):
"Tranform numpy array `a` to a tensor of the same type."
dtype = model_type(a.dtype)
res = as_tensor(a)
if not dtype: return res
return res.type(dtype)
def main():
"""main."""
parser = create_parser()
args = parser.parse_args()
if hasattr(args, 'handler'):
args.handler(args)
else:
parser.print_help()
def _system_config_file():
"""
Returns the path to the settings.cfg file. On Windows the file is
located in the AppData/Local/envipyengine directory. On Unix, the file
will be located in the ~/.envipyengine directory.
:return: String specifying the full path to the settings.cfg file
"""
if sys.platform == 'win32':
config_path = os.path.sep.join([_windows_system_appdata(),
_APP_DIRNAME,
_CONFIG_FILENAME])
elif sys.platform.startswith('darwin'):
config_path = os.path.sep.join([os.path.sep + 'Library', 'Preferences',
_APP_DIRNAME, _CONFIG_FILENAME])
else:
config_path = os.path.sep.join(['', 'var', 'lib', _APP_DIRNAME,
_CONFIG_FILENAME])
return config_path
def circuit_to_tensorflow_runnable(
circuit: circuits.Circuit,
initial_state: Union[int, np.ndarray] = 0,
) -> ComputeFuncAndFeedDict:
"""Returns a compute function and feed_dict for a `cirq.Circuit`'s output.
`result.compute()` will return a `tensorflow.Tensor` with
`tensorflow.placeholder` objects to be filled in by `result.feed_dict`, at
which point it will evaluate to the output state vector of the circuit.
You can apply further operations to the tensor returned by
`result.compute`. This allows, for example, for the final result to be
a small amount of computed data (e.g. an expectation value) instead of the
gigantic raw state vector.
The tensor returned by `result.compute` is intended to run efficiently
on cloud TPUs. It will have dtype complex64 and a shape of (2**n,) where n
is the number of qubits.
Examples:
To simulate the circuit with tensorflow in a normal session, forward
this method's output into `tensorflow.Session.run` as follows:
import tensorflow as tf
r = circuit_to_tensorflow_runnable(...)
with tf.Session() as session:
output = session.run(r.compute(), feed_dict=r.feed_dict)
print(output)
Note that you can use the returned tensor in further computations. For
example, to compute the chance of the system ending up in the first 128
computational basis states you can use `tf.norm(tensor[:128], 2)`:
import tensorflow as tf
r = circuit_to_tensorflow_runnable(...)
expectation = lambda: tf.norm(r.compute()[:128], 2)
with tf.Session() as session:
output = session.run(expectation, feed_dict=r.feed_dict)
print(output)
For documentation on running against cloud TPUs, see
https://cloud.google.com/tpu/docs/quickstart#run_example
Generally speaking, from within a cloud instance, you use
`tf.contrib.tpu.rewrite` to convert the tensor into a TPU compatible
form, initialize the TPU system, then run the rewritten tensor:
import tensorflow as tf
TPU_TARGET = ???????
r = circuit_to_tensorflow_runnable(...YOUR_CIRCUIT...)
rewritten_for_tpu = tf.contrib.tpu.rewrite(r.compute)
with tf.Session(target=TPU_TARGET) as session:
session.run(tf.contrib.tpu.initialize_system())
output = session.run(rewritten_for_tpu, feed_dict=r.feed_dict)
print(output)
Args:
circuit: The circuit to apply to `initial_state` to produce an output
state vector.
initial_state: The input into the circuit. If this is an integer, it
indicates that the input state is a computational basis state where
the k'th qubit is set by the k'th bit of the integer. If this is
a numpy array, it should directly encode a normalized wavefunction.
Returns:
A ComputeFuncAndFeedDict, which is a named tuple whose first element is
a function that returns a Tensor representing the output state vector
that results from applying the given circuit to the given, and whose
second element is a feed_dict containing important parameters describing
that tensor.
"""
if not circuit.are_all_measurements_terminal():
raise ValueError('not circuit.are_all_measurements_terminal()')
t = _TensorCircuit(circuit, initial_state)
return ComputeFuncAndFeedDict(t.compute, t.feed_dict)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
# no argument: fall back to today's midnight (assumes "current day" per the docstring;
# the original referenced an undefined name `_date` here)
return datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time())
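# --- Hedged usage check (assumes parse() behaves like dateutil.parser.parse, as the
# string handling above suggests; the date below is a placeholder) ------------------
print(midnight("2020-01-15 13:45"))  # 2020-01-15 00:00:00
print(midnight())                    # today's date at 00:00:00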
def compose(layers, bbox=None, layer_filter=None, color=None, **kwargs):
"""
Compose layers to a single :py:class:`PIL.Image`.
If the layers do not have visible pixels, the function returns `None`.
Example::
image = compose([layer1, layer2])
In order to skip some layers, pass `layer_filter` function which
should take `layer` as an argument and return `True` to keep the layer
or return `False` to skip::
image = compose(
layers,
layer_filter=lambda x: x.is_visible() and x.kind == 'type'
)
By default, visible layers are composed.
.. note:: This function is experimental and does not guarantee
Photoshop-quality rendering.
Currently the following are ignored:
- Adjustments layers
- Layer effects
- Blending mode (all blending modes become normal)
Shape drawing is inaccurate if the PSD file is not saved with
maximum compatibility.
:param layers: a layer, or an iterable of layers.
:param bbox: (left, top, bottom, right) tuple that specifies a region to
compose. By default, all the visible area is composed. The origin
is at the top-left corner of the PSD document.
:param layer_filter: a callable that takes a layer and returns `bool`.
:param color: background color in `int` or `tuple`.
:return: :py:class:`PIL.Image` or `None`.
"""
from PIL import Image
if not hasattr(layers, '__iter__'):
layers = [layers]
def _default_filter(layer):
return layer.is_visible()
layer_filter = layer_filter or _default_filter
valid_layers = [x for x in layers if layer_filter(x)]
if len(valid_layers) == 0:
return None
if bbox is None:
bbox = extract_bbox(valid_layers)
if bbox == (0, 0, 0, 0):
return None
# Alpha must be forced to correctly blend.
mode = get_pil_mode(valid_layers[0]._psd.color_mode, True)
result = Image.new(
mode, (bbox[2] - bbox[0], bbox[3] - bbox[1]),
color=color if color is not None else 'white',
)
result.putalpha(0)
for layer in valid_layers:
if intersect(layer.bbox, bbox) == (0, 0, 0, 0):
continue
image = layer.compose(**kwargs)
if image is None:
continue
logger.debug('Composing %s' % layer)
offset = (layer.left - bbox[0], layer.top - bbox[1])
result = _blend(result, image, offset)
return result | Compose layers to a single :py:class:`PIL.Image`.
If the layers do not have visible pixels, the function returns `None`.
Example::
image = compose([layer1, layer2])
In order to skip some layers, pass `layer_filter` function which
should take `layer` as an argument and return `True` to keep the layer
or return `False` to skip::
image = compose(
layers,
layer_filter=lambda x: x.is_visible() and x.kind == 'type'
)
By default, visible layers are composed.
.. note:: This function is experimental and does not guarantee
Photoshop-quality rendering.
Currently the following are ignored:
- Adjustments layers
- Layer effects
- Blending mode (all blending modes become normal)
Shape drawing is inaccurate if the PSD file is not saved with
maximum compatibility.
:param layers: a layer, or an iterable of layers.
:param bbox: (left, top, bottom, right) tuple that specifies a region to
compose. By default, all the visible area is composed. The origin
is at the top-left corner of the PSD document.
:param layer_filter: a callable that takes a layer and returns `bool`.
:param color: background color in `int` or `tuple`.
:return: :py:class:`PIL.Image` or `None`. |
def trimult(U, x, uplo='U', transa='N', alpha=1., inplace=False):
"""
b = trimult(U,x, uplo='U')
Multiplies U x, where U is upper triangular if uplo='U'
or lower triangular if uplo = 'L'.
"""
if inplace:
b = x
else:
b = x.copy('F')
dtrmm_wrap(a=U, b=b, uplo=uplo, transa=transa, alpha=alpha)
return b | b = trimult(U,x, uplo='U')
Multiplies U x, where U is upper triangular if uplo='U'
or lower triangular if uplo = 'L'. |
def create_item(self, name):
"""
create a new todo list item
"""
elem = self.controlled_list.create_item(name)
if elem:
return TodoElementUX(parent=self, controlled_element=elem) | create a new todo list item |
def batches(dataset):
'''Returns a callable that chooses sequences from netcdf data.'''
seq_lengths = dataset.variables['seqLengths'].data
seq_begins = np.concatenate(([0], np.cumsum(seq_lengths)[:-1]))
def sample():
chosen = np.random.choice(
list(range(len(seq_lengths))), BATCH_SIZE, replace=False)
return batch_at(dataset.variables['inputs'].data,
dataset.variables['targetClasses'].data,
seq_begins[chosen],
seq_lengths[chosen])
return sample | Returns a callable that chooses sequences from netcdf data. |
def _find_types(pkgs):
'''Form a package names list, find prefixes of packages types.'''
return sorted({pkg.split(':', 1)[0] for pkg in pkgs
if len(pkg.split(':', 1)) == 2}) | Form a package names list, find prefixes of packages types. |
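A minimal usage sketch for _find_types; the package names below are hypothetical, chosen only to show the `type:name` prefix convention:
pkgs = ["pip:requests", "npm:left-pad", "gem:rails", "plainpackage"]
# Only entries containing a colon contribute a type prefix; duplicates collapse.
print(_find_types(pkgs))  # ['gem', 'npm', 'pip']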
def get_annotation(self, key, result_format='list'):
"""
A convenience method for accessing annotations on models that have them.
"""
value = self.get('_annotations_by_key', {}).get(key)
if not value:
return value
if result_format == 'one':
return value[0]
return value | A convenience method for accessing annotations on models that have them.
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint | a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root |
def GroupBy(self: dict, f=None):
"""
[
{
'self': [1, 2, 3],
'f': lambda x: x%2,
'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3]
}
]
"""
if f and is_to_destruct(f):
f = destruct_func(f)
return _group_by(self.items(), f) | [
{
'self': [1, 2, 3],
'f': lambda x: x%2,
'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3]
}
] |
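The `_group_by` helper called above is not shown in this row; a standalone sketch of the documented grouping behaviour, matching the assertion in the docstring, might look like this:
from collections import defaultdict

def group_by_sketch(items, f):
    # Group items by the value of f(item), preserving insertion order within each group.
    groups = defaultdict(list)
    for item in items:
        groups[f(item)].append(item)
    return dict(groups)

ret = group_by_sketch([1, 2, 3], lambda x: x % 2)
assert ret[0] == [2] and ret[1] == [1, 3]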
def _parse_raw_members(
self, leaderboard_name, members, members_only=False, **options):
'''
Parse the raw leaders data as returned from a given leader board query. Do associative
lookups with the member to rank, score and potentially sort the results.
@param leaderboard_name [String] Name of the leaderboard.
@param members [List] A list of members as returned from a sorted set range query
@param members_only [bool] Set True to return the members as is, Default is False.
@param options [Hash] Options to be used when retrieving the page from the named leaderboard.
@return a list of members.
'''
if members_only:
return [{self.MEMBER_KEY: m} for m in members]
if members:
return self.ranked_in_list_in(leaderboard_name, members, **options)
else:
return [] | Parse the raw leaders data as returned from a given leader board query. Do associative
lookups with the member to rank, score and potentially sort the results.
@param leaderboard_name [String] Name of the leaderboard.
@param members [List] A list of members as returned from a sorted set range query
@param members_only [bool] Set True to return the members as is, Default is False.
@param options [Hash] Options to be used when retrieving the page from the named leaderboard.
@return a list of members. |
def offset(self):
"""
Property to be used for setting and getting the offset of the layer.
Note that setting this property causes an immediate redraw.
"""
if callable(self._offset):
return util.WatchingList(self._offset(*(self.widget.pos+self.widget.size)),self._wlredraw_offset)
else:
return util.WatchingList(self._offset,self._wlredraw_offset) | Property to be used for setting and getting the offset of the layer.
Note that setting this property causes an immediate redraw. |
def __disambiguate_proper_names_1(self, docs, lexicon):
""" Teeme esmase yleliigsete analyyside kustutamise: kui sõnal on mitu
erineva sagedusega pärisnimeanalüüsi, siis jätame alles vaid
suurima sagedusega analyysi(d) ...
"""
for doc in docs:
for word in doc[WORDS]:
# Consider only words that have more than one proposed analysis:
if len(word[ANALYSIS]) > 1:
# 1) Look up the frequency of every proper-name lemma in the frequency lexicon
highestFreq = 0
properNameAnalyses = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
if analysis[ROOT] in lexicon:
properNameAnalyses.append( analysis )
if lexicon[analysis[ROOT]] > highestFreq:
highestFreq = lexicon[analysis[ROOT]]
else:
raise Exception(' Unable to find lemma ',analysis[ROOT], \
' from the lexicon. ')
# 2) Keep only the proper-name analyses with the highest lemma frequency,
# delete the rest
if highestFreq > 0:
toDelete = []
for analysis in properNameAnalyses:
if lexicon[analysis[ROOT]] < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis) | Perform the initial removal of redundant analyses: if a word has several
proper-name analyses with different frequencies, keep only the
analysis (or analyses) with the highest frequency ...
def scramble(name, **kwargs):
""" scramble text blocks and keep original file structure
Parameters
----------
name : str | pathlib.Path
file name
Returns
-------
name : str
scrambled file name
"""
name = Path(name)
mdf = MDF(name)
texts = {}
callback = kwargs.get("callback", None)
if callback:
callback(0, 100)
count = len(mdf.groups)
if mdf.version >= "4.00":
ChannelConversion = ChannelConversionV4
stream = mdf._file
if mdf.header.comment_addr:
stream.seek(mdf.header.comment_addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[mdf.header.comment_addr] = randomized_string(size)
for fh in mdf.file_history:
addr = fh.comment_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for ev in mdf.events:
for addr in (ev.comment_addr, ev.name_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for idx, gp in enumerate(mdf.groups, 1):
addr = gp.data_group.comment_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
cg = gp.channel_group
for addr in (cg.acq_name_addr, cg.comment_addr):
if cg.flags & v4c.FLAG_CG_BUS_EVENT:
continue
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
source = cg.acq_source_addr
if source:
source = SourceInformation(address=source, stream=stream)
for addr in (
source.name_addr,
source.path_addr,
source.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for ch in gp.channels:
for addr in (ch.name_addr, ch.unit_addr, ch.comment_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
source = ch.source_addr
if source:
source = SourceInformation(address=source, stream=stream)
for addr in (
source.name_addr,
source.path_addr,
source.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
conv = ch.conversion_addr
if conv:
conv = ChannelConversion(address=conv, stream=stream)
for addr in (conv.name_addr, conv.unit_addr, conv.comment_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if conv.conversion_type == v4c.CONVERSION_TYPE_ALG:
addr = conv.formula_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if conv.referenced_blocks:
for key, block in conv.referenced_blocks.items():
if block:
if block.id == b"##TX":
addr = block.address
if addr not in texts:
stream.seek(addr + 8)
size = block.block_len - 24
texts[addr] = randomized_string(size)
if callback:
callback(int(idx/count*66), 100)
mdf.close()
dst = name.with_suffix(".scrambled.mf4")
copy(name, dst)
with open(dst, "rb+") as mdf:
count = len(texts)
chunk = max(count // 34, 1)
idx = 0
for index, (addr, bts) in enumerate(texts.items()):
mdf.seek(addr + 24)
mdf.write(bts)
if index % chunk == 0:
if callback:
callback(66 + idx, 100)
if callback:
callback(100, 100)
else:
ChannelConversion = ChannelConversionV3
stream = mdf._file
if mdf.header.comment_addr:
stream.seek(mdf.header.comment_addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[mdf.header.comment_addr + 4] = randomized_string(size)
texts[36 + 0x40] = randomized_string(32)
texts[68 + 0x40] = randomized_string(32)
texts[100 + 0x40] = randomized_string(32)
texts[132 + 0x40] = randomized_string(32)
for idx, gp in enumerate(mdf.groups, 1):
cg = gp.channel_group
addr = cg.comment_addr
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
if gp.trigger:
addr = gp.trigger.text_addr
if addr:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
for ch in gp.channels:
for key in ("long_name_addr", "display_name_addr", "comment_addr"):
if hasattr(ch, key):
addr = getattr(ch, key)
else:
addr = 0
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
texts[ch.address + 26] = randomized_string(32)
texts[ch.address + 58] = randomized_string(128)
source = ch.source_addr
if source:
source = ChannelExtension(address=source, stream=stream)
if source.type == v23c.SOURCE_ECU:
texts[source.address + 12] = randomized_string(80)
texts[source.address + 92] = randomized_string(32)
else:
texts[source.address + 14] = randomized_string(36)
texts[source.address + 50] = randomized_string(36)
conv = ch.conversion_addr
if conv:
texts[conv + 22] = randomized_string(20)
conv = ChannelConversion(address=conv, stream=stream)
if conv.conversion_type == v23c.CONVERSION_TYPE_FORMULA:
texts[conv + 36] = randomized_string(conv.block_len - 36)
if conv.referenced_blocks:
for key, block in conv.referenced_blocks.items():
if block:
if block.id == b"TX":
addr = block.address
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
if callback:
callback(int(idx/count*66), 100)
mdf.close()
dst = name.with_suffix(".scrambled.mf4")
copy(name, dst)
with open(dst, "rb+") as mdf:
chunk = count // 34
idx = 0
for index, (addr, bts) in enumerate(texts.items()):
mdf.seek(addr)
mdf.write(bts)
if chunk and index % chunk == 0:
if callback:
callback(66 + idx, 100)
if callback:
callback(100, 100)
return dst | scramble text blocks and keep original file structure
Parameters
----------
name : str | pathlib.Path
file name
Returns
-------
name : str
scrambled file name |
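A minimal usage sketch for scramble; the file name is hypothetical and the progress callback is only illustrative:
# Writes a ".scrambled.mf4" copy next to the original file and returns its path;
# the original measurement file is left untouched.
dst = scramble("measurement.mf4", callback=lambda done, total: print(done, "/", total))
print(dst)  # measurement.scrambled.mf4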
def remove_vectored_io_slice_suffix_from_name(name, slice):
# type: (str, int) -> str
"""Remove vectored io (stripe) slice suffix from a given name
:param str name: entity name
:param int slice: slice num
:rtype: str
:return: name without suffix
"""
suffix = '.bxslice-{}'.format(slice)
if name.endswith(suffix):
return name[:-len(suffix)]
else:
return name | Remove vectored io (stripe) slice suffix from a given name
:param str name: entity name
:param int slice: slice num
:rtype: str
:return: name without suffix |
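A small self-check for remove_vectored_io_slice_suffix_from_name; the file names are hypothetical:
assert remove_vectored_io_slice_suffix_from_name('data.bin.bxslice-3', 3) == 'data.bin'
# A mismatched slice number leaves the name untouched.
assert remove_vectored_io_slice_suffix_from_name('data.bin.bxslice-3', 4) == 'data.bin.bxslice-3'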
def get(self, param=None, must=[APIKEY]):
'''Query account information
Parameter  Type    Required  Description             Example
apikey     String  Yes       unique user identifier  9b11127a9701975c734b8aee81ee3526
Args:
param: (Optional)
Results:
Result
'''
param = {} if param is None else param
r = self.verify_param(param, must)
if not r.is_succ():
return r
handle = CommonResultHandler(lambda rsp: {VERSION_V1:rsp.get(USER), VERSION_V2:rsp}[self.version()])
return self.path('get.json').post(param, handle, r) | Query account information
Parameter  Type    Required  Description             Example
apikey     String  Yes       unique user identifier  9b11127a9701975c734b8aee81ee3526
Args:
param: (Optional)
Results:
Result |
def plot(self, feature):
"""
Spawns a new figure showing data for `feature`.
:param feature: A `pybedtools.Interval` object
Using the pybedtools.Interval `feature`, creates figure specified in
:meth:`BaseMiniBrowser.make_fig` and plots data on panels according to
`self.panels()`.
"""
if isinstance(feature, gffutils.Feature):
feature = asinterval(feature)
self.make_fig()
axes = []
for ax, method in self.panels():
feature = method(ax, feature)
axes.append(ax)
return axes | Spawns a new figure showing data for `feature`.
:param feature: A `pybedtools.Interval` object
Using the pybedtools.Interval `feature`, creates figure specified in
:meth:`BaseMiniBrowser.make_fig` and plots data on panels according to
`self.panels()`. |
def parse(self, buffer, inlineparent = None):
'''
Compatible to Parser.parse()
'''
size = 0
v = []
for i in range(0, self.size): # @UnusedVariable
r = self.innerparser.parse(buffer[size:], None)
if r is None:
return None
v.append(r[0])
size += r[1]
return (v, size) | Compatible to Parser.parse() |
def get_distances(rupture, mesh, param):
"""
:param rupture: a rupture
:param mesh: a mesh of points or a site collection
:param param: the kind of distance to compute (default rjb)
:returns: an array of distances from the given mesh
"""
if param == 'rrup':
dist = rupture.surface.get_min_distance(mesh)
elif param == 'rx':
dist = rupture.surface.get_rx_distance(mesh)
elif param == 'ry0':
dist = rupture.surface.get_ry0_distance(mesh)
elif param == 'rjb':
dist = rupture.surface.get_joyner_boore_distance(mesh)
elif param == 'rhypo':
dist = rupture.hypocenter.distance_to_mesh(mesh)
elif param == 'repi':
dist = rupture.hypocenter.distance_to_mesh(mesh, with_depths=False)
elif param == 'rcdpp':
dist = rupture.get_cdppvalue(mesh)
elif param == 'azimuth':
dist = rupture.surface.get_azimuth(mesh)
elif param == "rvolc":
# Volcanic distance not yet supported, defaulting to zero
dist = numpy.zeros_like(mesh.lons)
else:
raise ValueError('Unknown distance measure %r' % param)
return dist | :param rupture: a rupture
:param mesh: a mesh of points or a site collection
:param param: the kind of distance to compute (default rjb)
:returns: an array of distances from the given mesh |
def get_learning_objectives_metadata(self):
"""Gets the metadata for learning objectives.
return: (osid.Metadata) - metadata for the learning objectives
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['learning_objectives'])
metadata.update({'existing_learning_objectives_values': self._my_map['learningObjectiveIds']})
return Metadata(**metadata) | Gets the metadata for learning objectives.
return: (osid.Metadata) - metadata for the learning objectives
*compliance: mandatory -- This method must be implemented.* |
def _virtualenv_sys(venv_path):
"obtain version and path info from a virtualenv."
executable = os.path.join(venv_path, env_bin_dir, 'python')
# Must use "executable" as the first argument rather than as the
# keyword argument "executable" to get correct value from sys.path
p = subprocess.Popen([executable,
'-c', 'import sys;'
'print (sys.version[:3]);'
'print ("\\n".join(sys.path));'],
env={},
stdout=subprocess.PIPE)
stdout, err = p.communicate()
assert not p.returncode and stdout
lines = stdout.decode('utf-8').splitlines()
return lines[0], list(filter(bool, lines[1:])) | obtain version and path info from a virtualenv. |
def cleanup(self):
"""
Release resources used during shell execution
"""
for future in self.futures:
future.cancel()
self.executor.shutdown(wait=10)
if self.ssh.get_transport() != None:
self.ssh.close() | Release resources used during shell execution |
def get_results(self, title_prefix="", title_override="", rnd_dig=2):
"""
Constructs a summary of the results as an array, which might be
useful for writing the results of multiple algorithms to a table.
NOTE- This method must be called AFTER "roll_mc".
:param title_prefix: If desired, prefix the title (such as "Alg 1 ")
:param title_override: Override the title string entirely
:param rnd_dig: the number of digits to round to
:return: A tuple of the raw array results and PBE results, as:
[Description, Typical Array, Mean, Std, 5%, 95%]
"""
# Check that roll_mc has been called
if not self.arr_res:
raise ValueError("Call roll_mc before getting results.")
# Find a title using either the override or _construct_title method
if title_override:
title = title_override
else:
ctitle = PBE._construct_title(self.num_dice, self.dice_type,
self.add_val, self.num_attribute, self.keep_attribute,
self.keep_dice, self.reroll, self.num_arrays)
title = title_prefix + ctitle
# Find the typical array
typ_arr = "; ".join([str(round(x, rnd_dig))
for x in self.arr_res["means"]])
res_row = [title, typ_arr,
round(self.pbe_res["means"], rnd_dig),
round(self.pbe_res["stds"], rnd_dig),
round(self.pbe_res["5percentile"], rnd_dig),
round(self.pbe_res["95percentile"], rnd_dig)]
return res_row | Constructs a summary of the results as an array, which might be
useful for writing the results of multiple algorithms to a table.
NOTE- This method must be called AFTER "roll_mc".
:param title_prefix: If desired, prefix the title (such as "Alg 1 ")
:param title_override: Override the title string entirely
:param rnd_dig: the number of digits to round to
:return: A tuple of the raw array results and PBE results, as:
[Description, Typical Array, Mean, Std, 5%, 95%] |
def status(self):
"""
The status of the container. For example, ``running``, or ``exited``.
"""
if isinstance(self.attrs['State'], dict):
return self.attrs['State']['Status']
return self.attrs['State'] | The status of the container. For example, ``running``, or ``exited``. |
def main():
"""
The main loop for the commandline parser.
"""
DATABASE.load_contents()
continue_flag = False
while not continue_flag:
DATABASE.print_contents()
try:
command = raw_input(">>> ")
for stmnt_unformated in sqlparse.parse(command):
statement = sqlparse.parse(
sqlparse.format(
str(
stmnt_unformated
),
reindent=True
)
)[0]
type = statement.tokens[0]
if str(type).lower() == "drop":
if str(statement.tokens[2]).lower() == "table":
tablename = str(statement.tokens[4])
table = DATABASE.get_table(tablename)
table.rows = []
table.store_contents()
DATABASE.delete_table(tablename)
DATABASE.store_contents()
else:
raise Exception(
"Invalid Syntax of DROP TABLE t"
)
elif str(type).lower() == "truncate":
if str(statement.tokens[2]).lower() == "table":
tablename = str(statement.tokens[4])
table = DATABASE.get_table(tablename)
table.rows = []
table.store_contents()
else:
raise Exception(
"Invalid Syntax of TRUNCATE TABLE t"
)
elif str(type).lower() == "delete":
if str(statement.tokens[2]).lower() == "from":
tablename = str(statement.tokens[4])
table = DATABASE.get_table(tablename)
whereclause = statement.tokens[6]
if str(whereclause.tokens[0]).lower() == "where":
comparison = whereclause.tokens[2]
key = str(comparison.tokens[0])
value = int(str(comparison.tokens[4]))
table.delete_row(key, value)
table.store_contents()
else:
raise Exception(
"Invalid Syntax of DELETE FROM t where k = v"
)
else:
raise Exception(
"Invalid Syntax of DELETE FROM t WHERE k = v"
)
elif str(type).lower() == "insert":
if str(statement.tokens[2]).lower() == "into":
tablename = str(statement.tokens[4])
table = DATABASE.get_table(tablename)
if str(statement.tokens[6]).lower() == "values":
parenthesis = statement.tokens[8]
idlist = parenthesis.tokens[1]
values_list = map(
lambda x: int(str(x)),
idlist.get_identifiers()
)
table.put_row_raw(values_list)
table.store_contents()
else:
raise Exception(
"Invalid Syntax of INSERT INTO t VALUES (v...)"
)
else:
raise Exception(
"Invalid Syntax of INSERT INTO t VALUES (v...)"
)
elif str(type).lower() == "create":
if str(statement.tokens[2]).lower() == "table":
sublist = list(statement.tokens[4].get_sublists())
tablename = str(sublist[0])
garbage = str(sublist[1])
column_list = map(
lambda x: x.strip(" ()",).split()[0],
garbage.split(",")
)
DATABASE.create_table_raw(
tablename=tablename,
columns=column_list[:],
)
DATABASE.store_contents()
elif str(type).lower() == "select":
col_list_or_single = statement.tokens[2]
if "," not in str(col_list_or_single):
if str(col_list_or_single) == "*":
column_list = ['*']
else:
column_list = [str(col_list_or_single)]
else:
column_list = map(
lambda x: str(x),
col_list_or_single.get_identifiers()
)
if str(statement.tokens[4]).lower() == "from":
tab_list_or_single = statement.tokens[6]
if "," not in str(tab_list_or_single):
table_list = [str(tab_list_or_single)]
else:
table_list = map(
lambda x: str(x),
tab_list_or_single.get_identifiers()
)
cross_columns = reduce(
lambda x, y: x + y,
map(
lambda x: DATABASE.get_table(
x
).get_column_list_prefixed(),
table_list
)
)
cross_table = parthsql.Table(
name="temp",
columns=cross_columns,
rows=[]
)
for i in itertools.product(
*map(
lambda x: DATABASE.get_table(x).get_all_rows(),
table_list
)
):
cross_table.put_row_raw(
reduce(
lambda x, y: x + y,
i
)
)
if len(statement.tokens) >= 9:
whereclause = statement.tokens[8]
if str(whereclause.tokens[0]).lower() == "where":
comparison = whereclause.tokens[2]
key = str(comparison.tokens[0])
try:
value = int(str(comparison.tokens[4]))
cross_table.invert_delete_row(key, value)
except:
value = str(comparison.tokens[4])
cross_table.invert_delete_row2(key, value)
else:
raise Exception(
"Invalid Syntax of DELETE FROM t where k = v"
)
if "*" in column_list:
cross_table.print_contents()
else:
temp_list = []
for i in column_list:
temp_list.append(cross_table.get_column(i))
print "\t\t\t".join(column_list)
for i in zip(*(temp_list)):
print "\t\t\t".join(map(str, i))
else:
raise Exception(
"Invalid Syntax of SELECT c... FROM t... WHERE k = v"
)
else:
raise Exception(
"Unsupported Operation"
)
except ValueError:
print("¯\_(ツ)_/¯")
except IOError:
print("¯\_(ツ)_/¯")
except IndexError:
print("¯\_(ツ)_/¯")
except AttributeError:
print("¯\_(ツ)_/¯")
except Exception, e:
print e.message | The main loop for the commandline parser. |
def _read_openjp2_common(self):
"""
Read a JPEG 2000 image using libopenjp2.
Returns
-------
ndarray or lst
Either the image as an ndarray or a list of ndarrays, each item
corresponding to one band.
"""
with ExitStack() as stack:
filename = self.filename
stream = opj2.stream_create_default_file_stream(filename, True)
stack.callback(opj2.stream_destroy, stream)
codec = opj2.create_decompress(self._codec_format)
stack.callback(opj2.destroy_codec, codec)
opj2.set_error_handler(codec, _ERROR_CALLBACK)
opj2.set_warning_handler(codec, _WARNING_CALLBACK)
if self._verbose:
opj2.set_info_handler(codec, _INFO_CALLBACK)
else:
opj2.set_info_handler(codec, None)
opj2.setup_decoder(codec, self._dparams)
raw_image = opj2.read_header(stream, codec)
stack.callback(opj2.image_destroy, raw_image)
if self._dparams.nb_tile_to_decode:
opj2.get_decoded_tile(codec, stream, raw_image,
self._dparams.tile_index)
else:
opj2.set_decode_area(codec, raw_image,
self._dparams.DA_x0, self._dparams.DA_y0,
self._dparams.DA_x1, self._dparams.DA_y1)
opj2.decode(codec, stream, raw_image)
opj2.end_decompress(codec, stream)
image = self._extract_image(raw_image)
return image | Read a JPEG 2000 image using libopenjp2.
Returns
-------
ndarray or lst
Either the image as an ndarray or a list of ndarrays, each item
corresponding to one band. |
def get_fd(file_or_fd, default=None):
"""Helper function for getting a file descriptor."""
fd = file_or_fd
if fd is None:
fd = default
if hasattr(fd, "fileno"):
fd = fd.fileno()
return fd | Helper function for getting a file descriptor. |
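A short sketch of how get_fd behaves for the three kinds of input (file-like object, raw descriptor, and a default):
import tempfile

with tempfile.TemporaryFile() as f:
    assert get_fd(f) == f.fileno()                 # file-like objects are reduced via fileno()
    assert get_fd(None, default=f) == f.fileno()   # the default is used when fd is None
assert get_fd(7) == 7                              # plain integers pass through unchanged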
def deleteNetworkVisualProp(self, networkId, viewId, visualProperty, verbose=None):
"""
Deletes the bypass Visual Property specified by the `visualProperty`, `viewId`, and `networkId` parameters. When this is done, the Visual Property will be defined by the Visual Style.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param networkId: SUID of the Network
:param viewId: SUID of the Network View
:param visualProperty: Name of the Visual Property
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/views/'+str(viewId)+'/network/'+str(visualProperty)+'/bypass', method="DELETE", verbose=verbose)
return response | Deletes the bypass Visual Property specified by the `visualProperty`, `viewId`, and `networkId` parameters. When this is done, the Visual Property will be defined by the Visual Style.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param networkId: SUID of the Network
:param viewId: SUID of the Network View
:param visualProperty: Name of the Visual Property
:param verbose: print more
:returns: 200: successful operation |
def to_struct(self, value):
"""Cast `date` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.strftime(self.default_format) | Cast `date` object to string. |
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None) | Makes an aware datetime.datetime naive in a given time zone. |
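A minimal sketch of make_naive, assuming pytz is installed; the timezone and timestamp are arbitrary:
import datetime
import pytz

paris = pytz.timezone('Europe/Paris')
aware = datetime.datetime(2019, 7, 1, 12, 0, tzinfo=pytz.utc)
print(make_naive(aware, paris))  # 2019-07-01 14:00:00 -- Paris wall-clock time, tzinfo stripped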
def get_metric_values(self, group_name):
"""
Get the faked metric values for a metric group, by its metric group
name.
The result includes all metric object values added earlier for that
metric group name, using
:meth:`~zhmcclient.FakedMetricsContextManager.add_metric_object_values`
i.e. the metric values for all resources and all points in time that
were added.
Parameters:
group_name (:term:`string`): Name of the metric group.
Returns:
iterable of :class:`~zhmcclient.FakedMetricObjectValues`: The metric
values for that metric group, in the order they had been added.
Raises:
ValueError: Metric values for this group name do not exist.
"""
if group_name not in self._metric_values:
raise ValueError("Metric values for this group name do not "
"exist: {}".format(group_name))
return self._metric_values[group_name] | Get the faked metric values for a metric group, by its metric group
name.
The result includes all metric object values added earlier for that
metric group name, using
:meth:`~zhmcclient.FakedMetricsContextManager.add_metric_object_values`
i.e. the metric values for all resources and all points in time that
were added.
Parameters:
group_name (:term:`string`): Name of the metric group.
Returns:
iterable of :class:`~zhmcclient.FakedMetricObjectValues`: The metric
values for that metric group, in the order they had been added.
Raises:
ValueError: Metric values for this group name do not exist. |
def _SendTerminationMessage(self, status=None):
"""This notifies the parent flow of our termination."""
if not self.runner_args.request_state.session_id:
# No parent flow, nothing to do here.
return
if status is None:
status = rdf_flows.GrrStatus()
client_resources = self.context.client_resources
user_cpu = client_resources.cpu_usage.user_cpu_time
sys_cpu = client_resources.cpu_usage.system_cpu_time
status.cpu_time_used.user_cpu_time = user_cpu
status.cpu_time_used.system_cpu_time = sys_cpu
status.network_bytes_sent = self.context.network_bytes_sent
status.child_session_id = self.session_id
request_state = self.runner_args.request_state
request_state.response_count += 1
# Make a response message
msg = rdf_flows.GrrMessage(
session_id=request_state.session_id,
request_id=request_state.id,
response_id=request_state.response_count,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdf_flows.GrrMessage.Type.STATUS,
payload=status)
# Queue the response now
self.queue_manager.QueueResponse(msg)
self.QueueNotification(session_id=request_state.session_id) | This notifies the parent flow of our termination. |
def get_per_identity_records(self, events: Iterable, data_processor: DataProcessor
) -> Generator[Tuple[str, TimeAndRecord], None, None]:
"""
Uses the given iterable of events and the data processor to convert each event into a list of
Records along with its identity and time.
:param events: iterable of events.
:param data_processor: DataProcessor to process each event in events.
:return: yields Tuple[Identity, TimeAndRecord] for all Records in events,
"""
schema_loader = SchemaLoader()
stream_bts_name = schema_loader.add_schema_spec(self._stream_bts)
stream_transformer_schema: StreamingTransformerSchema = schema_loader.get_schema_object(
stream_bts_name)
for event in events:
try:
for record in data_processor.process_data(event):
try:
id = stream_transformer_schema.get_identity(record)
time = stream_transformer_schema.get_time(record)
yield (id, (time, record))
except Exception as err:
logging.error('{} in parsing Record {}.'.format(err, record))
except Exception as err:
logging.error('{} in parsing Event {}.'.format(err, event)) | Uses the given iterable of events and the data processor to convert each event into a list of
Records along with its identity and time.
:param events: iterable of events.
:param data_processor: DataProcessor to process each event in events.
:return: yields Tuple[Identity, TimeAndRecord] for all Records in events, |
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# Load the images
profiler.load_images(search_file, screen_file)
# Pass in the list of methods to test
profiler.profile_methods(method_list)
# Write the profiling data to a file
profiler.wite_to_json(dir_path, file_name) | Run a performance test on the specified images.
def serialize(self, queryset, **options):
"""
Serialize a queryset.
THE OUTPUT OF THIS SERIALIZER IS NOT MEANT TO BE SERIALIZED BACK
INTO THE DB.
"""
self.options = options
self.stream = options.get("stream", StringIO())
self.selected_fields = options.get("fields")
self.use_natural_keys = options.get("use_natural_keys", True)
self.xml = options.get("xml", None)
self.root = (self.xml == None)
self.start_serialization()
for obj in queryset:
# hook for having custom serialization
if hasattr(obj, '__serialize__'):
obj.__serialize__(self.xml)
else:
self.serialize_object(obj)
self.end_serialization()
return self.getvalue() | Serialize a queryset.
THE OUTPUT OF THIS SERIALIZER IS NOT MEANT TO BE SERIALIZED BACK
INTO THE DB. |
def _compute_distance(self, rup, dists, C):
"""
Compute the distance function, equation (9):
"""
mref = 3.6
rref = 1.0
rval = np.sqrt(dists.rhypo ** 2 + C['h'] ** 2)
return (C['c1'] + C['c2'] * (rup.mag - mref)) *\
np.log10(rval / rref) + C['c3'] * (rval - rref) | Compute the distance function, equation (9): |
def _are_safety_checks_disabled(self, caller=u"unknown_function"):
"""
Return ``True`` if safety checks are disabled.
:param string caller: the name of the caller function
:rtype: bool
"""
if self.rconf.safety_checks:
return False
self.log_warn([u"Safety checks disabled => %s passed", caller])
return True | Return ``True`` if safety checks are disabled.
:param string caller: the name of the caller function
:rtype: bool |
def build_package(team, username, package, subpath, yaml_path,
checks_path=None, dry_run=False, env='default'):
"""
Builds a package from a given Yaml file and installs it locally.
Returns the name of the package.
"""
def find(key, value):
"""
find matching nodes recursively;
only descend iterables that aren't strings
"""
if isinstance(value, Iterable) and not isinstance(value, string_types):
for k, v in iteritems(value):
if k == key:
yield v
elif isinstance(v, dict):
for result in find(key, v):
yield result
elif isinstance(v, list):
for item in v:
for result in find(key, item):
yield result
build_data = load_yaml(yaml_path)
# default to 'checks.yml' if build.yml contents: contains checks, but
# there's no inlined checks: defined by build.yml
if (checks_path is None and list(find('checks', build_data['contents'])) and
'checks' not in build_data):
checks_path = 'checks.yml'
checks_contents = load_yaml(checks_path, optional=True)
elif checks_path is not None:
checks_contents = load_yaml(checks_path)
else:
checks_contents = None
build_package_from_contents(team, username, package, subpath, os.path.dirname(yaml_path), build_data,
checks_contents=checks_contents, dry_run=dry_run, env=env) | Builds a package from a given Yaml file and installs it locally.
Returns the name of the package. |
def get_value(self, section, option, default=None):
"""
:param default:
If not None, the given default value will be returned in case
the option did not exist
:return: a properly typed value, either int, float or string
:raise TypeError: in case the value could not be understood
Otherwise the exceptions known to the ConfigParser will be raised."""
try:
valuestr = self.get(section, option)
except Exception:
if default is not None:
return default
raise
types = (int, float)
for numtype in types:
try:
val = numtype(valuestr)
# truncated value ?
if val != float(valuestr):
continue
return val
except (ValueError, TypeError):
continue
# END for each numeric type
# try boolean values as git uses them
vl = valuestr.lower()
if vl == 'false':
return False
if vl == 'true':
return True
if not isinstance(valuestr, string_types):
raise TypeError("Invalid value type: only int, long, float and str are allowed", valuestr)
return valuestr | :param default:
If not None, the given default value will be returned in case
the option did not exist
:return: a properly typed value, either int, float or string
:raise TypeError: in case the value could not be understood
Otherwise the exceptions known to the ConfigParser will be raised. |
def configure(self, viewport=None, fbo_size=None, fbo_rect=None,
canvas=None):
"""Automatically configure the TransformSystem:
* canvas_transform maps from the Canvas logical pixel
coordinate system to the framebuffer coordinate system, taking into
account the logical/physical pixel scale factor, current FBO
position, and y-axis inversion.
* framebuffer_transform maps from the current GL viewport on the
framebuffer coordinate system to clip coordinates (-1 to 1).
Parameters
==========
viewport : tuple or None
The GL viewport rectangle (x, y, w, h). If None, then it
is assumed to cover the entire canvas.
fbo_size : tuple or None
The size of the active FBO. If None, then it is assumed to have the
same size as the canvas's framebuffer.
fbo_rect : tuple or None
The position and size (x, y, w, h) of the FBO in the coordinate
system of the canvas's framebuffer. If None, then the bounds are
assumed to cover the entire active framebuffer.
canvas : Canvas instance
Optionally set the canvas for this TransformSystem. See the
`canvas` property.
"""
# TODO: check that d2f and f2r transforms still contain a single
# STTransform (if the user has modified these, then auto-config should
# either fail or replace the transforms)
if canvas is not None:
self.canvas = canvas
canvas = self._canvas
if canvas is None:
raise RuntimeError("No canvas assigned to this TransformSystem.")
# By default, this should invert the y axis--canvas origin is in top
# left, whereas framebuffer origin is in bottom left.
map_from = [(0, 0), canvas.size]
map_to = [(0, canvas.physical_size[1]), (canvas.physical_size[0], 0)]
self._canvas_transform.transforms[1].set_mapping(map_from, map_to)
if fbo_rect is None:
self._canvas_transform.transforms[0].scale = (1, 1, 1)
self._canvas_transform.transforms[0].translate = (0, 0, 0)
else:
# Map into FBO coordinates
map_from = [(fbo_rect[0], fbo_rect[1]),
(fbo_rect[0] + fbo_rect[2], fbo_rect[1] + fbo_rect[3])]
map_to = [(0, 0), fbo_size]
self._canvas_transform.transforms[0].set_mapping(map_from, map_to)
if viewport is None:
if fbo_size is None:
# viewport covers entire canvas
map_from = [(0, 0), canvas.physical_size]
else:
# viewport covers entire FBO
map_from = [(0, 0), fbo_size]
else:
map_from = [viewport[:2],
(viewport[0] + viewport[2], viewport[1] + viewport[3])]
map_to = [(-1, -1), (1, 1)]
self._framebuffer_transform.transforms[0].set_mapping(map_from, map_to) | Automatically configure the TransformSystem:
* canvas_transform maps from the Canvas logical pixel
coordinate system to the framebuffer coordinate system, taking into
account the logical/physical pixel scale factor, current FBO
position, and y-axis inversion.
* framebuffer_transform maps from the current GL viewport on the
framebuffer coordinate system to clip coordinates (-1 to 1).
Parameters
==========
viewport : tuple or None
The GL viewport rectangle (x, y, w, h). If None, then it
is assumed to cover the entire canvas.
fbo_size : tuple or None
The size of the active FBO. If None, then it is assumed to have the
same size as the canvas's framebuffer.
fbo_rect : tuple or None
The position and size (x, y, w, h) of the FBO in the coordinate
system of the canvas's framebuffer. If None, then the bounds are
assumed to cover the entire active framebuffer.
canvas : Canvas instance
Optionally set the canvas for this TransformSystem. See the
`canvas` property. |
def weather_history_at_id(self, id, start=None, end=None):
"""
Queries the OWM Weather API for weather history for the specified city ID.
A list of *Weather* objects is returned. It is possible to query for
weather history in a closed time period, whose boundaries can be passed
as optional parameters.
:param id: the city ID
:type id: int
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time
"""
assert type(id) is int, "'id' must be an int"
if id < 0:
raise ValueError("'id' value must be greater than 0")
params = {'id': id, 'lang': self._language}
if start is None and end is None:
pass
elif start is not None and end is not None:
unix_start = timeformatutils.to_UNIXtime(start)
unix_end = timeformatutils.to_UNIXtime(end)
if unix_start >= unix_end:
raise ValueError("Error: the start time boundary must " \
"precede the end time!")
current_time = time()
if unix_start > current_time:
raise ValueError("Error: the start time boundary must " \
"precede the current time!")
params['start'] = str(unix_start)
params['end'] = str(unix_end)
else:
raise ValueError("Error: one of the time boundaries is None, " \
"while the other is not!")
uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['weather_history'].parse_JSON(json_data) | Queries the OWM Weather API for weather history for the specified city ID.
A list of *Weather* objects is returned. It is possible to query for
weather history in a closed time period, whose boundaries can be passed
as optional parameters.
:param id: the city ID
:type id: int
:param start: the object conveying the time value for the start query
boundary (defaults to ``None``)
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param end: the object conveying the time value for the end query
boundary (defaults to ``None``)
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:returns: a list of *Weather* instances or ``None`` if history data is
not available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if the time boundaries are not in the correct
chronological order, if one of the time boundaries is not ``None``
and the other is or if one or both of the time boundaries are after
the current time |
def _prepare_data_dir(self, data):
"""Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str
"""
logger.debug(__("Preparing data directory for Data with id {}.", data.id))
with transaction.atomic():
# Create a temporary random location and then override it with data
# location id since object has to be created first.
# TODO Find a better solution, e.g. defer the database constraint.
temporary_location_string = uuid.uuid4().hex[:10]
data_location = DataLocation.objects.create(subpath=temporary_location_string)
data_location.subpath = str(data_location.id)
data_location.save()
data_location.data.add(data)
output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath)
dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755)
os.mkdir(output_path, mode=dir_mode)
# os.mkdir is not guaranteed to set the given mode
os.chmod(output_path, dir_mode)
return output_path | Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str |
def omitted_parcov(self):
"""get the omitted prior parameter covariance matrix
Returns
-------
omitted_parcov : pyemu.Cov
Note
----
returns a reference
If ErrorVariance.__omitted_parcov is None,
attribute is dynamically loaded
"""
if self.__omitted_parcov is None:
self.log("loading omitted_parcov")
self.__load_omitted_parcov()
self.log("loading omitted_parcov")
return self.__omitted_parcov | get the omitted prior parameter covariance matrix
Returns
-------
omitted_parcov : pyemu.Cov
Note
----
returns a reference
If ErrorVariance.__omitted_parcov is None,
attribute is dynamically loaded |
def stylesheet_declarations(string, is_merc=False, scale=1):
""" Parse a string representing a stylesheet into a list of declarations.
Required boolean is_merc indicates whether the projection should
be interpreted as spherical mercator, so we know what to do with
zoom/scale-denominator in parse_rule().
"""
# everything is display: map by default
display_map = Declaration(Selector(SelectorElement(['*'], [])),
Property('display'), Value('map', False),
(False, (0, 0, 0), (0, 0)))
declarations = [display_map]
tokens = cssTokenizer().tokenize(string)
variables = {}
while True:
try:
for declaration in parse_rule(tokens, variables, [], [], is_merc):
if scale != 1:
declaration.scaleBy(scale)
declarations.append(declaration)
except StopIteration:
break
# sort by a css-like method
return sorted(declarations, key=operator.attrgetter('sort_key')) | Parse a string representing a stylesheet into a list of declarations.
Required boolean is_merc indicates whether the projection should
be interpreted as spherical mercator, so we know what to do with
zoom/scale-denominator in parse_rule(). |
def add_infos(self, *keyvals, **kwargs):
"""Adds the given info and returns a dict composed of just this added info."""
kv_pairs = []
for key, val in keyvals:
key = key.strip()
val = str(val).strip()
if ':' in key:
raise ValueError('info key "{}" must not contain a colon.'.format(key))
kv_pairs.append((key, val))
for k, v in kv_pairs:
if k in self._info:
raise ValueError('info key "{}" already exists with value {}. '
'Cannot add it again with value {}.'.format(k, self._info[k], v))
self._info[k] = v
try:
with open(self._info_file, 'a') as outfile:
for k, v in kv_pairs:
outfile.write('{}: {}\n'.format(k, v))
except IOError:
if not kwargs.get('ignore_errors', False):
raise | Adds the given info and returns a dict composed of just this added info. |
def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging):
'''
models - input Versa models from which output is generated. Must be a sequence
object, not an iterator
'''
assert out is not None #Output stream required
if not isinstance(models, list): models = [models]
shorteners = shorteners or {}
all_propertybase = [propertybase] if propertybase else []
all_propertybase.append(VERSA_BASEIRI)
if any((base, propertybase, shorteners)):
out.write('# @docheader\n\n* @iri:\n')
if base:
out.write(' * @base: {0}'.format(base))
#for k, v in shorteners:
# out.write(' * @base: {0}'.format(base))
out.write('\n\n')
origin_space = set()
#base_out = models[0].base
for m in models:
origin_space.update(all_origins(m))
for o in origin_space:
out.write('# {0}\n\n'.format(o))
for o_, r, t, a in m.match(o):
abbr_r = abbreviate(r, all_propertybase)
value_format(t)
out.write('* {0}: {1}\n'.format(abbr_r, value_format(t)))
for k, v in a.items():
abbr_k = abbreviate(k, all_propertybase)
out.write(' * {0}: {1}\n'.format(k, value_format(v)))
out.write('\n')
return | models - input Versa models from which output is generated. Must be a sequence
object, not an iterator |
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return 255 | Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations. |
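Two sample evaluations of mandel; points inside the set exhaust the budget and return 255, while points outside return the iteration at which they escape:
print(mandel(0.0, 0.0, 100))  # 255 -- the origin never escapes
print(mandel(2.0, 2.0, 100))  # 0   -- |z| >= 2 already after the first iteration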
def nvmlUnitGetCount():
r"""
/**
* Retrieves the number of units in the system.
*
* For S-class products.
*
* @param unitCount Reference in which to return the number of units
*
* @return
* - \ref NVML_SUCCESS if \a unitCount has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetCount
"""
"""
/**
* Retrieves the number of units in the system.
*
* For S-class products.
*
* @param unitCount Reference in which to return the number of units
*
* @return
* - \ref NVML_SUCCESS if \a unitCount has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
"""
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetCount")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return bytes_to_str(c_count.value) | r"""
/**
* Retrieves the number of units in the system.
*
* For S-class products.
*
* @param unitCount Reference in which to return the number of units
*
* @return
* - \ref NVML_SUCCESS if \a unitCount has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlUnitGetCount |
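A minimal usage sketch, assuming a pynvml-style wrapper module and an NVML-capable system; on machines without S-class units the count is simply 0:
nvmlInit()
try:
    print(nvmlUnitGetCount())  # number of S-class units in the system
finally:
    nvmlShutdown()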