code | docstring
---|---
def _position_encoding_init(max_length, dim):
"""Init the sinusoid position encoding table """
position_enc = np.arange(max_length).reshape((-1, 1)) \
/ (np.power(10000, (2. / dim) * np.arange(dim).reshape((1, -1))))
# Apply sin to even columns and cos to odd columns.
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1
return position_enc | Init the sinusoid position encoding table |
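A minimal usage sketch (an illustration, not part of the source): it assumes numpy is imported as np and calls the helper above with small, arbitrary dimensions to show the table shape and the sin/cos alternation at position 0.

```python
import numpy as np

# Arbitrary illustrative sizes.
max_length, dim = 4, 6
table = _position_encoding_init(max_length, dim)

print(table.shape)  # (4, 6): one row per position, one column per encoding dimension
print(table[0])     # position 0: sin(0)=0 in even columns, cos(0)=1 in odd columns
```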
def get_nift_values() -> Mapping[str, str]:
"""Extract the list of NIFT names from the BEL resource and builds a dictionary mapping from the lowercased version
to the uppercase version.
"""
r = get_bel_resource(NIFT)
return {
name.lower(): name
for name in r['Values']
} | Extract the list of NIFT names from the BEL resource and build a dictionary mapping from the lowercased version
to the uppercase version. |
def redo(self, channel, image):
"""Add an entry with image modification info."""
chname = channel.name
if image is None:
# shouldn't happen, but let's play it safe
return
imname = image.get('name', 'none')
iminfo = channel.get_image_info(imname)
timestamp = iminfo.time_modified
if timestamp is None:
reason = iminfo.get('reason_modified', None)
if reason is not None:
self.fv.show_error(
"{0} invoked 'modified' callback to ChangeHistory with a "
"reason but without a timestamp. The plugin invoking the "
"callback is no longer be compatible with Ginga. "
"Please contact plugin developer to update the plugin "
"to use self.fv.update_image_info() like Mosaic "
"plugin.".format(imname))
# Image somehow lost its history
self.remove_image_info_cb(self.fv, channel, iminfo)
return
self.add_entry(chname, iminfo) | Add an entry with image modification info. |
def attr_string(filterKeys=(), filterValues=(), **kwargs):
"""Build a string consisting of 'key=value' substrings for each keyword
argument in :kwargs:
@param filterKeys: list of key names to ignore
@param filterValues: list of values to ignore (e.g. None will ignore all
key=value pairs that have that value).
"""
return ', '.join([str(k)+'='+repr(v) for k, v in kwargs.items()
if k not in filterKeys and v not in filterValues]) | Build a string consisting of 'key=value' substrings for each keyword
argument in :kwargs:
@param filterKeys: list of key names to ignore
@param filterValues: list of values to ignore (e.g. None will ignore all
key=value pairs that have that value). |
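A short usage sketch, assuming attr_string above is in scope; the keyword names are made up for illustration. A None in filterValues drops every pair whose value is None, and filterKeys drops pairs by name.

```python
# 'password' is dropped by key, 'retries' is dropped because its value is None.
print(attr_string(filterKeys=('password',), filterValues=(None,),
                  user='bob', password='secret', retries=None))
# -> user='bob'
```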
def purge_tokens(self, input_token_attrs=None):
""" Removes all specified token_attrs that exist in instance.token_attrs
:param input_token_attrs: list(str), list of string values of tokens to remove. If None, removes all
"""
if input_token_attrs is None:
remove_attrs = self.token_attrs
else:
remove_attrs = [token_attr for token_attr in self.token_attrs if token_attr.token in input_token_attrs]
self.token_attrs = [token_attr for token_attr in self.token_attrs if token_attr not in remove_attrs] | Removes all specified token_attrs that exist in instance.token_attrs
:param input_token_attrs: list(str), list of string values of tokens to remove. If None, removes all |
def fetch_and_index(self, fetch_func):
"Fetch data with func, return dict indexed by ID"
data, e = fetch_func()
if e: raise e
yield {row['id']: row for row in data} | Fetch data with func, return dict indexed by ID |
def search_all(self, quota=50, format='json'):
'''
Returns a single list containing up to 'quota' Result objects
Will keep requesting until quota is met
Will also truncate extra results to return exactly the given quota
'''
quota_left = quota
results = []
while quota_left > 0:
more_results = self._search(quota_left, format)
if not more_results:
break
results += more_results
quota_left = quota_left - len(more_results)
time.sleep(1)
results = results[0:quota]
return results | Returns a single list containing up to 'quota' Result objects
Will keep requesting until quota is met
Will also truncate extra results to return exactly the given quota |
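A standalone sketch of the same quota-driven pagination loop, with a stub standing in for self._search(); the stub, page size, and sleep interval are assumptions for illustration only.

```python
import time

def fetch_page(quota_left, page_size=20):
    # Stub: pretend the backend returns at most one page of results per call.
    return list(range(min(quota_left, page_size)))

def search_all_sketch(quota=50):
    quota_left, results = quota, []
    while quota_left > 0:
        more = fetch_page(quota_left)
        if not more:
            break
        results += more
        quota_left -= len(more)
        time.sleep(0)  # the original waits 1 second between requests
    return results[:quota]

print(len(search_all_sketch(50)))  # 50
```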
def keyPressEvent(self, event):
"""
Listen for the delete key and check to see if this should auto
set the remove property on the object.
:param event | <QKeyPressEvent>
"""
# tag the item for deletion
if self.useDefaultKeystrokes() and self.isEditable():
if event.key() == Qt.Key_Delete:
for item in self.selectedItems():
item.setRecordState(XOrbRecordItem.State.Removed)
# save/commit to the database
elif event.key() == Qt.Key_S and\
event.modifiers() == Qt.ControlModifier:
self.commit()
super(XOrbTreeWidget, self).keyPressEvent(event) | Listen for the delete key and check to see if this should auto
set the remove property on the object.
:param event | <QKeyPressEvent> |
def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]:
"""
Return the log entries for this computation and its children.
They are sorted in the same order they were emitted during the transaction processing, and
include the sequential counter as the first element of the tuple representing every entry.
"""
if self.is_error:
return []
else:
return sorted(itertools.chain(
self._log_entries,
*(child._get_log_entries() for child in self.children)
)) | Return the log entries for this computation and its children.
They are sorted in the same order they were emitted during the transaction processing, and
include the sequential counter as the first element of the tuple representing every entry. |
def process_module(self, node):
"""Process the astroid node stream."""
if self.config.file_header:
if sys.version_info[0] < 3:
pattern = re.compile(
r'\A' + self.config.file_header, re.LOCALE | re.MULTILINE)
else:
# The use of re.LOCALE is discouraged in python 3
pattern = re.compile(
r'\A' + self.config.file_header, re.MULTILINE)
content = None
with node.stream() as stream:
# Explicit decoding required by python 3
content = stream.read().decode('utf-8')
matches = pattern.findall(content)
if len(matches) != 1:
self.add_message('invalid-file-header', 1,
args=self.config.file_header) | Process the astroid node stream. |
def validate_cmap(val):
"""Validate a colormap
Parameters
----------
val: str or :class:`mpl.colors.Colormap`
Returns
-------
str or :class:`mpl.colors.Colormap`
Raises
------
ValueError"""
from matplotlib.colors import Colormap
try:
return validate_str(val)
except ValueError:
if not isinstance(val, Colormap):
raise ValueError(
"Could not find a valid colormap!")
return val | Validate a colormap
Parameters
----------
val: str or :class:`mpl.colors.Colormap`
Returns
-------
str or :class:`mpl.colors.Colormap`
Raises
------
ValueError |
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1,
training=True, bottleneck=True, padding='SAME'):
"""Applies residual function for RevNet.
Args:
x: input tensor
depth1: Number of output channels for the first and second conv layers.
depth2: Number of output channels for the third conv layer.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the first conv filter. Note that this particular
RevNet architecture only varies the stride for the first conv
filter. The stride for the second conv filter is always set to 1.
training: True for train phase, False for eval phase.
bottleneck: If true, apply bottleneck 1x1 down/up sampling.
padding: Padding for each conv layer.
Returns:
Output tensor after applying residual function for RevNet.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope('f', reuse=tf.AUTO_REUSE):
if first_batch_norm:
net = tf.layers.batch_normalization(x, training=training)
net = tf.nn.relu(net)
else:
net = x
if bottleneck:
net = conv(net, depth1, 1, strides=stride,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth1, 3, strides=1,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth2, 1, strides=1,
padding=padding, activation=None)
else:
net = conv(net, depth2, 3, strides=stride,
padding=padding, activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
net = conv(net, depth2, 3, strides=1,
padding=padding, activation=None)
return net | Applies residual function for RevNet.
Args:
x: input tensor
depth1: Number of output channels for the first and second conv layers.
depth2: Number of output channels for the third conv layer.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
first_batch_norm: Whether to keep the first batch norm layer or not.
Typically used in the first RevNet block.
stride: Stride for the first conv filter. Note that this particular
RevNet architecture only varies the stride for the first conv
filter. The stride for the second conv filter is always set to 1.
training: True for train phase, False for eval phase.
bottleneck: If true, apply bottleneck 1x1 down/up sampling.
padding: Padding for each conv layer.
Returns:
Output tensor after applying residual function for RevNet. |
def build(image, build_path, tag=None, build_args=None, fromline=None, args=[]):
""" build a docker image"""
if tag:
image = ":".join([image, tag])
bdir = tempfile.mkdtemp()
os.system('cp -r {0:s}/* {1:s}'.format(build_path, bdir))
if build_args:
stdw = tempfile.NamedTemporaryFile(dir=bdir, mode='w')
with open("{}/Dockerfile".format(bdir)) as std:
dfile = std.readlines()
for line in dfile:
if fromline and line.lower().startswith('from'):
stdw.write('FROM {:s}\n'.format(fromline))
elif line.lower().startswith("cmd"):
for arg in build_args:
stdw.write(arg+"\n")
stdw.write(line)
else:
stdw.write(line)
stdw.flush()
utils.xrun("docker build", args+["--force-rm","-f", stdw.name,
"-t", image,
bdir])
stdw.close()
else:
utils.xrun("docker build", args+["--force-rm", "-t", image,
bdir])
os.system('rm -rf {:s}'.format(bdir)) | build a docker image |
def get_initial_arguments(request, cache_id=None):
'Extract initial arguments for the dash app'
if cache_id is None:
return None
if initial_argument_location():
return cache.get(cache_id)
return request.session[cache_id] | Extract initial arguments for the dash app |
def get_coord_box(centre_x, centre_y, distance):
"""Get the square boundary coordinates for a given centre and distance"""
"""Todo: return coordinates inside a circle, rather than a square"""
return {
'top_left': (centre_x - distance, centre_y + distance),
'top_right': (centre_x + distance, centre_y + distance),
'bottom_left': (centre_x - distance, centre_y - distance),
'bottom_right': (centre_x + distance, centre_y - distance),
} | Get the square boundary coordinates for a given centre and distance |
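A quick usage sketch, assuming get_coord_box above is in scope; the centre and distance are arbitrary.

```python
box = get_coord_box(centre_x=0.0, centre_y=0.0, distance=1.0)
print(box['top_left'])      # (-1.0, 1.0)
print(box['bottom_right'])  # (1.0, -1.0)
```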
def resolve_dependencies(self):
""" evaluate each of the data dependencies of this build target,
returns the resulting dict"""
return {
key: (value.resolve() if isinstance(value, DeferredDependency) else value)
for key, value in self.data_dependencies.items()
} | evaluate each of the data dependencies of this build target,
returns the resulting dict |
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax | Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis. |
def handle_legacy_tloc(line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle translocations that lack the ``fromLoc`` and ``toLoc`` entries."""
log.log(5, 'legacy translocation statement: %s [%d]', line, position)
return tokens | Handle translocations that lack the ``fromLoc`` and ``toLoc`` entries. |
def check_fam_for_samples(required_samples, source, gold):
"""Check fam files for required_samples."""
# Checking the source panel
source_samples = set()
with open(source, 'r') as input_file:
for line in input_file:
sample = tuple(line.rstrip("\r\n").split(" ")[:2])
if sample in required_samples:
source_samples.add(sample)
# Checking the gold standard
gold_samples = set()
with open(gold, 'r') as input_file:
for line in input_file:
sample = tuple(line.rstrip("\r\n").split(" ")[:2])
if sample in required_samples:
gold_samples.add(sample)
# Checking if we got everything
logger.info(" - Found {} samples in source panel".format(
len(source_samples),
))
logger.info(" - Found {} samples in gold standard".format(
len(gold_samples),
))
return len(required_samples - (source_samples | gold_samples)) == 0 | Check fam files for required_samples. |
def pipe_privateinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for some text and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text
"""
value = utils.get_input(context, conf)
while True:
yield value | An input that prompts the user for some text and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : text |
def staticEval(self):
"""
Recursively statically evaluate the result of this operator
"""
for o in self.operands:
o.staticEval()
self.result._val = self.evalFn() | Recursively statically evaluate the result of this operator |
def match_score(self, supported: 'Language') -> int:
"""
Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function.
"""
if supported == self:
return 100
desired_complete = self.prefer_macrolanguage().maximize()
supported_complete = supported.prefer_macrolanguage().maximize()
desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region)
supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region)
return 100 - raw_distance(desired_triple, supported_triple) | Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function. |
def xyzlabel(labelx, labely, labelz):
"""Set all labels at once."""
xlabel(labelx)
ylabel(labely)
zlabel(labelz) | Set all labels at once. |
def lookup(self, topic):
"""Returns producers for a topic."""
nsq.assert_valid_topic_name(topic)
return self._request('GET', '/lookup', fields={'topic': topic}) | Returns producers for a topic. |
def sort(self):
"""Sort by detection time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family[0].detect_time
UTCDateTime(1970, 1, 1, 0, 3, 20)
>>> family.sort()[0].detect_time
UTCDateTime(1970, 1, 1, 0, 0)
"""
self.detections = sorted(self.detections, key=lambda d: d.detect_time)
return self | Sort by detection time.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> family[0].detect_time
UTCDateTime(1970, 1, 1, 0, 3, 20)
>>> family.sort()[0].detect_time
UTCDateTime(1970, 1, 1, 0, 0) |
def fill_rect(self, rect):
"""Fill a rectangle on the current rendering target with the drawing color.
Args:
rect (Rect): The destination rectangle, or None to fill the entire rendering target.
Raises:
SDLError: If an error is encountered.
"""
check_int_err(lib.SDL_RenderFillRect(self._ptr, rect._ptr)) | Fill a rectangle on the current rendering target with the drawing color.
Args:
rect (Rect): The destination rectangle, or None to fill the entire rendering target.
Raises:
SDLError: If an error is encountered. |
def persist_booking(booking, user):
"""
Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:param user: The user the booking should be tied to.
"""
if booking is not None:
existing_bookings = Booking.objects.filter(
user=user, booking_status__slug='inprogress').exclude(
pk=booking.pk)
existing_bookings.delete()
booking.session = None
booking.user = user
booking.save() | Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:param user: The user the booking should be tied to. |
def resolve_args(self, args):
"""
Resolve function call arguments that have object ids
into instances of these objects
"""
def resolve(a):
if isinstance(a, dict):
_id = a.get('i', None)
# If it's a compound type (including dict)
# Check if it has id (i) to determine that it has
# a reference in object storage. If it's None, then it's
# a dict originated at the remote
return self.objects[_id] if _id else a['v']
# if an array, resolve its elements
if isinstance(a, (list, tuple)):
return [resolve(i) for i in a]
return a
return [resolve(a) for a in args] | Resolve function call arguments that have object ids
into instances of these objects |
def get_uris(self, base_uri, filter_list=None):
"""Return a set of internal URIs."""
return {
re.sub(r'^/', base_uri, link.attrib['href'])
for link in self.parsedpage.get_nodes_by_selector('a')
if 'href' in link.attrib and (
link.attrib['href'].startswith(base_uri) or
link.attrib['href'].startswith('/')
) and
not is_uri_to_be_filtered(link.attrib['href'], filter_list)
} | Return a set of internal URIs. |
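A small sketch of the URI-joining trick used above: re.sub swaps a leading '/' for the base URI, so root-relative links resolve against it. It assumes base_uri carries a trailing slash; the URLs are made up.

```python
import re

base_uri = 'https://example.com/'
print(re.sub(r'^/', base_uri, '/about'))   # https://example.com/about
print(re.sub(r'^/', base_uri, 'contact'))  # unchanged: no leading '/'
```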
def data(self, data=None):
""" Set response data """
if data is not None:
self.response_model.data = data
return self.response_model.data | Set response data |
def generate_field_spec(row):
""" Generate a set of metadata for each field/column in
the data. This is loosely based on jsontableschema. """
names = set()
fields = []
for cell in row:
name = column_alias(cell, names)
field = {
'name': name,
'title': cell.column,
'type': unicode(cell.type).lower(),
'has_nulls': False,
'has_empty': False,
'samples': []
}
if hasattr(cell.type, 'format'):
field['type'] = 'date'
field['format'] = cell.type.format
fields.append(field)
return fields | Generate a set of metadata for each field/column in
the data. This is loosely based on jsontableschema. |
def execute(self, eopatch):
""" Execute computation of HoG features on input eopatch
:param eopatch: Input eopatch
:type eopatch: eolearn.core.EOPatch
:return: EOPatch instance with new keys holding the HoG features and HoG image for visualisation.
:rtype: eolearn.core.EOPatch
"""
for feature_type, feature_name, new_feature_name in self.feature:
result = self._compute_hog(eopatch[feature_type][feature_name])
eopatch[feature_type][new_feature_name] = result[0]
if self.visualize:
eopatch[feature_type][self.visualize_name] = result[1]
return eopatch | Execute computation of HoG features on input eopatch
:param eopatch: Input eopatch
:type eopatch: eolearn.core.EOPatch
:return: EOPatch instance with new keys holding the HoG features and HoG image for visualisation.
:rtype: eolearn.core.EOPatch |
def findExtname(fimg, extname, extver=None):
"""
Returns the list number of the extension corresponding to EXTNAME given.
"""
i = 0
extnum = None
for chip in fimg:
hdr = chip.header
if 'EXTNAME' in hdr:
if hdr['EXTNAME'].strip() == extname.upper():
if extver is None or hdr['EXTVER'] == extver:
extnum = i
break
i += 1
return extnum | Returns the list number of the extension corresponding to EXTNAME given. |
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove presentational and Word-generated clutter
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5) | Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n |
def emboss_pepstats_parser(infile):
"""Get dictionary of pepstats results.
Args:
infile: Path to pepstats outfile
Returns:
dict: Parsed information from pepstats
TODO:
Only currently parsing the bottom of the file for percentages of properties.
"""
with open(infile) as f:
lines = f.read().split('\n')
info_dict = {}
for l in lines[38:47]:
info = l.split('\t')
cleaninfo = list(filter(lambda x: x != '', info))
prop = cleaninfo[0]
num = cleaninfo[2]
percent = float(cleaninfo[-1]) / float(100)
info_dict['mol_percent_' + prop.lower() + '-pepstats'] = percent
return info_dict | Get dictionary of pepstats results.
Args:
infile: Path to pepstats outfile
Returns:
dict: Parsed information from pepstats
TODO:
Only currently parsing the bottom of the file for percentages of properties. |
def _multiply(self, x1, x2, out):
"""Raw pointwise multiplication of two elements."""
self.tspace._multiply(x1.tensor, x2.tensor, out.tensor) | Raw pointwise multiplication of two elements. |
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
"""Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tensor.
keep_prob: A scalar Tensor with the same type as x.
The probability that each element is kept.
broadcast_dims: an optional list of integers
the dimensions along which to broadcast the keep/drop flags.
**kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
Returns:
Tensor of the same shape as x.
"""
assert "noise_shape" not in kwargs
if broadcast_dims:
shape = tf.shape(x)
ndims = len(x.get_shape())
# Allow dimensions like "-1" as well.
broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
kwargs["noise_shape"] = [
1 if i in broadcast_dims else shape[i] for i in range(ndims)
]
return tf.nn.dropout(x, keep_prob, **kwargs) | Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tensor.
keep_prob: A scalar Tensor with the same type as x.
The probability that each element is kept.
broadcast_dims: an optional list of integers
the dimensions along which to broadcast the keep/drop flags.
**kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
Returns:
Tensor of the same shape as x. |
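A pure-Python sketch of how broadcast_dims turns into the noise_shape handed to tf.nn.dropout, assuming a concrete static shape; the TensorFlow call itself is omitted.

```python
def noise_shape_from_broadcast_dims(shape, broadcast_dims):
    # Normalize negative dims, then force a 1 in every broadcast dimension.
    ndims = len(shape)
    dims = {d + ndims if d < 0 else d for d in broadcast_dims}
    return [1 if i in dims else shape[i] for i in range(ndims)]

# For a [batch, length, depth] tensor, sharing one keep/drop mask across depth:
print(noise_shape_from_broadcast_dims([8, 128, 512], broadcast_dims=[-1]))
# -> [8, 128, 1]
```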
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars()) | Display all information sysconfig contains. |
def extension_by_source(source, mime_type):
"Return the file extension used by this plugin"
# TODO: should get this information from the plugin
extension = source.plugin_name
if extension:
return extension
if mime_type:
return mime_type.split("/")[-1] | Return the file extension used by this plugin |
def configure(self, **configs):
"""Configure the consumer instance
Configuration settings can be passed to constructor,
otherwise defaults will be used:
Keyword Arguments:
bootstrap_servers (list): List of initial broker nodes the consumer
should contact to bootstrap initial cluster metadata. This does
not have to be the full node list. It just needs to have at
least one broker that will respond to a Metadata API Request.
client_id (str): a unique name for this client. Defaults to
'kafka.consumer.kafka'.
group_id (str): the name of the consumer group to join,
Offsets are fetched / committed to this group name.
fetch_message_max_bytes (int, optional): Maximum bytes for each
topic/partition fetch request. Defaults to 1024*1024.
fetch_min_bytes (int, optional): Minimum amount of data the server
should return for a fetch request, otherwise wait up to
fetch_wait_max_ms for more data to accumulate. Defaults to 1.
fetch_wait_max_ms (int, optional): Maximum time for the server to
block waiting for fetch_min_bytes messages to accumulate.
Defaults to 100.
refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
when refreshing metadata on errors (subject to random jitter).
Defaults to 200.
socket_timeout_ms (int, optional): TCP socket timeout in
milliseconds. Defaults to 30*1000.
auto_offset_reset (str, optional): A policy for resetting offsets on
OffsetOutOfRange errors. 'smallest' will move to the oldest
available message, 'largest' will move to the most recent. Any
other value will raise the exception. Defaults to 'largest'.
deserializer_class (callable, optional): Any callable that takes a
raw message value and returns a deserialized value. Defaults to
lambda msg: msg.
auto_commit_enable (bool, optional): Enabling auto-commit will cause
the KafkaConsumer to periodically commit offsets without an
explicit call to commit(). Defaults to False.
auto_commit_interval_ms (int, optional): If auto_commit_enabled,
the milliseconds between automatic offset commits. Defaults to
60 * 1000.
auto_commit_interval_messages (int, optional): If
auto_commit_enabled, a number of messages consumed between
automatic offset commits. Defaults to None (disabled).
consumer_timeout_ms (int, optional): number of milliseconds to throw
a timeout exception to the consumer if no message is available
for consumption. Defaults to -1 (don't throw exception).
Configuration parameters are described in more detail at
http://kafka.apache.org/documentation.html#highlevelconsumerapi
"""
configs = self._deprecate_configs(**configs)
self._config = {}
for key in self.DEFAULT_CONFIG:
self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key])
if configs:
raise KafkaConfigurationError('Unknown configuration key(s): ' +
str(list(configs.keys())))
if self._config['auto_commit_enable']:
if not self._config['group_id']:
raise KafkaConfigurationError(
'KafkaConsumer configured to auto-commit '
'without required consumer group (group_id)'
)
# Check auto-commit configuration
if self._config['auto_commit_enable']:
logger.info("Configuring consumer to auto-commit offsets")
self._reset_auto_commit()
if not self._config['bootstrap_servers']:
raise KafkaConfigurationError(
'bootstrap_servers required to configure KafkaConsumer'
)
self._client = KafkaClient(
self._config['bootstrap_servers'],
client_id=self._config['client_id'],
timeout=(self._config['socket_timeout_ms'] / 1000.0)
) | Configure the consumer instance
Configuration settings can be passed to constructor,
otherwise defaults will be used:
Keyword Arguments:
bootstrap_servers (list): List of initial broker nodes the consumer
should contact to bootstrap initial cluster metadata. This does
not have to be the full node list. It just needs to have at
least one broker that will respond to a Metadata API Request.
client_id (str): a unique name for this client. Defaults to
'kafka.consumer.kafka'.
group_id (str): the name of the consumer group to join,
Offsets are fetched / committed to this group name.
fetch_message_max_bytes (int, optional): Maximum bytes for each
topic/partition fetch request. Defaults to 1024*1024.
fetch_min_bytes (int, optional): Minimum amount of data the server
should return for a fetch request, otherwise wait up to
fetch_wait_max_ms for more data to accumulate. Defaults to 1.
fetch_wait_max_ms (int, optional): Maximum time for the server to
block waiting for fetch_min_bytes messages to accumulate.
Defaults to 100.
refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
when refreshing metadata on errors (subject to random jitter).
Defaults to 200.
socket_timeout_ms (int, optional): TCP socket timeout in
milliseconds. Defaults to 30*1000.
auto_offset_reset (str, optional): A policy for resetting offsets on
OffsetOutOfRange errors. 'smallest' will move to the oldest
available message, 'largest' will move to the most recent. Any
other value will raise the exception. Defaults to 'largest'.
deserializer_class (callable, optional): Any callable that takes a
raw message value and returns a deserialized value. Defaults to
lambda msg: msg.
auto_commit_enable (bool, optional): Enabling auto-commit will cause
the KafkaConsumer to periodically commit offsets without an
explicit call to commit(). Defaults to False.
auto_commit_interval_ms (int, optional): If auto_commit_enabled,
the milliseconds between automatic offset commits. Defaults to
60 * 1000.
auto_commit_interval_messages (int, optional): If
auto_commit_enabled, a number of messages consumed between
automatic offset commits. Defaults to None (disabled).
consumer_timeout_ms (int, optional): number of milliseconds to throw
a timeout exception to the consumer if no message is available
for consumption. Defaults to -1 (don't throw exception).
Configuration parameters are described in more detail at
http://kafka.apache.org/documentation.html#highlevelconsumerapi |
def update_datetime(value, range = None):
"""
Updates (drifts) a Date value within a specified range.
:param value: a Date value to drift.
:param range: (optional) a drift range in days. Default: 10 days
:return: an updated DateTime value.
"""
range = range if range is not None else 10
if range < 0:
return value
days = RandomFloat.next_float(-range, range)
return value + datetime.timedelta(days) | Updates (drifts) a Date value within a specified range.
:param value: a Date value to drift.
:param range: (optional) a drift range in days. Default: 10 days
:return: an updated DateTime value. |
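A small sketch of the same drift idea using only the standard library, with random.uniform standing in for RandomFloat.next_float (an assumption, not the original helper).

```python
import datetime
import random

def update_datetime_sketch(value, range_days=10):
    # Drift the value by a random number of days in [-range_days, +range_days].
    if range_days < 0:
        return value
    return value + datetime.timedelta(days=random.uniform(-range_days, range_days))

print(update_datetime_sketch(datetime.datetime(2020, 1, 15)))
```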
def _from_dict(cls, _dict):
"""Initialize a SpeechRecognitionResults object from a json dictionary."""
args = {}
if 'results' in _dict:
args['results'] = [
SpeechRecognitionResult._from_dict(x)
for x in (_dict.get('results'))
]
if 'result_index' in _dict:
args['result_index'] = _dict.get('result_index')
if 'speaker_labels' in _dict:
args['speaker_labels'] = [
SpeakerLabelsResult._from_dict(x)
for x in (_dict.get('speaker_labels'))
]
if 'warnings' in _dict:
args['warnings'] = _dict.get('warnings')
return cls(**args) | Initialize a SpeechRecognitionResults object from a json dictionary. |
def match_value_to_text(self, text):
"""
this is going to be the tricky bit - probably not possible
to get the 'exact' rating for a value. Will need to do sentiment
analysis of the text to see how it matches the rating. Even that
sounds like it wont work - maybe a ML algorithm would do it, but
that requires a large body of text already matched to values - and
values aren't even defined as far as I have found.
UPDATE - this could work if we assume values can be single words,
eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc
"""
if self.nme in text:
res = 0.8
else:
res = 0.2
return self.nme + ' = ' + str(res) + ' match against ' + text | this is going to be the tricky bit - probably not possible
to get the 'exact' rating for a value. Will need to do sentiment
analysis of the text to see how it matches the rating. Even that
sounds like it wont work - maybe a ML algorithm would do it, but
that requires a large body of text already matched to values - and
values aren't even defined as far as I have found.
UPDATE - this could work if we assume values can be single words,
eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc |
def match(record, config=None):
"""Given a record, yield the records in INSPIRE most similar to it.
This method can be used to detect if a record that we are ingesting as a
submission or as an harvest is already present in the system, or to find
out which record a reference should be pointing to.
"""
if config is None:
current_app.logger.debug('No configuration provided. Falling back to the default configuration.')
config = current_app.config['MATCHER_DEFAULT_CONFIGURATION']
try:
algorithm, doc_type, index = config['algorithm'], config['doc_type'], config['index']
except KeyError as e:
raise KeyError('Malformed configuration: %s.' % repr(e))
source = config.get('source', [])
match_deleted = config.get('match_deleted', False)
collections = config.get('collections')
if not (collections is None or (
isinstance(collections, (list, tuple)) and
all(isinstance(collection, string_types) for collection in collections)
)):
raise ValueError('Malformed collections. Expected a list of strings but got: %s' % repr(collections))
for i, step in enumerate(algorithm):
try:
queries = step['queries']
except KeyError:
raise KeyError('Malformed algorithm: step %d has no queries.' % i)
validator = _get_validator(step.get('validator'))
for j, query in enumerate(queries):
try:
body = compile(query, record, collections=collections, match_deleted=match_deleted)
except Exception as e:
raise ValueError('Malformed query. Query %d of step %d does not compile: %s.' % (j, i, repr(e)))
if not body:
continue
current_app.logger.debug('Sending ES query: %s' % repr(body))
if source:
result = es.search(index=index, doc_type=doc_type, body=body, _source=source)
else:
result = es.search(index=index, doc_type=doc_type, body=body)
for hit in result['hits']['hits']:
if validator(record, hit):
yield hit | Given a record, yield the records in INSPIRE most similar to it.
This method can be used to detect if a record that we are ingesting as a
submission or as an harvest is already present in the system, or to find
out which record a reference should be pointing to. |
def bool_assignment(arg, patterns=None):
"""
Summary:
Enforces correct bool argument assignment
Arg:
:arg (*): arg which must be interpreted as either bool True or False
Returns:
bool assignment | TYPE: bool
"""
arg = str(arg) # only eval type str
try:
if patterns is None:
patterns = (
(re.compile(r'^(true|false)$', flags=re.IGNORECASE), lambda x: x.lower() == 'true'),
(re.compile(r'^(yes|no)$', flags=re.IGNORECASE), lambda x: x.lower() == 'yes'),
(re.compile(r'^(y|n)$', flags=re.IGNORECASE), lambda x: x.lower() == 'y')
)
if not arg:
return '' # default selected
else:
for pattern, func in patterns:
if pattern.match(arg):
return func(arg)
except Exception as e:
raise e | Summary:
Enforces correct bool argument assignment
Arg:
:arg (*): arg which must be interpreted as either bool True or False
Returns:
bool assignment | TYPE: bool |
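A few illustrative calls, assuming bool_assignment above is in scope (it relies on the re module being imported).

```python
print(bool_assignment('Yes'))    # True
print(bool_assignment('false'))  # False
print(bool_assignment('n'))      # False
print(bool_assignment(''))       # '' (default selected)
```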
def fix_reference_url(url):
"""Used to parse an incorect url to try to fix it with the most common ocurrences for errors.
If the fixed url is still incorrect, it returns ``None``.
Returns:
String containing the fixed url or the original one if it could not be fixed.
"""
new_url = url
new_url = fix_url_bars_instead_of_slashes(new_url)
new_url = fix_url_add_http_if_missing(new_url)
new_url = fix_url_replace_tilde(new_url)
try:
rfc3987.parse(new_url, rule="URI")
return new_url
except ValueError:
return url | Used to parse an incorrect url and try to fix it for the most common occurrences of errors.
If the fixed url is still incorrect, the original one is returned.
Returns:
String containing the fixed url or the original one if it could not be fixed. |
def _browse(c):
"""
Open build target's index.html in a browser (using 'open').
"""
index = join(c.sphinx.target, c.sphinx.target_file)
c.run("open {0}".format(index)) | Open build target's index.html in a browser (using 'open'). |
def _prepare_script(self, dest_dir, program):
"""Copy the script into the destination directory.
:param dest_dir: The target directory where the script will be
saved.
:param program: The script text to be saved.
:return: The name of the script file.
:rtype: str
"""
script_name = ExecutorFiles.PROCESS_SCRIPT
dest_file = os.path.join(dest_dir, script_name)
with open(dest_file, 'wt') as dest_file_obj:
dest_file_obj.write(program)
os.chmod(dest_file, 0o700)
return script_name | Copy the script into the destination directory.
:param dest_dir: The target directory where the script will be
saved.
:param program: The script text to be saved.
:return: The name of the script file.
:rtype: str |
def mclennan_tourky(g, init=None, epsilon=1e-3, max_iter=200,
full_output=False):
r"""
Find one mixed-action epsilon-Nash equilibrium of an N-player normal
form game by the fixed point computation algorithm by McLennan and
Tourky [1]_.
Parameters
----------
g : NormalFormGame
NormalFormGame instance.
init : array_like(int or array_like(float, ndim=1)), optional
Initial action profile, an array of N objects, where each object
must be an integer (pure action) or an array of floats (mixed
action). If None, default to an array of zeros (the zero-th
action for each player).
epsilon : scalar(float), optional(default=1e-3)
Value of epsilon-optimality.
max_iter : scalar(int), optional(default=200)
Maximum number of iterations.
full_output : bool, optional(default=False)
If False, only the computed Nash equilibrium is returned. If
True, the return value is `(NE, res)`, where `NE` is the Nash
equilibrium and `res` is a `NashResult` object.
Returns
-------
NE : tuple(ndarray(float, ndim=1))
Tuple of computed Nash equilibrium mixed actions.
res : NashResult
Object containing information about the computation. Returned
only when `full_output` is True. See `NashResult` for details.
Examples
--------
Consider the following version of 3-player "anti-coordination" game,
where action 0 is a safe action which yields payoff 1, while action
1 yields payoff :math:`v` if no other player plays 1 and payoff 0
otherwise:
>>> N = 3
>>> v = 2
>>> payoff_array = np.empty((2,)*N)
>>> payoff_array[0, :] = 1
>>> payoff_array[1, :] = 0
>>> payoff_array[1].flat[0] = v
>>> g = NormalFormGame((Player(payoff_array),)*N)
>>> print(g)
3-player NormalFormGame with payoff profile array:
[[[[ 1., 1., 1.], [ 1., 1., 2.]],
[[ 1., 2., 1.], [ 1., 0., 0.]]],
[[[ 2., 1., 1.], [ 0., 1., 0.]],
[[ 0., 0., 1.], [ 0., 0., 0.]]]]
This game has a unique symmetric Nash equilibrium, where the
equilibrium action is given by :math:`(p^*, 1-p^*)` with :math:`p^*
= 1/v^{1/(N-1)}`:
>>> p_star = 1/(v**(1/(N-1)))
>>> [p_star, 1 - p_star]
[0.7071067811865475, 0.29289321881345254]
Obtain an approximate Nash equilibrium of this game by
`mclennan_tourky`:
>>> epsilon = 1e-5 # Value of epsilon-optimality
>>> NE = mclennan_tourky(g, epsilon=epsilon)
>>> print(NE[0], NE[1], NE[2], sep='\n')
[ 0.70710754 0.29289246]
[ 0.70710754 0.29289246]
[ 0.70710754 0.29289246]
>>> g.is_nash(NE, tol=epsilon)
True
Additional information is returned if `full_output` is set True:
>>> NE, res = mclennan_tourky(g, epsilon=epsilon, full_output=True)
>>> res.converged
True
>>> res.num_iter
18
References
----------
.. [1] A. McLennan and R. Tourky, "From Imitation Games to
Kakutani," 2006.
"""
try:
N = g.N
except:
raise TypeError('g must be a NormalFormGame')
if N < 2:
raise NotImplementedError('Not implemented for 1-player games')
if init is None:
init = (0,) * N
try:
l = len(init)
except TypeError:
raise TypeError('init must be array_like')
if l != N:
raise ValueError(
'init must be of length {N}'.format(N=N)
)
indptr = np.empty(N+1, dtype=int)
indptr[0] = 0
indptr[1:] = np.cumsum(g.nums_actions)
x_init = _flatten_action_profile(init, indptr)
is_approx_fp = lambda x: _is_epsilon_nash(x, g, epsilon, indptr)
x_star, converged, num_iter = \
_compute_fixed_point_ig(_best_response_selection, x_init, max_iter,
verbose=0, print_skip=1,
is_approx_fp=is_approx_fp,
g=g, indptr=indptr)
NE = _get_action_profile(x_star, indptr)
if not full_output:
return NE
res = NashResult(NE=NE,
converged=converged,
num_iter=num_iter,
max_iter=max_iter,
init=init,
epsilon=epsilon)
return NE, res | r"""
Find one mixed-action epsilon-Nash equilibrium of an N-player normal
form game by the fixed point computation algorithm by McLennan and
Tourky [1]_.
Parameters
----------
g : NormalFormGame
NormalFormGame instance.
init : array_like(int or array_like(float, ndim=1)), optional
Initial action profile, an array of N objects, where each object
must be an integer (pure action) or an array of floats (mixed
action). If None, default to an array of zeros (the zero-th
action for each player).
epsilon : scalar(float), optional(default=1e-3)
Value of epsilon-optimality.
max_iter : scalar(int), optional(default=200)
Maximum number of iterations.
full_output : bool, optional(default=False)
If False, only the computed Nash equilibrium is returned. If
True, the return value is `(NE, res)`, where `NE` is the Nash
equilibrium and `res` is a `NashResult` object.
Returns
-------
NE : tuple(ndarray(float, ndim=1))
Tuple of computed Nash equilibrium mixed actions.
res : NashResult
Object containing information about the computation. Returned
only when `full_output` is True. See `NashResult` for details.
Examples
--------
Consider the following version of 3-player "anti-coordination" game,
where action 0 is a safe action which yields payoff 1, while action
1 yields payoff :math:`v` if no other player plays 1 and payoff 0
otherwise:
>>> N = 3
>>> v = 2
>>> payoff_array = np.empty((2,)*N)
>>> payoff_array[0, :] = 1
>>> payoff_array[1, :] = 0
>>> payoff_array[1].flat[0] = v
>>> g = NormalFormGame((Player(payoff_array),)*N)
>>> print(g)
3-player NormalFormGame with payoff profile array:
[[[[ 1., 1., 1.], [ 1., 1., 2.]],
[[ 1., 2., 1.], [ 1., 0., 0.]]],
[[[ 2., 1., 1.], [ 0., 1., 0.]],
[[ 0., 0., 1.], [ 0., 0., 0.]]]]
This game has a unique symmetric Nash equilibrium, where the
equilibrium action is given by :math:`(p^*, 1-p^*)` with :math:`p^*
= 1/v^{1/(N-1)}`:
>>> p_star = 1/(v**(1/(N-1)))
>>> [p_star, 1 - p_star]
[0.7071067811865475, 0.29289321881345254]
Obtain an approximate Nash equilibrium of this game by
`mclennan_tourky`:
>>> epsilon = 1e-5 # Value of epsilon-optimality
>>> NE = mclennan_tourky(g, epsilon=epsilon)
>>> print(NE[0], NE[1], NE[2], sep='\n')
[ 0.70710754 0.29289246]
[ 0.70710754 0.29289246]
[ 0.70710754 0.29289246]
>>> g.is_nash(NE, tol=epsilon)
True
Additional information is returned if `full_output` is set True:
>>> NE, res = mclennan_tourky(g, epsilon=epsilon, full_output=True)
>>> res.converged
True
>>> res.num_iter
18
References
----------
.. [1] A. McLennan and R. Tourky, "From Imitation Games to
Kakutani," 2006. |
def stage_all(self):
"""
Stages all changed and untracked files
"""
LOGGER.info('Staging all files')
self.repo.git.add(A=True) | Stages all changed and untracked files |
def init():
"""
Initialize synchronously.
"""
loop = asyncio.get_event_loop()
if loop.is_running():
raise Exception("You must initialize the Ray async API by calling "
"async_api.init() or async_api.as_future(obj) before "
"the event loop starts.")
else:
asyncio.get_event_loop().run_until_complete(_async_init()) | Initialize synchronously. |
def from_api(cls, api):
"""
create an application description for the todo app,
that based on the api can use either the api or the ux for interaction
"""
ux = TodoUX(api)
from .pseudorpc import PseudoRpc
rpc = PseudoRpc(api)
return cls({ViaAPI: api, ViaUX: ux, ViaRPC: rpc}) | create an application description for the todo app,
that based on the api can use either the api or the ux for interaction |
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
"""Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
"""
def bq_schema_field(name, bq_type, mode):
return {"name": name, "type": bq_type, "mode": mode}
if isinstance(v, list):
if len(v) == 0:
raise Exception(
"Can't describe schema because of empty list {0}:[]".format(k))
v = v[0]
mode = "repeated"
else:
mode = "nullable"
bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)
if not bq_type:
raise InvalidTypeException(k, v)
field = bq_schema_field(k, bq_type, mode)
if bq_type == "record":
try:
field['fields'] = schema_from_record(v, timestamp_parser)
except InvalidTypeException as e:
# recursively construct the key causing the error
raise InvalidTypeException("%s.%s" % (k, e.key), e.value)
return field | Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]} |
def urlretrieve(url, filename=None, reporthook=None, data=None):
"""
Retrieve a URL into a temporary location on disk.
Requires a URL argument. If a filename is passed, it is used as
the temporary file location. The reporthook argument should be
a callable that accepts a block number, a read size, and the
total file size of the URL target. The data argument should be
valid URL encoded data.
If a filename is passed and the URL points to a local resource,
the result is a copy from local file to new file.
Returns a tuple containing the path to the newly created
data file as well as the resulting HTTPMessage object.
"""
url_type, path = splittype(url)
with contextlib.closing(urlopen(url, data)) as fp:
headers = fp.info()
# Just return the local path and the "headers" for file://
# URLs. No sense in performing a copy unless requested.
if url_type == "file" and not filename:
return os.path.normpath(path), headers
# Handle temporary file setup.
if filename:
tfp = open(filename, 'wb')
else:
tfp = tempfile.NamedTemporaryFile(delete=False)
filename = tfp.name
_url_tempfiles.append(filename)
with tfp:
result = filename, headers
bs = 1024*8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while True:
block = fp.read(bs)
if not block:
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: got only %i out of %i bytes"
% (read, size), result)
return result | Retrieve a URL into a temporary location on disk.
Requires a URL argument. If a filename is passed, it is used as
the temporary file location. The reporthook argument should be
a callable that accepts a block number, a read size, and the
total file size of the URL target. The data argument should be
valid URL encoded data.
If a filename is passed and the URL points to a local resource,
the result is a copy from local file to new file.
Returns a tuple containing the path to the newly created
data file as well as the resulting HTTPMessage object. |
def restore(self):
"""Restore snapshotted state."""
if not self._snapshot:
return
yield from self.set_muted(self._snapshot['muted'])
yield from self.set_volume(self._snapshot['volume'])
yield from self.set_stream(self._snapshot['stream'])
self.callback()
_LOGGER.info('restored snapshot of state of %s', self.friendly_name) | Restore snapshotted state. |
def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param xml_declaration: if ``True`` (default) prints a leading XML
declaration line
:type xml_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node,
OBSERVATION_XMLNS_PREFIX,
OBSERVATION_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration) | Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param xml_declaration: if ``True`` (default) prints a leading XML
declaration line
:type xml_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string |
def include(self, spec, *,
basePath=None,
operationId_mapping=None,
name=None):
""" Adds a new specification to a router
:param spec: path to specification
:param basePath: override base path specified in the specification
:param operationId_mapping: mapping for handlers
:param name: name to access original spec
"""
data = self._file_loader.load(spec)
if basePath is None:
basePath = data.get('basePath', '')
if name is not None:
d = dict(data)
d['basePath'] = basePath
self._swagger_data[name] = d
# TODO clear d
swagger_data = {k: v for k, v in data.items() if k != 'paths'}
swagger_data['basePath'] = basePath
for url, methods in data.get('paths', {}).items():
url = basePath + url
methods = dict(methods)
location_name = methods.pop(self.NAME, None)
parameters = methods.pop('parameters', [])
for method, body in methods.items():
if method == self.VIEW:
view = utils.import_obj(body)
view.add_routes(self, prefix=url, encoding=self._encoding)
continue
body = dict(body)
if parameters:
body['parameters'] = parameters + \
body.get('parameters', [])
handler = body.pop(self.HANDLER, None)
name = location_name or handler
if not handler:
op_id = body.get('operationId')
if op_id and operationId_mapping:
handler = operationId_mapping.get(op_id)
if handler:
name = location_name or op_id
if handler:
validate = body.pop(self.VALIDATE, self._default_validate)
self.add_route(
method.upper(), utils.url_normolize(url),
handler=handler,
name=name,
swagger_data=body,
validate=validate,
)
self._swagger_data[basePath] = swagger_data
for route in self.routes():
if isinstance(route, SwaggerRoute) and not route.is_built:
route.build_swagger_data(self._file_loader) | Adds a new specification to a router
:param spec: path to specification
:param basePath: override the base path specified in the specification
:param operationId_mapping: mapping for handlers
:param name: name to access original spec |
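A hypothetical usage sketch of the include method above; the spec path, base path, and handler dotted path are assumptions, not values from the source, and `router` is assumed to be an instance of the routing class defined above.
router.include(
    'swagger/api.yaml',                                          # hypothetical spec file
    basePath='/v1',                                              # overrides the spec's basePath
    operationId_mapping={'getUser': 'myapp.handlers.get_user'},  # operationId -> handler
    name='api',                                                  # key to fetch the original spec later
)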
def _get_u16(self, msb, lsb):
"""
Convert 2 bytes into an unsigned int.
"""
buf = struct.pack('>BB', self._get_u8(msb), self._get_u8(lsb))
return int(struct.unpack('>H', buf)[0]) | Convert 2 bytes into an unsigned int. |
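A stand-alone sketch of the same big-endian byte packing, with literal byte values in place of the register reads.
import struct

# 0x12 (msb) and 0x34 (lsb) pack into b'\x12\x34', which unpacks
# to the unsigned 16-bit value 0x1234 == 4660.
buf = struct.pack('>BB', 0x12, 0x34)
value = struct.unpack('>H', buf)[0]
assert value == 0x1234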
def validate_param_name(name, param_type):
"""Validate that the name follows posix conventions for env variables."""
# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_235
#
# 3.235 Name
# In the shell command language, a word consisting solely of underscores,
# digits, and alphabetics from the portable character set.
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
raise ValueError('Invalid %s: %s' % (param_type, name)) | Validate that the name follows posix conventions for env variables. |
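The same POSIX-name rule applied directly as a small self-contained check; the sample names are hypothetical.
import re

pattern = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
for name in ('MY_VAR', '_private1', '1_bad', 'has-dash'):
    # Only names starting with a letter or underscore and containing
    # letters, digits, or underscores pass the check.
    print(name, bool(pattern.match(name)))
# MY_VAR True, _private1 True, 1_bad False, has-dash False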
def _set_adj_type(self, v, load=False):
"""
Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adj_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adj_type() directly.
YANG Description: Type of ISIS Adjacency
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adj_type must be of a type compatible with isis-adj-type""",
'defined-type': "brocade-isis-operational:isis-adj-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-adj-ptpt': {'value': 8}, u'is-adj-l1': {'value': 2}, u'is-adj-l2': {'value': 4}, u'is-adj-es': {'value': 1}, u'is-adj-unknown': {'value': 0}, u'is-adj-stct': {'value': 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-adj-type', is_config=False)""",
})
self.__adj_type = t
if hasattr(self, '_set'):
self._set() | Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adj_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adj_type() directly.
YANG Description: Type of ISIS Adjacency |
def purge(self):
"""
Clean old transactions
"""
while not self.stopped.isSet():
self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
self._messageLayer.purge() | Clean old transactions |
def build_authorization_arg(authdict):
"""
Create an "Authorization" header value from an authdict (created by generate_response()).
"""
vallist = []
for k in authdict.keys():
vallist += ['%s=%s' % (k,authdict[k])]
return 'Digest '+', '.join(vallist) | Create an "Authorization" header value from an authdict (created by generate_response()). |
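A self-contained sketch of the header assembly above, using a hypothetical digest-auth dict; the key set and values are illustrative only.
authdict = {'username': '"alice"', 'realm': '"api"',
            'nonce': '"abc123"', 'response': '"deadbeef"'}
vallist = ['%s=%s' % (k, authdict[k]) for k in authdict]
header = 'Digest ' + ', '.join(vallist)
# 'Digest username="alice", realm="api", nonce="abc123", response="deadbeef"'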
def error(code, message, **kwargs):
"""Call this to raise an exception and have it stored in the journal"""
assert code in Logger._error_code_to_exception
exc_type, domain = Logger._error_code_to_exception[code]
exc = exc_type(message, **kwargs)
Logger._log(code, exc.message, ERROR, domain)
raise exc | Call this to raise an exception and have it stored in the journal |
def register_view(self, view):
"""Called when the View was registered"""
super(TopToolBarUndockedWindowController, self).register_view(view)
view['redock_button'].connect('clicked', self.on_redock_button_clicked) | Called when the View was registered |
def _render_content(self, content, **settings):
"""
Perform widget rendering, but do not print anything.
"""
result = []
columns = settings[self.SETTING_COLUMNS]
# Format each table cell into string.
(columns, content) = self.table_format(columns, content)
# Enumerate each table row.
if settings[self.SETTING_FLAG_ENUMERATE]:
(columns, content) = self.table_enumerate(columns, content)
# Calculate the dimensions of each table column.
dimensions = self.table_measure(columns, content)
# Display table header.
sb = {k: settings[k] for k in (self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
result.append(self.fmt_border(dimensions, 't', **sb))
if settings[self.SETTING_FLAG_HEADER]:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_HEADER_FORMATING]
result.append(self.fmt_row_header(columns, dimensions, **s))
result.append(self.fmt_border(dimensions, 'm', **sb))
# Display table body.
for row in content:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_TEXT_FORMATING]
result.append(self.fmt_row(columns, dimensions, row, **s))
# Display table footer
result.append(self.fmt_border(dimensions, 'b', **sb))
return result | Perform widget rendering, but do not print anything. |
def to_cell_table(self, merged=True):
"""Returns a list of lists of Cells with the cooked value and note for each cell."""
new_rows = []
for row_index, row in enumerate(self.rows(CellMode.cooked)):
new_row = []
for col_index, cell_value in enumerate(row):
new_row.append(Cell(cell_value, self.get_note((col_index, row_index))))
new_rows.append(new_row)
if merged:
for cell_low, cell_high in self.merged_cell_ranges():
anchor_cell = new_rows[cell_low[1]][cell_low[0]]
for row_index in range(cell_low[1], cell_high[1]):
for col_index in range(cell_low[0], cell_high[0]):
# NOTE: xlrd occasionally returns ranges that don't have cells.
try:
new_rows[row_index][col_index] = anchor_cell.copy()
except IndexError:
pass
return new_rows | Returns a list of lists of Cells with the cooked value and note for each cell. |
def convert_dcm2nii(input_dir, output_dir, filename):
""" Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir`
to Nifti and save the Nifti file in `output_dir` with a `filename` prefix.
Parameters
----------
input_dir: str
Path to the folder that contains the DICOM files
output_dir: str
Path to the folder where to save the NifTI file
filename: str
Output file basename
Returns
-------
filepaths: list of str
List of file paths created in `output_dir`.
"""
# a few checks before doing the job
if not op.exists(input_dir):
raise IOError('Expected an existing folder in {}.'.format(input_dir))
if not op.exists(output_dir):
raise IOError('Expected an existing output folder in {}.'.format(output_dir))
# create a temporary folder for dcm2nii export
tmpdir = tempfile.TemporaryDirectory(prefix='dcm2nii_')
# call dcm2nii
arguments = '-o "{}" -i y'.format(tmpdir.name)
try:
call_out = call_dcm2nii(input_dir, arguments)
except:
raise
else:
log.info('Converted "{}" to nifti.'.format(input_dir))
# get the filenames of the files that dcm2nii produced
filenames = glob(op.join(tmpdir.name, '*.nii*'))
# clean up `filenames`, keeping only the post-processed (reoriented, cropped, etc.) images produced by dcm2nii
cleaned_filenames = remove_dcm2nii_underprocessed(filenames)
# copy files to the output_dir
filepaths = []
for srcpath in cleaned_filenames:
dstpath = op.join(output_dir, filename)
realpath = copy_w_plus(srcpath, dstpath)
filepaths.append(realpath)
# copy any other file produced by dcm2nii that is not a NifTI file, e.g., *.bvals, *.bvecs, etc.
basename = op.basename(remove_ext(srcpath))
aux_files = set(glob(op.join(tmpdir.name, '{}.*' .format(basename)))) - \
set(glob(op.join(tmpdir.name, '{}.nii*'.format(basename))))
for aux_file in aux_files:
aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath)))
filepaths.append(aux_dstpath)
return filepaths | Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir`
to Nifti and save the Nifti file in `output_dir` with a `filename` prefix.
Parameters
----------
input_dir: str
Path to the folder that contains the DICOM files
output_dir: str
Path to the folder where to save the NifTI file
filename: str
Output file basename
Returns
-------
filepaths: list of str
List of file paths created in `output_dir`. |
def main(**options):
"""Spline loc tool."""
application = Application(**options)
# fails application when your defined threshold is higher than your ratio of com/loc.
if not application.run():
sys.exit(1)
return application | Spline loc tool. |
def list(context, sort, limit, where, verbose):
"""list(context, sort, limit, where, verbose)
List all products.
>>> dcictl product list
:param string sort: Field to apply sort
:param integer limit: Max number of rows to return
:param string where: An optional filter criteria
:param boolean verbose: Display verbose output
"""
result = product.list(context, sort=sort, limit=limit, where=where)
utils.format_output(result, context.format, verbose=verbose) | list(context, sort, limit, where, verbose)
List all products.
>>> dcictl product list
:param string sort: Field to apply sort
:param integer limit: Max number of rows to return
:param string where: An optional filter criteria
:param boolean verbose: Display verbose output |
def write_block_data(self, addr, cmd, vals):
"""write_block_data(addr, cmd, vals)
Perform SMBus Write Block Data transaction.
"""
self._set_addr(addr)
data = ffi.new("union i2c_smbus_data *")
list_to_smbus_data(data, vals)
if SMBUS.i2c_smbus_access(self._fd,
int2byte(SMBUS.I2C_SMBUS_WRITE),
ffi.cast("__u8", cmd),
SMBUS.I2C_SMBUS_BLOCK_DATA,
data):
raise IOError(ffi.errno) | write_block_data(addr, cmd, vals)
Perform SMBus Write Block Data transaction. |
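A hypothetical usage sketch for the SMBus wrapper above; the constructor form SMBus(1), the device address, and the register number are assumptions, not documented API from the source.
bus = SMBus(1)  # assumed: open i2c bus 1
# Write three bytes as an SMBus block to register 0x10 of the device at address 0x48.
bus.write_block_data(0x48, 0x10, [0x01, 0x02, 0x03])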
def _direct_set(self, key, value):
'''
_direct_set - INTERNAL USE ONLY!!!!
Directly sets a value on the underlying dict, without running through the setitem logic
'''
dict.__setitem__(self, key, value)
return value | _direct_set - INTERNAL USE ONLY!!!!
Directly sets a value on the underlying dict, without running through the setitem logic |
def shuffle_into_deck(self):
"""
Shuffle the card into the controller's deck
"""
return self.game.cheat_action(self, [actions.Shuffle(self.controller, self)]) | Shuffle the card into the controller's deck |
def to_json(data):
"""Return data as a JSON string."""
return json.dumps(data, default=lambda x: x.__dict__, sort_keys=True, indent=4) | Return data as a JSON string. |
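An equivalent stand-alone sketch showing how the __dict__ fallback serializes an arbitrary object; the Point class is illustrative only.
import json

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

# Non-serializable objects fall back to their __dict__, so this prints
# a JSON object with keys "x" and "y".
print(json.dumps(Point(1, 2), default=lambda o: o.__dict__, sort_keys=True, indent=4))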
def metadata_updated_on(item):
"""Extracts and coverts the update time from a Bugzilla item.
The timestamp is extracted from 'delta_ts' field. This date is
converted to UNIX timestamp format. Due Bugzilla servers ignore
the timezone on HTTP requests, it will be ignored during the
conversion, too.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
ts = item['delta_ts'][0]['__text__']
ts = str_to_datetime(ts)
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
return ts.timestamp() | Extracts and converts the update time from a Bugzilla item.
The timestamp is extracted from the 'delta_ts' field. This date is
converted to UNIX timestamp format. Because Bugzilla servers ignore
the timezone on HTTP requests, it is ignored during the
conversion, too.
:param item: item generated by the backend
:returns: a UNIX timestamp |
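A usage sketch assuming the function above is in scope; the item is hand-built in the field layout the code reads, and the date value is illustrative.
item = {'delta_ts': [{'__text__': '2019-06-01 12:00:00'}]}
# Returns the UNIX timestamp for 2019-06-01 12:00:00, interpreted as UTC.
print(metadata_updated_on(item))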
def private_method(func):
"""Decorator for making an instance method private."""
def func_wrapper(*args, **kwargs):
"""Decorator wrapper function."""
outer_frame = inspect.stack()[1][0]
if 'self' not in outer_frame.f_locals or outer_frame.f_locals['self'] is not args[0]:
raise RuntimeError('%s.%s is a private method' % (args[0].__class__.__name__, func.__name__))
return func(*args, **kwargs)
return func_wrapper | Decorator for making an instance method private. |
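A usage sketch assuming the decorator above is in scope: calls from inside the instance pass, while outside calls raise; the Account class is illustrative only.
class Account:
    @private_method
    def _audit(self):
        return 'ok'

    def check(self):
        # Allowed: the caller's frame has this same instance bound to `self`.
        return self._audit()

Account().check()    # returns 'ok'
Account()._audit()   # raises RuntimeError: Account._audit is a private method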
def _add_versions(samples):
"""Add tool and data versions to the summary.
"""
samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]),
"data": provenancedata.write_versions(samples[0]["dirs"], samples)}
return samples | Add tool and data versions to the summary. |
def _sibpath(path, sibling):
"""
Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
(Stolen from twisted.python.util)
"""
return os.path.join(os.path.dirname(os.path.abspath(path)), sibling) | Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
(Stolen from twisted.python.util) |
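A small illustration of the path arithmetic, with hypothetical paths.
import os

module_path = '/home/user/pkg/module.py'
# Resolves to '/home/user/pkg/data.json', i.e. a sibling of module.py.
print(os.path.join(os.path.dirname(os.path.abspath(module_path)), 'data.json'))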
def convert_concat(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert concatenation.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting concat ...')
concat_nodes = [layers[i] for i in inputs]
if len(concat_nodes) == 1:
# no-op
layers[scope_name] = concat_nodes[0]
return
if names == 'short':
tf_name = 'CAT' + random_string(5)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
cat = keras.layers.Concatenate(name=tf_name, axis=params['axis'])
layers[scope_name] = cat(concat_nodes) | Convert concatenation.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers |
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None)
"""
if self.normalize:
X = normalize(X)
random_state = check_random_state(self.random_state)
# TODO: add check that all data is unit-normalized
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
X,
n_clusters=self.n_clusters,
sample_weight=sample_weight,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
random_state=random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
return_n_iter=True,
)
return self | Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None) |
def get_lines(self):
"""Gets lines in file
:return: Lines in file
"""
with open(self.path, "r") as data:
self.lines = data.readlines() # store data in arrays
return self.lines | Gets lines in file
:return: Lines in file |
def reset(self, total_size=None):
"""Remove all file system contents and reset the root."""
self.root = FakeDirectory(self.path_separator, filesystem=self)
self.cwd = self.root.name
self.open_files = []
self._free_fd_heap = []
self._last_ino = 0
self._last_dev = 0
self.mount_points = {}
self.add_mount_point(self.root.name, total_size)
self._add_standard_streams() | Remove all file system contents and reset the root. |
def build_path(G, node, endpoints, path):
"""
Recursively build a path of nodes until you hit an endpoint node.
Parameters
----------
G : networkx multidigraph
node : int
the current node to start from
endpoints : set
the set of all nodes in the graph that are endpoints
path : list
the list of nodes in order in the path so far
Returns
-------
paths_to_simplify : list
"""
# for each successor in the passed-in node
for successor in G.successors(node):
if successor not in path:
# if this successor is already in the path, ignore it, otherwise add
# it to the path
path.append(successor)
if successor not in endpoints:
# if this successor is not an endpoint, recursively call
# build_path until you find an endpoint
path = build_path(G, successor, endpoints, path)
else:
# if this successor is an endpoint, we've completed the path,
# so return it
return path
if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):
# if the end of the path is not actually an endpoint and the path's
# first node is a successor of the path's final node, then this is
# actually a self loop, so add path's first node to end of path to
# close it
path.append(path[0])
return path | Recursively build a path of nodes until you hit an endpoint node.
Parameters
----------
G : networkx multidigraph
node : int
the current node to start from
endpoints : set
the set of all nodes in the graph that are endpoints
path : list
the list of nodes in order in the path so far
Returns
-------
paths_to_simplify : list |
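A tiny usage sketch assuming the function above is in scope: on the chain 1 -> 2 -> 3 with endpoints {1, 3}, the walk starting from node 1 yields [1, 2, 3]. The graph is built with networkx.
import networkx as nx

G = nx.MultiDiGraph()
G.add_edges_from([(1, 2), (2, 3)])
endpoints = {1, 3}
# The starting path already contains the origin node.
print(build_path(G, 1, endpoints, path=[1]))   # [1, 2, 3]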
def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:
"""Determines if the moment has operations touching the given qubits.
Args:
qubits: The qubits that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubits.
"""
return any(q in qubits for q in self.qubits) | Determines if the moment has operations touching the given qubits.
Args:
qubits: The qubits that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubits. |
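A usage sketch assuming this is the Moment type from cirq; the qubit choices are arbitrary.
import cirq

q0, q1, q2 = cirq.LineQubit.range(3)
moment = cirq.Moment([cirq.X(q0), cirq.CZ(q1, q2)])
print(moment.operates_on([q0]))                 # True: X acts on q0
print(moment.operates_on([cirq.LineQubit(5)]))  # False: no operation touches q5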
def nested_genobject(self, metadata, attr, datastore):
"""
Allow for the printing of nested GenObjects
:param metadata: Nested dictionary containing the metadata. Will be further populated by this method
:param attr: Current attribute being evaluated. Must be a GenObject e.g. sample.general
:param datastore: The dictionary of the current attribute. Will be converted to nested dictionaries
:return: Updated nested metadata dictionary with all GenObjects safely converted to dictionaries
"""
# Iterate through all the key: value pairs of the current datastore[attr] datastore
# e.g. reverse_reads <accessoryFunctions.accessoryFunctions.GenObject object at 0x7fe153b725f8>
for key, value in sorted(datastore[attr].datastore.items()):
# If the type(value) is a GenObject, then JSON serialization will not work
if 'GenObject' in str(type(value)):
# Initialise the nested attribute: key nested dictionary within the metadata dictionary
# e.g. attr: 100_100, key: reverse_reads
metadata[attr][key] = dict()
# Iterate through the nested keys and nested values within the value datastore
# e.g. nested_key: length, nested_value: 100
for nested_key, nested_datastore in sorted(value.datastore.items()):
# Create an additional dictionary layer within the metadata dictionary
metadata[attr][key][nested_key] = dict()
# If the type(nested_datastore) is a GenObject, recursively run this method to update the
# metadata dictionary, supply the newly created nested dictionary: metadata[attr][key] as
# the input metadata dictionary, the nested key as the input attribute, and the datastore of
# value as the input datastore
# e.g. key: 100_100,
# datastore: <accessoryFunctions.accessoryFunctions.GenObject object at 0x7fc526001e80>
if 'GenObject' in str(type(nested_datastore)):
metadata[attr][key].update(
self.nested_genobject(metadata[attr][key], nested_key, value.datastore))
# If the nested datastore is not a GenObject, populate the nested metadata dictionary with
# the attribute, key, nested key, and nested datastore
# e.g. attr: 100_100, key: reverse_reads, nested_key: length, nested_datastore: 100
else:
metadata[attr][key][nested_key] = nested_datastore
# Non-GenObjects can (usually) be added to the metadata dictionary without issues
else:
try:
if key not in self.unwanted_keys:
metadata[attr][key] = value
except AttributeError:
print('dumperror', attr)
# Return the metadata
return metadata | Allow for the printing of nested GenObjects
:param metadata: Nested dictionary containing the metadata. Will be further populated by this method
:param attr: Current attribute being evaluated. Must be a GenObject e.g. sample.general
:param datastore: The dictionary of the current attribute. Will be converted to nested dictionaries
:return: Updated nested metadata dictionary with all GenObjects safely converted to dictionaries |
def truncate(s, max_len=20, ellipsis='...'):
r"""Return string at most `max_len` characters or sequence elments appended with the `ellipsis` characters
>>> truncate(OrderedDict(zip(list('ABCDEFGH'), range(8))), 1)
"{'A': 0..."
>>> truncate(list(range(5)), 3)
'[0, 1, 2...'
>>> truncate(np.arange(5), 3)
'[0, 1, 2...'
>>> truncate('Too verbose for its own good.', 11)
'Too verbose...'
"""
if s is None:
return None
elif isinstance(s, basestring):
return s[:min(len(s), max_len)] + ellipsis if len(s) > max_len else s
elif isinstance(s, Mapping):
truncated_str = str(dict(islice(viewitems(s), max_len)))
else:
truncated_str = str(list(islice(s, max_len)))
return truncated_str[:-1] + '...' if len(s) > max_len else truncated_str | r"""Return a string of at most `max_len` characters or sequence elements appended with the `ellipsis` characters
>>> truncate(OrderedDict(zip(list('ABCDEFGH'), range(8))), 1)
"{'A': 0..."
>>> truncate(list(range(5)), 3)
'[0, 1, 2...'
>>> truncate(np.arange(5), 3)
'[0, 1, 2...'
>>> truncate('Too verbose for its own good.', 11)
'Too verbose...' |
def dependent_hosted_number_orders(self):
"""
Access the dependent_hosted_number_orders
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
"""
if self._dependent_hosted_number_orders is None:
self._dependent_hosted_number_orders = DependentHostedNumberOrderList(
self._version,
signing_document_sid=self._solution['sid'],
)
return self._dependent_hosted_number_orders | Access the dependent_hosted_number_orders
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList |
def check_if_ok_to_update(self):
"""Check if it is ok to perform an http request."""
current_time = int(time.time())
last_refresh = self.last_refresh
if last_refresh is None:
last_refresh = 0
if current_time >= (last_refresh + self.refresh_rate):
return True
return False | Check if it is ok to perform an http request. |
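A stand-alone sketch of the same throttling rule, with a hypothetical 60-second refresh rate.
import time

refresh_rate, last_refresh = 60, None
now = int(time.time())
# First call: last_refresh is None, treated as 0, so the request is allowed;
# subsequent calls stay blocked until refresh_rate seconds have passed.
ok = now >= ((last_refresh or 0) + refresh_rate)
print(ok)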
def multiplication_circuit(nbit, vartype=dimod.BINARY):
"""Multiplication circuit constraint satisfaction problem.
A constraint satisfaction problem that represents the binary multiplication :math:`ab=p`,
where the multiplicands are binary variables of length `nbit`; for example,
:math:`a_0 + 2a_1 + 4a_2 +... +2^ma_{nbit}`.
The square below shows a graphic representation of the circuit::
________________________________________________________________________________
|                                         and20         and10         and00    |
|                                           |             |             |      |
|                           and21         add11──and11  add01──and01    |      |
|                             |┌───────────┘|┌───────────┘|             |      |
|             and22         add12──and12  add02──and02    |             |      |
|               |┌───────────┘|┌───────────┘|             |             |      |
|             add13─────────add03           |             |             |      |
|  ┌───────────┘|             |             |             |             |      |
| p5            p4            p3            p2            p1            p0     |
--------------------------------------------------------------------------------
Args:
nbit (int): Number of bits in the multiplicands.
vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
input values:
* Vartype.SPIN, 'SPIN', {-1, 1}
* Vartype.BINARY, 'BINARY', {0, 1}
Returns:
CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when variables
:math:`a,b,p` are assigned values that correctly solve binary multiplication :math:`ab=p`.
Examples:
This example creates a multiplication circuit CSP that multiplies two 3-bit numbers,
which is then formulated as a binary quadratic model (BQM). It fixes the multiplicands
as :math:`a=5, b=6` (:math:`101` and :math:`110`) and uses a simulated annealing sampler
to find the product, :math:`p=30` (:math:`111100`).
>>> import dwavebinarycsp
>>> from dwavebinarycsp.factories.csp.circuits import multiplication_circuit
>>> import neal
>>> csp = multiplication_circuit(3)
>>> bqm = dwavebinarycsp.stitch(csp)
>>> bqm.fix_variable('a0', 1); bqm.fix_variable('a1', 0); bqm.fix_variable('a2', 1)
>>> bqm.fix_variable('b0', 1); bqm.fix_variable('b1', 1); bqm.fix_variable('b2', 0)
>>> sampler = neal.SimulatedAnnealingSampler()
>>> response = sampler.sample(bqm)
>>> p = next(response.samples(n=1, sorted_by='energy'))
>>> print(p['p0'], p['p1'], p['p2'], p['p3'], p['p4'], p['p5']) # doctest: +SKIP
1 1 1 1 0 0
"""
if nbit < 1:
raise ValueError("num_multiplier_bits, num_multiplicand_bits must be positive integers")
num_multiplier_bits = num_multiplicand_bits = nbit
# also checks the vartype argument
csp = ConstraintSatisfactionProblem(vartype)
# throughout, we will use the following convention:
# i to refer to the bits of the multiplier
# j to refer to the bits of the multiplicand
# k to refer to the bits of the product
# create the variables corresponding to the input and output wires for the circuit
a = {i: 'a%d' % i for i in range(nbit)}
b = {j: 'b%d' % j for j in range(nbit)}
p = {k: 'p%d' % k for k in range(nbit + nbit)}
# we will want to store the internal variables somewhere
AND = defaultdict(dict) # the output of the AND gate associated with ai, bj is stored in AND[i][j]
SUM = defaultdict(dict) # the sum of the ADDER gate associated with ai, bj is stored in SUM[i][j]
CARRY = defaultdict(dict) # the carry of the ADDER gate associated with ai, bj is stored in CARRY[i][j]
# we follow a shift adder
for i in range(num_multiplier_bits):
for j in range(num_multiplicand_bits):
ai = a[i]
bj = b[j]
if i == 0 and j == 0:
# in this case there are no inputs from lower bits, so our only input is the AND
# gate. And since we only have one bit to add, we don't need an adder, nor do we
# have a carry out
andij = AND[i][j] = p[0]
gate = and_gate([ai, bj, andij], vartype=vartype, name='AND(%s, %s) = %s' % (ai, bj, andij))
csp.add_constraint(gate)
continue
# we always need an AND gate
andij = AND[i][j] = 'and%s,%s' % (i, j)
gate = and_gate([ai, bj, andij], vartype=vartype, name='AND(%s, %s) = %s' % (ai, bj, andij))
csp.add_constraint(gate)
# the number of inputs will determine the type of adder
inputs = [andij]
# determine if there is a carry in
if i - 1 in CARRY and j in CARRY[i - 1]:
inputs.append(CARRY[i - 1][j])
# determine if there is a sum in
if i - 1 in SUM and j + 1 in SUM[i - 1]:
inputs.append(SUM[i - 1][j + 1])
# ok, create adders if necessary
if len(inputs) == 1:
# we don't need an adder and we don't have a carry
SUM[i][j] = andij
elif len(inputs) == 2:
# we need a HALFADDER so we have a sum and a carry
if j == 0:
sumij = SUM[i][j] = p[i]
else:
sumij = SUM[i][j] = 'sum%d,%d' % (i, j)
carryij = CARRY[i][j] = 'carry%d,%d' % (i, j)
name = 'HALFADDER(%s, %s) = %s, %s' % (inputs[0], inputs[1], sumij, carryij)
gate = halfadder_gate([inputs[0], inputs[1], sumij, carryij], vartype=vartype, name=name)
csp.add_constraint(gate)
else:
assert len(inputs) == 3, 'unexpected number of inputs'
# we need a FULLADDER so we have a sum and a carry
if j == 0:
sumij = SUM[i][j] = p[i]
else:
sumij = SUM[i][j] = 'sum%d,%d' % (i, j)
carryij = CARRY[i][j] = 'carry%d,%d' % (i, j)
name = 'FULLADDER(%s, %s, %s) = %s, %s' % (inputs[0], inputs[1], inputs[2], sumij, carryij)
gate = fulladder_gate([inputs[0], inputs[1], inputs[2], sumij, carryij], vartype=vartype, name=name)
csp.add_constraint(gate)
# now we have a final row of full adders
for col in range(nbit - 1):
inputs = [CARRY[nbit - 1][col], SUM[nbit - 1][col + 1]]
if col == 0:
sumout = p[nbit + col]
carryout = CARRY[nbit][col] = 'carry%d,%d' % (nbit, col)
name = 'HALFADDER(%s, %s) = %s, %s' % (inputs[0], inputs[1], sumout, carryout)
gate = halfadder_gate([inputs[0], inputs[1], sumout, carryout], vartype=vartype, name=name)
csp.add_constraint(gate)
continue
inputs.append(CARRY[nbit][col - 1])
sumout = p[nbit + col]
if col < nbit - 2:
carryout = CARRY[nbit][col] = 'carry%d,%d' % (nbit, col)
else:
carryout = p[2 * nbit - 1]
name = 'FULLADDER(%s, %s, %s) = %s, %s' % (inputs[0], inputs[1], inputs[2], sumout, carryout)
gate = fulladder_gate([inputs[0], inputs[1], inputs[2], sumout, carryout], vartype=vartype, name=name)
csp.add_constraint(gate)
return csp | Multiplication circuit constraint satisfaction problem.
A constraint satisfaction problem that represents the binary multiplication :math:`ab=p`,
where the multiplicands are binary variables of length `nbit`; for example,
:math:`a_0 + 2a_1 + 4a_2 +... +2^ma_{nbit}`.
The square below shows a graphic representation of the circuit::
________________________________________________________________________________
|                                         and20         and10         and00    |
|                                           |             |             |      |
|                           and21         add11──and11  add01──and01    |      |
|                             |┌───────────┘|┌───────────┘|             |      |
|             and22         add12──and12  add02──and02    |             |      |
|               |┌───────────┘|┌───────────┘|             |             |      |
|             add13─────────add03           |             |             |      |
|  ┌───────────┘|             |             |             |             |      |
| p5            p4            p3            p2            p1            p0     |
--------------------------------------------------------------------------------
Args:
nbit (int): Number of bits in the multiplicands.
vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
input values:
* Vartype.SPIN, 'SPIN', {-1, 1}
* Vartype.BINARY, 'BINARY', {0, 1}
Returns:
CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when variables
:math:`a,b,p` are assigned values that correctly solve binary multiplication :math:`ab=p`.
Examples:
This example creates a multiplication circuit CSP that multiplies two 3-bit numbers,
which is then formulated as a binary quadratic model (BQM). It fixes the multiplicands
as :math:`a=5, b=6` (:math:`101` and :math:`110`) and uses a simulated annealing sampler
to find the product, :math:`p=30` (:math:`111100`).
>>> import dwavebinarycsp
>>> from dwavebinarycsp.factories.csp.circuits import multiplication_circuit
>>> import neal
>>> csp = multiplication_circuit(3)
>>> bqm = dwavebinarycsp.stitch(csp)
>>> bqm.fix_variable('a0', 1); bqm.fix_variable('a1', 0); bqm.fix_variable('a2', 1)
>>> bqm.fix_variable('b0', 1); bqm.fix_variable('b1', 1); bqm.fix_variable('b2', 0)
>>> sampler = neal.SimulatedAnnealingSampler()
>>> response = sampler.sample(bqm)
>>> p = next(response.samples(n=1, sorted_by='energy'))
>>> print(p['p0'], p['p1'], p['p2'], p['p3'], p['p4'], p['p5']) # doctest: +SKIP
1 1 1 1 0 0 |
def _construct_deutsch_jozsa_circuit(self):
"""
Builds the Deutsch-Jozsa circuit. Which can determine whether a function f mapping
:math:`\{0,1\}^n \to \{0,1\}` is constant or balanced, provided that it is one of them.
:return: A program corresponding to the desired instance of Deutsch Jozsa's Algorithm.
:rtype: Program
"""
dj_prog = Program()
# Put the first ancilla qubit (query qubit) into minus state
dj_prog.inst(X(self.ancillas[0]), H(self.ancillas[0]))
# Apply Hadamard, Oracle, and Hadamard again
dj_prog.inst([H(qubit) for qubit in self.computational_qubits])
# Build the oracle
oracle_prog = Program()
oracle_prog.defgate(ORACLE_GATE_NAME, self.unitary_matrix)
scratch_bit = self.ancillas[1]
qubits_for_funct = [scratch_bit] + self.computational_qubits
oracle_prog.inst(tuple([ORACLE_GATE_NAME] + qubits_for_funct))
dj_prog += oracle_prog
# Here the oracle does not leave the computational qubits unchanged, so we use a CNOT
# to move the result to the query qubit, and then we uncompute with the dagger.
dj_prog.inst(CNOT(self._qubits[0], self.ancillas[0]))
dj_prog += oracle_prog.dagger()
dj_prog.inst([H(qubit) for qubit in self.computational_qubits])
return dj_prog | Builds the Deutsch-Jozsa circuit. Which can determine whether a function f mapping
:math:`\{0,1\}^n \to \{0,1\}` is constant or balanced, provided that it is one of them.
:return: A program corresponding to the desired instance of Deutsch Jozsa's Algorithm.
:rtype: Program |
def get_ambient_sensor_data(self):
"""Refresh ambient sensor history"""
resource = 'cameras/{}/ambientSensors/history'.format(self.device_id)
history_event = self.publish_and_get_event(resource)
if history_event is None:
return None
properties = history_event.get('properties')
self._ambient_sensor_data = \
ArloBaseStation._decode_sensor_data(properties)
return self._ambient_sensor_data | Refresh ambient sensor history |
def clean(self):
"""
Final validations of model fields.
1. Validate that selected site for enterprise customer matches with the selected identity provider's site.
"""
super(EnterpriseCustomerIdentityProviderAdminForm, self).clean()
provider_id = self.cleaned_data.get('provider_id', None)
enterprise_customer = self.cleaned_data.get('enterprise_customer', None)
if provider_id is None or enterprise_customer is None:
# field validation for either provider_id or enterprise_customer has already raised
# a validation error.
return
identity_provider = utils.get_identity_provider(provider_id)
if not identity_provider:
# This should not happen, as identity providers displayed in drop down are fetched dynamically.
message = _(
"The specified Identity Provider does not exist. For more "
"information, contact a system administrator.",
)
# Log message for debugging
logger.exception(message)
raise ValidationError(message)
if identity_provider and identity_provider.site != enterprise_customer.site:
raise ValidationError(
_(
"The site for the selected identity provider "
"({identity_provider_site}) does not match the site for "
"this enterprise customer ({enterprise_customer_site}). "
"To correct this problem, select a site that has a domain "
"of '{identity_provider_site}', or update the identity "
"provider to '{enterprise_customer_site}'."
).format(
enterprise_customer_site=enterprise_customer.site,
identity_provider_site=identity_provider.site,
),
) | Final validations of model fields.
1. Validate that selected site for enterprise customer matches with the selected identity provider's site. |
def process_directory_statements_sorted_by_pmid(directory_name):
"""Processes a directory filled with CSXML files, first normalizing the
character encoding to utf-8, and then processing into INDRA statements
sorted by pmid.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
Returns
-------
pmid_dict : dict
A dictionary mapping pmids to a list of statements corresponding to
that pmid
"""
s_dict = defaultdict(list)
mp = process_directory(directory_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | Processes a directory filled with CSXML files, first normalizing the
character encoding to utf-8, and then processing into INDRA statements
sorted by pmid.
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
Returns
-------
pmid_dict : dict
A dictionary mapping pmids to a list of statements corresponding to
that pmid |
def get_lowest_numeric_score_metadata(self):
"""Gets the metadata for the lowest numeric score.
return: (osid.Metadata) - metadata for the lowest numeric score
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['lowest_numeric_score'])
metadata.update({'existing_decimal_values': self._my_map['lowestNumericScore']})
return Metadata(**metadata) | Gets the metadata for the lowest numeric score.
return: (osid.Metadata) - metadata for the lowest numeric score
*compliance: mandatory -- This method must be implemented.* |
def unpickle(self, parent):
"""Sets the parent pointer references for the module *and* all of its
child classes that also have pointer references."""
self.parent = parent
self._unpickle_collection(self.members)
self._unpickle_collection(self.dependencies)
self._unpickle_collection(self.types)
self._unpickle_collection(self.executables)
self._unpickle_collection(self._parameters)
self.unpickle_docs() | Sets the parent pointer references for the module *and* all of its
child classes that also have pointer references. |
def parse_log_entry(text):
"""This function does all real job on log line parsing.
it setup two cases for restart parsing if a line
with wrong format was found.
Restarts:
- use_value: just returns the object it was passed. This can
be any value.
- reparse: calls `parse_log_entry` again with other text value.
Beware, this call can lead to infinite recursion.
"""
text = text.strip()
if well_formed_log_entry_p(text):
return LogEntry(text)
else:
def use_value(obj):
return obj
def reparse(text):
return parse_log_entry(text)
with restarts(use_value,
reparse) as call:
return call(signal, MalformedLogEntryError(text)) | This function does the real job of log line parsing.
It sets up two restarts for re-parsing when a line
with the wrong format is found.
Restarts:
- use_value: just retuns an object it was passed. This can
be any value.
- reparse: calls `parse_log_entry` again with other text value.
Beware, this call can lead to infinite recursion. |
def _to_reddit_list(arg):
"""Return an argument converted to a reddit-formatted list.
The returned format is a comma-delimited list. Each element is a string
representation of an object. Either given as a string or as an object that
is then converted to its string representation.
"""
if (isinstance(arg, six.string_types) or not (
hasattr(arg, "__getitem__") or hasattr(arg, "__iter__"))):
return six.text_type(arg)
else:
return ','.join(six.text_type(a) for a in arg) | Return an argument converted to a reddit-formatted list.
The returned format is a comma-delimited list. Each element is a string
representation of an object. Either given as a string or as an object that
is then converted to its string representation. |
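A usage sketch assuming the helper above is in scope; the argument values are illustrative.
print(_to_reddit_list('spam'))         # 'spam' (strings pass through unchanged)
print(_to_reddit_list(['a', 'b', 3]))  # 'a,b,3' (iterables join on commas)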
def flg(self, name, help, abbrev=None):
"""Describe a flag"""
abbrev = abbrev or '-' + name[0]
longname = '--' + name.replace('_', '-')
self._add(name, abbrev, longname, action='store_true', help=help) | Describe a flag |
def _client_properties():
"""AMQPStorm Client Properties.
:rtype: dict
"""
return {
'product': 'AMQPStorm',
'platform': 'Python %s (%s)' % (platform.python_version(),
platform.python_implementation()),
'capabilities': {
'basic.nack': True,
'connection.blocked': True,
'publisher_confirms': True,
'consumer_cancel_notify': True,
'authentication_failure_close': True,
},
'information': 'See https://github.com/eandersson/amqpstorm',
'version': __version__
} | AMQPStorm Client Properties.
:rtype: dict |
def update(self):
"""Update RAID stats using the input method."""
# Init new stats
stats = self.get_init_value()
if import_error_tag:
return self.stats
if self.input_method == 'local':
# Update stats using the PyMDstat lib (https://github.com/nicolargo/pymdstat)
try:
# Just for test
# mds = MdStat(path='/home/nicolargo/dev/pymdstat/tests/mdstat.10')
mds = MdStat()
stats = mds.get_stats()['arrays']
except Exception as e:
logger.debug("Can not grab RAID stats (%s)" % e)
return self.stats
elif self.input_method == 'snmp':
# Update stats using SNMP
# No standard way for the moment...
pass
# Update the stats
self.stats = stats
return self.stats | Update RAID stats using the input method. |
def get_songs() -> Iterator:
"""
Return songs that have the fingerprinted flag set TRUE (1).
"""
with session_withcommit() as session:
val = session.query(songs).all()
for row in val:
yield row | Return songs that have the fingerprinted flag set TRUE (1). |