def construct_publish_comands(additional_steps=None, nightly=False):
'''Get the shell commands we'll use to actually build and publish a package to PyPI.'''
publish_commands = (
['rm -rf dist']
+ (additional_steps if additional_steps else [])
+ [
'python setup.py sdist bdist_wheel{nightly}'.format(
nightly=' --nightly' if nightly else ''
),
'twine upload dist/*',
]
)
    return publish_commands
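# Minimal usage sketch for the function above; the extra step shown here is
# hypothetical and only illustrates how additional_steps is spliced into the list.
example_cmds = construct_publish_comands(additional_steps=['pip install -e .'], nightly=True)
# example_cmds == ['rm -rf dist', 'pip install -e .',
#                  'python setup.py sdist bdist_wheel --nightly', 'twine upload dist/*']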
def set_xticks(self, row, column, ticks):
"""Manually specify the x-axis tick values.
:param row,column: specify the subplot.
:param ticks: list of tick values.
"""
subplot = self.get_subplot_at(row, column)
    subplot.set_xticks(ticks)
def _import_LOV(
baseuri="http://lov.okfn.org/dataset/lov/api/v2/vocabulary/list",
keyword=""):
"""
2016-03-02: import from json list
"""
printDebug("----------\nReading source... <%s>" % baseuri)
query = requests.get(baseuri, params={})
all_options = query.json()
options = []
# pre-filter if necessary
if keyword:
for x in all_options:
if keyword in x['uri'].lower() or keyword in x['titles'][0][
'value'].lower() or keyword in x['nsp'].lower():
options.append(x)
else:
options = all_options
printDebug("----------\n%d results found.\n----------" % len(options))
if options:
# display:
counter = 1
for x in options:
uri, title, ns = x['uri'], x['titles'][0]['value'], x['nsp']
# print("%s ==> %s" % (d['titles'][0]['value'], d['uri']))
click.echo(
click.style("[%d]" % counter, fg='blue') +
click.style(uri + " ==> ", fg='black') +
click.style(title, fg='red'))
counter += 1
while True:
var = input(Style.BRIGHT +
"=====\nSelect ID to import: (q=quit)\n" +
Style.RESET_ALL)
if var == "q":
break
else:
try:
_id = int(var)
ontouri = options[_id - 1]['uri']
print(Fore.RED + "\n---------\n" + ontouri +
"\n---------" + Style.RESET_ALL)
action_analyze([ontouri])
if click.confirm(
'=====\nDo you want to save to your local library?'
):
action_import(ontouri)
return
except:
print("Error retrieving file. Import failed.")
                    continue
def print_plugins(folders, exit_code=0):
"""Print available plugins and exit."""
modules = plugins.get_plugin_modules(folders)
pluginclasses = sorted(plugins.get_plugin_classes(modules), key=lambda x: x.__name__)
for pluginclass in pluginclasses:
print(pluginclass.__name__)
doc = strformat.wrap(pluginclass.__doc__, 80)
print(strformat.indent(doc))
print()
    sys.exit(exit_code)
def getXmlText (parent, tag):
"""Return XML content of given tag in parent element."""
elem = parent.getElementsByTagName(tag)[0]
# Yes, the DOM standard is awful.
rc = []
for node in elem.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
    return ''.join(rc)
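# Usage sketch with the standard-library DOM parser (the XML snippet is made up
# for illustration); getXmlText collects only the direct text nodes of the tag.
from xml.dom.minidom import parseString

doc = parseString('<config><timeout>30</timeout></config>')
assert getXmlText(doc.documentElement, 'timeout') == '30'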
def sg_layer_func(func):
r"""Decorates a function `func` as a sg_layer function.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(tensor, **kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
tensor: A `tensor` (automatically passed by decorator).
kwargs:
shape: A list of integers. The shape of `tensor`. Inferred if not specified.
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
bn: Boolean. If True, batch normalization is applied.
ln: Boolean. If True, layer normalization is applied.
                scale: If true, multiply by a trainable gamma variable. When the activation is
linear (relu included), this can be disabled because it can be implicitly
learned by the next layer. The default is True.
dout: A float of range [0, 100). A dropout rate. Set to 0 by default.
bias: Boolean. If True, biases are added. As a default, it is set to True
name: A name for the layer. As a default, the function name is assigned.
act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
regularizer: A string. None, 'l1' or 'l2'. The default is None
summary: If True, summaries are added. The default is True.
"""
from . import sg_initializer as init
from . import sg_activation
# kwargs parsing
opt = tf.sg_opt(kwargs) + sg_get_context()
# set default argument
try:
shape = tensor.get_shape().as_list()
# batch normalization off, layer normalization off, dropout off
opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1],
bn=False, ln=False, dout=0, summary=True, scale=True)
if opt.regularizer == 'l1':
opt.regularizer = lambda x: tf.reduce_mean(tf.abs(x))
elif opt.regularizer == 'l2':
opt.regularizer = lambda x: tf.square(tf.reduce_mean(tf.square(x)))
else:
opt.regularizer = None
            assert not (opt.bn and opt.ln), 'only one of batch normalization and layer normalization may be enabled.'
# disable bias when normalization on
opt += tf.sg_opt(bias=not (opt.bn or opt.ln))
finally:
pass
# automatic layer naming
if opt.name is None:
# layer function name will be used as layer name
opt.name = func.__name__.replace('sg_', '')
# find existing layer names
exist_layers = []
for t in tf.global_variables():
scope_name = tf.get_variable_scope().name
prefix = scope_name + '/' if len(scope_name) > 0 else ''
i = t.name.rfind(prefix + opt.name)
if i >= 0:
exist_layers.append(t.name[i:].split('/')[-2])
exist_layers = list(set(exist_layers))
# layer name numbering
if len(exist_layers) == 0:
opt.name += '_1'
else:
opt.name += '_%d' % (max([int(n.split('_')[-1]) for n in exist_layers]) + 1)
with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:
# call layer function
out = func(tensor, opt)
out_shape = out.get_shape()
# apply batch normalization
if opt.bn:
beta = init.constant('beta', opt.dim, summary=opt.summary)
gamma = init.constant('gamma', opt.dim, value=1, summary=opt.summary, trainable=opt.scale)
# offset, scale parameter ( for inference )
mean_running = init.constant('mean', opt.dim, trainable=False, summary=opt.summary)
variance_running = init.constant('variance', opt.dim, value=1, trainable=False, summary=opt.summary)
# use fused batch norm if ndims in [2, 3, 4]
if out_shape.ndims in [2, 3, 4]:
# add HW dims if necessary, fused_batch_norm requires shape to be NHWC
if out_shape.ndims == 2:
out = tf.expand_dims(out, axis=1)
out = tf.expand_dims(out, axis=2)
elif out_shape.ndims == 3:
out = tf.expand_dims(out, axis=2)
fused_eps = tf.sg_eps if tf.sg_eps > 1e-5 else 1e-5
out, mean, variance = tf.cond(
_phase,
lambda: tf.nn.fused_batch_norm(out, gamma, beta, epsilon=fused_eps),
lambda: tf.nn.fused_batch_norm(out, gamma, beta, mean=mean_running, variance=variance_running, epsilon=fused_eps, is_training=False),
)
# restore original shape if HW dims was added
if out_shape.ndims == 2:
out = tf.squeeze(out, axis=[1, 2])
elif out_shape.ndims == 3:
out = tf.squeeze(out, axis=2)
# fallback to naive batch norm
else:
mean, variance = tf.nn.moments(out, axes=list(range(len(out.get_shape()) - 1)))
out = tf.cond(
_phase,
lambda: tf.nn.batch_normalization(out, mean, variance, beta, gamma, tf.sg_eps),
lambda: tf.nn.batch_normalization(out, mean_running, variance_running, beta, gamma, tf.sg_eps)
)
decay = 0.99
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_running.assign(mean_running * decay + mean * (1 - decay)))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_running.assign(variance_running * decay + variance * (1 - decay)))
# apply layer normalization
if opt.ln:
# offset, scale parameter
beta = init.constant('beta', opt.dim, summary=opt.summary)
if opt.scale:
gamma = init.constant('gamma', opt.dim, value=1, summary=opt.summary)
# calc layer mean, variance for final axis
mean, variance = tf.nn.moments(out, axes=[len(out.get_shape()) - 1], keep_dims=True)
# apply normalization
out = (out - mean) / tf.sqrt(variance + tf.sg_eps)
# apply parameter
if opt.scale:
out = gamma * out + beta
else:
out = out + beta
# apply activation
if opt.act:
out = getattr(sg_activation, 'sg_' + opt.act.lower())(out)
# apply dropout
if opt.dout:
out = tf.cond(_phase,
lambda: tf.nn.dropout(out, 1 - opt.dout),
lambda: out)
# rename tensor
out = tf.identity(out, 'out')
# add final output summary
if opt.summary:
tf.sg_summary_activation(out)
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(),
prev=tensor, is_layer=True, name=opt.name)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
    return wrapper
def _create_affine(x_axis, y_axis, z_axis, image_pos, voxel_sizes):
"""
Function to generate the affine matrix for a dicom series
This method was based on (http://nipy.org/nibabel/dicom/dicom_orientation.html)
    :param x_axis, y_axis, z_axis: direction cosine vectors of the image axes
    :param image_pos: position of the first voxel
    :param voxel_sizes: voxel spacing along each axis
"""
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
affine = numpy.array(
[[x_axis[0] * voxel_sizes[0], y_axis[0] * voxel_sizes[1], z_axis[0] * voxel_sizes[2], image_pos[0]],
[x_axis[1] * voxel_sizes[0], y_axis[1] * voxel_sizes[1], z_axis[1] * voxel_sizes[2], image_pos[1]],
[x_axis[2] * voxel_sizes[0], y_axis[2] * voxel_sizes[1], z_axis[2] * voxel_sizes[2], image_pos[2]],
[0, 0, 0, 1]])
    return affine
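# Usage sketch: axis-aligned orientation, 1 mm isotropic voxels, origin at zero.
# With these inputs the result is the 4x4 identity matrix.
import numpy

identity_affine = _create_affine(x_axis=[1, 0, 0], y_axis=[0, 1, 0], z_axis=[0, 0, 1],
                                 image_pos=[0, 0, 0], voxel_sizes=[1.0, 1.0, 1.0])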
def get_waveset(model):
"""Get optimal wavelengths for sampling a given model.
Parameters
----------
model : `~astropy.modeling.Model`
Model.
Returns
-------
waveset : array-like or `None`
Optimal wavelengths. `None` if undefined.
Raises
------
synphot.exceptions.SynphotError
Invalid model.
"""
if not isinstance(model, Model):
raise SynphotError('{0} is not a model.'.format(model))
if isinstance(model, _CompoundModel):
waveset = model._tree.evaluate(WAVESET_OPERATORS, getter=None)
else:
waveset = _get_sampleset(model)
    return waveset
def show_all(self):
"""Show entire demo on screen, block by block"""
fname = self.title
title = self.title
nblocks = self.nblocks
silent = self._silent
marquee = self.marquee
for index,block in enumerate(self.src_blocks_colored):
if silent[index]:
print >>io.stdout, marquee('<%s> SILENT block # %s (%s remaining)' %
(title,index,nblocks-index-1))
else:
print >>io.stdout, marquee('<%s> block # %s (%s remaining)' %
(title,index,nblocks-index-1))
print >>io.stdout, block,
    sys.stdout.flush()
def _get_batches(self, mapping, batch_size=10000):
"""Get data from the local db"""
action = mapping.get("action", "insert")
fields = mapping.get("fields", {}).copy()
static = mapping.get("static", {})
lookups = mapping.get("lookups", {})
record_type = mapping.get("record_type")
# Skip Id field on insert
if action == "insert" and "Id" in fields:
del fields["Id"]
# Build the list of fields to import
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append("RecordTypeId")
# default to the profile assigned recordtype if we can't find any
# query for the RT by developer name
query = (
"SELECT Id FROM RecordType WHERE SObjectType='{0}'"
"AND DeveloperName = '{1}' LIMIT 1"
)
record_type_id = self.sf.query(
query.format(mapping.get("sf_object"), record_type)
)["records"][0]["Id"]
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return batch_file, writer, batch_ids
batch_file, writer, batch_ids = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + list(static.values())
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
# Yield and start a new file every [batch_size] rows
if not total_rows % batch_size:
batch_file.seek(0)
self.logger.info(" Processing batch {}".format(batch_num))
yield batch_file, batch_ids
batch_file, writer, batch_ids = start_batch()
batch_num += 1
# Yield result file for final batch
if batch_ids:
batch_file.seek(0)
yield batch_file, batch_ids
self.logger.info(
" Prepared {} rows for import to {}".format(
total_rows, mapping["sf_object"]
)
    )
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created AAAAResourceRecordSet instance.
"""
self._halt_if_already_deleted()
# Grab the params/kwargs here for brevity's sake.
values = locals()
del values['self']
    return self._add_record(AAAAResourceRecordSet, **values)
def order(self, mechanism, purview):
"""Order the mechanism and purview in time.
If the direction is ``CAUSE``, then the purview is at |t-1| and the
mechanism is at time |t|. If the direction is ``EFFECT``, then the
mechanism is at time |t| and the purview is at |t+1|.
"""
if self is Direction.CAUSE:
return purview, mechanism
elif self is Direction.EFFECT:
return mechanism, purview
from . import validate
    return validate.direction(self)
def _make_query(self, ID: str, methodname: str, returnable: bool, *args: Any, **kwargs: Any):
"""将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
returnable (bool): - 是否要求返回结果
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式
"""
query = {
"MPRPC": self.VERSION,
"ID": ID,
"METHOD": methodname,
"RETURN": returnable,
"ARGS": args,
"KWARGS": kwargs
}
print(query)
    return query
def _make_cmap(colors, position=None, bit=False):
'''
_make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). _make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
'''
bit_rgb = np.linspace(0,1,256)
if position == None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
palette = [(i, (float(r), float(g), float(b), float(a))) for
i, (r, g, b, a) in enumerate(colors)]
cmap = Colormap(*palette)
    return cmap
def argmax(iterable, key=None, both=False):
"""
>>> argmax([4,2,-5])
0
>>> argmax([4,2,-5], key=abs)
2
>>> argmax([4,2,-5], key=abs, both=True)
(2, 5)
"""
if key is not None:
it = imap(key, iterable)
else:
it = iter(iterable)
score, argmax = reduce(max, izip(it, count()))
if both:
return argmax, score
    return argmax
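# The function above relies on the Python 2 names imap/izip. A minimal Python 3
# equivalent (argmax_py3 is a name introduced here, not part of the original):
def argmax_py3(iterable, key=None, both=False):
    # Materialize the (optionally transformed) scores, then pick the index of the max.
    scores = list(map(key, iterable)) if key is not None else list(iterable)
    idx = max(range(len(scores)), key=scores.__getitem__)
    return (idx, scores[idx]) if both else idx

assert argmax_py3([4, 2, -5], key=abs, both=True) == (2, 5)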
def rekey(self,
uuid=None,
offset=None,
template_attribute=None,
credential=None):
"""
    Rekey an existing managed cryptographic object.
Args:
        uuid (string): The unique identifier of a managed cryptographic
            object that should be rekeyed. Optional, defaults to None.
offset (int): An integer specifying, in seconds, the difference
between the rekeyed objects initialization date and activation
date. Optional, defaults to None.
template_attribute (TemplateAttribute): A TemplateAttribute struct
containing the attributes to set on the newly rekeyed object.
Optional, defaults to None.
credential (Credential): A Credential struct containing a set of
authorization parameters for the operation. Optional, defaults
to None.
Returns:
        dict: The results of the rekey operation, containing the following
            key/value pairs:
Key | Value
---------------------------|-----------------------------------
'unique_identifier' | (string) The unique ID of the
| checked cryptographic object.
'template_attribute' | (TemplateAttribute) A struct
| containing attribute set by the
| server. Optional.
'result_status' | (ResultStatus) An enumeration
| indicating the status of the
| operation result.
'result_reason' | (ResultReason) An enumeration
| providing context for the result
| status.
'result_message' | (string) A message providing
| additional context for the
| operation result.
"""
operation = Operation(OperationEnum.REKEY)
request_payload = payloads.RekeyRequestPayload(
unique_identifier=uuid,
offset=offset,
template_attribute=template_attribute
)
batch_item = messages.RequestBatchItem(
operation=operation,
request_payload=request_payload
)
request = self._build_request_message(credential, [batch_item])
response = self._send_and_receive_message(request)
batch_item = response.batch_items[0]
payload = batch_item.response_payload
result = {}
if payload:
result['unique_identifier'] = payload.unique_identifier
if payload.template_attribute is not None:
result['template_attribute'] = payload.template_attribute
result['result_status'] = batch_item.result_status.value
try:
result['result_reason'] = batch_item.result_reason.value
except Exception:
result['result_reason'] = batch_item.result_reason
try:
result['result_message'] = batch_item.result_message.value
except Exception:
result['result_message'] = batch_item.result_message
    return result
def for_all_targets(self, module, func, filter_func=None):
"""Call func once for all of the targets of this module."""
for target in self.targets(module):
if filter_func is None or filter_func(target):
            func(target)
def process_tile(tile_x, tile_y, tile_size, pix, draw, image):
"""Process a tile whose top left corner is at the given x and y
coordinates.
"""
logging.debug('Processing tile (%d, %d)', tile_x, tile_y)
# Calculate average color for each "triangle" in the given tile
n, e, s, w = triangle_colors(tile_x, tile_y, tile_size, pix)
# Calculate distance between triangle pairs
d_ne = get_color_dist(n, e)
d_nw = get_color_dist(n, w)
d_se = get_color_dist(s, e)
d_sw = get_color_dist(s, w)
# Figure out which pair is the closest, which will determine the direction
# we'll split this tile into triangles. A 'right' split runs from top left
# to bottom right. A 'left' split runs bottom left to top right.
closest = sorted([d_ne, d_nw, d_se, d_sw])[0]
if closest in (d_ne, d_sw):
split = 'right'
elif closest in (d_nw, d_se):
split = 'left'
# Figure out the average color for each side of the "split"
if split == 'right':
top_color = get_average_color([n, e])
bottom_color = get_average_color([s, w])
else:
top_color = get_average_color([n, w])
bottom_color = get_average_color([s, e])
draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color,
                   draw)
def byteswap(data, word_size=4):
""" Swap the byte-ordering in a packet with N=4 bytes per word
"""
    return reduce(lambda x,y: x+''.join(reversed(y)), chunks(data, word_size), '')
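# byteswap() depends on a chunks() helper that is not shown above; a plausible
# stand-in is sketched here (an assumption, not the original implementation).
def chunks(seq, size):
    """Split seq into consecutive pieces of length size."""
    return [seq[i:i + size] for i in range(0, len(seq), size)]

# Example: byteswap('abcdEFGH') -> 'dcbaHGFE' (each 4-byte word reversed).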
def opendocs(where='index', how='default'):
'''
Rebuild documentation and opens it in your browser.
    Use the `how` argument to specify how it should be opened:
`d` or `default`: Open in new tab or new window, using the default
method of your browser.
`t` or `tab`: Open documentation in new tab.
`n`, `w` or `window`: Open documentation in new window.
'''
import webbrowser
docs_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'docs')
index = os.path.join(docs_dir, '_build/html/%s.html' % where)
builddocs('html')
url = 'file://%s' % os.path.abspath(index)
if how in ('d', 'default'):
webbrowser.open(url)
elif how in ('t', 'tab'):
webbrowser.open_new_tab(url)
elif how in ('n', 'w', 'window'):
        webbrowser.open_new(url)
def setup_queue(self):
"""Declare the queue
When completed, the on_queue_declareok method will be invoked by pika.
"""
logger.debug("Declaring queue %s" % self._queue)
    self._channel.queue_declare(self.on_queue_declareok, self._queue, durable=True)
def create_rectangular_prism(origin, size):
'''
Return a Mesh which is an axis-aligned rectangular prism. One vertex is
`origin`; the diametrically opposite vertex is `origin + size`.
size: 3x1 array.
'''
from lace.topology import quads_to_tris
lower_base_plane = np.array([
# Lower base plane
origin,
origin + np.array([size[0], 0, 0]),
origin + np.array([size[0], 0, size[2]]),
origin + np.array([0, 0, size[2]]),
])
upper_base_plane = lower_base_plane + np.array([0, size[1], 0])
vertices = np.vstack([lower_base_plane, upper_base_plane])
faces = quads_to_tris(np.array([
[0, 1, 2, 3], # lower base (-y)
[7, 6, 5, 4], # upper base (+y)
[4, 5, 1, 0], # +z face
[5, 6, 2, 1], # +x face
[6, 7, 3, 2], # -z face
[3, 7, 4, 0], # -x face
]))
    return Mesh(v=vertices, f=faces)
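# Usage sketch (requires the lace package that provides Mesh and quads_to_tris):
# box = create_rectangular_prism(np.zeros(3), np.array([1.0, 2.0, 3.0]))
# box.v holds the 8 corner vertices and box.f the 12 triangles (2 per face).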
def optimal_t(self, t_max=100, plot=False, ax=None):
"""Find the optimal value of t
Selects the optimal value of t based on the knee point of the
Von Neumann Entropy of the diffusion operator.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
plot : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
t_opt : int
The optimal value of t
"""
tasklogger.log_start("optimal t")
t, h = self.von_neumann_entropy(t_max=t_max)
t_opt = vne.find_knee_point(y=h, x=t)
tasklogger.log_info("Automatically selected t = {}".format(t_opt))
tasklogger.log_complete("optimal t")
if plot:
if ax is None:
fig, ax = plt.subplots()
show = True
else:
show = False
ax.plot(t, h)
ax.scatter(t_opt, h[t == t_opt], marker='*', c='k', s=50)
ax.set_xlabel("t")
ax.set_ylabel("Von Neumann Entropy")
ax.set_title("Optimal t = {}".format(t_opt))
if show:
plt.show()
    return t_opt
async def open(self) -> 'Issuer':
"""
Explicit entry. Perform ancestor opening operations,
then synchronize revocation registry to tails tree content.
:return: current object
"""
LOGGER.debug('Issuer.open >>>')
await super().open()
for path_rr_id in Tails.links(self._dir_tails, self.did):
await self._sync_revoc(basename(path_rr_id))
LOGGER.debug('Issuer.open <<<')
    return self
def db_scan_block( block_id, op_list, db_state=None ):
"""
(required by virtualchain state engine)
Given the block ID and the list of virtualchain operations in the block,
do block-level preprocessing:
* find the state-creation operations we will accept
* make sure there are no collisions.
This modifies op_list, but returns nothing.
This aborts on runtime error.
"""
try:
assert db_state is not None, "BUG: no state given"
except Exception, e:
log.exception(e)
log.error("FATAL: no state given")
os.abort()
log.debug("SCAN BEGIN: {} ops at block {}".format(len(op_list), block_id))
checked_ops = []
for op_data in op_list:
try:
opcode = op_get_opcode_name( op_data['op'] )
            assert opcode is not None, "BUG: unknown op '%s'" % op_data['op']
except Exception, e:
log.exception(e)
log.error("FATAL: invalid operation")
os.abort()
if opcode not in OPCODE_CREATION_OPS:
continue
# make sure there are no collisions:
# build up our collision table in db_state.
op_check( db_state, op_data, block_id, checked_ops )
checked_ops.append( op_data )
# get collision information for this block
collisions = db_state.find_collisions( checked_ops )
# reject all operations that will collide
db_state.put_collisions( block_id, collisions )
log.debug("SCAN END: {} ops at block {} ({} collisions)".format(len(op_list), block_id, len(collisions))) | (required by virtualchain state engine)
Given the block ID and the list of virtualchain operations in the block,
do block-level preprocessing:
* find the state-creation operations we will accept
* make sure there are no collisions.
This modifies op_list, but returns nothing.
This aborts on runtime error. |
def parse_port_pin(name_str):
"""Parses a string and returns a (port-num, pin-num) tuple."""
if len(name_str) < 3:
raise ValueError("Expecting pin name to be at least 3 charcters.")
if name_str[0] != 'P':
raise ValueError("Expecting pin name to start with P")
if name_str[1] < 'A' or name_str[1] > 'K':
raise ValueError("Expecting pin port to be between A and K")
port = ord(name_str[1]) - ord('A')
pin_str = name_str[2:]
if not pin_str.isdigit():
raise ValueError("Expecting numeric pin number.")
    return (port, int(pin_str))
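# Usage sketch: 'PA15' parses to port 0 ('A'), pin 15; malformed names raise ValueError.
assert parse_port_pin('PA15') == (0, 15)
assert parse_port_pin('PK3') == (10, 3)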
def parse_file(filename):
"""
Convenience method to parse a generic volumetric data file in the vasp
like format. Used by subclasses for parsing file.
Args:
filename (str): Path of file to parse
Returns:
        (poscar, data, data_aug)
"""
poscar_read = False
poscar_string = []
dataset = []
all_dataset = []
# for holding any strings in input that are not Poscar
# or VolumetricData (typically augmentation charges)
all_dataset_aug = {}
dim = None
dimline = None
read_dataset = False
ngrid_pts = 0
data_count = 0
poscar = None
with zopen(filename, "rt") as f:
for line in f:
original_line = line
line = line.strip()
if read_dataset:
toks = line.split()
for tok in toks:
if data_count < ngrid_pts:
# This complicated procedure is necessary because
# vasp outputs x as the fastest index, followed by y
# then z.
x = data_count % dim[0]
y = int(math.floor(data_count / dim[0])) % dim[1]
z = int(math.floor(data_count / dim[0] / dim[1]))
dataset[x, y, z] = float(tok)
data_count += 1
if data_count >= ngrid_pts:
read_dataset = False
data_count = 0
all_dataset.append(dataset)
elif not poscar_read:
if line != "" or len(poscar_string) == 0:
poscar_string.append(line)
elif line == "":
poscar = Poscar.from_string("\n".join(poscar_string))
poscar_read = True
elif not dim:
dim = [int(i) for i in line.split()]
ngrid_pts = dim[0] * dim[1] * dim[2]
dimline = line
read_dataset = True
dataset = np.zeros(dim)
elif line == dimline:
# when line == dimline, expect volumetric data to follow
# so set read_dataset to True
read_dataset = True
dataset = np.zeros(dim)
else:
# store any extra lines that were not part of the
# volumetric data so we know which set of data the extra
# lines are associated with
key = len(all_dataset) - 1
if key not in all_dataset_aug:
all_dataset_aug[key] = []
all_dataset_aug[key].append(original_line)
if len(all_dataset) == 4:
data = {"total": all_dataset[0], "diff_x": all_dataset[1],
"diff_y": all_dataset[2], "diff_z": all_dataset[3]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff_x": all_dataset_aug.get(1, None),
"diff_y": all_dataset_aug.get(2, None),
"diff_z": all_dataset_aug.get(3, None)}
# construct a "diff" dict for scalar-like magnetization density,
# referenced to an arbitrary direction (using same method as
# pymatgen.electronic_structure.core.Magmom, see
# Magmom documentation for justification for this)
# TODO: re-examine this, and also similar behavior in
# Magmom - @mkhorton
# TODO: does CHGCAR change with different SAXIS?
diff_xyz = np.array([data["diff_x"], data["diff_y"],
data["diff_z"]])
diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2]))
ref_direction = np.array([1.01, 1.02, 1.03])
ref_sign = np.sign(np.dot(ref_direction, diff_xyz))
diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)
data["diff"] = diff.reshape((dim[0], dim[1], dim[2]))
elif len(all_dataset) == 2:
data = {"total": all_dataset[0], "diff": all_dataset[1]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff": all_dataset_aug.get(1, None)}
else:
data = {"total": all_dataset[0]}
data_aug = {"total": all_dataset_aug.get(0, None)}
    return poscar, data, data_aug
def recarraydifference(X,Y):
"""
Records of a numpy recarray (or ndarray with structured dtype)
that do not appear in another.
Fast routine for determining which records in numpy array `X`
do not appear in numpy recarray `Y`.
    Record array version of :func:`tabular.fast.arraydifference`.
**Parameters**
**X** : numpy recarray
            Numpy recarray to compare to numpy recarray `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy recarray
Numpy recarray to which numpy recarray `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy recarray
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.arraydifference`, :func:`tabular.fast.recarrayisin`
"""
if len(Y) > 0:
Z = recarrayisin(X,Y)
return X[np.invert(Z)]
else:
        return X
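# Usage sketch with structured arrays; recarrayisin is assumed to be defined
# alongside this function (as referenced above), so the call is shown as a comment.
import numpy as np

X = np.array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=[('id', int), ('tag', 'U1')])
Y = np.array([(2, 'b')], dtype=[('id', int), ('tag', 'U1')])
# recarraydifference(X, Y) -> the records (1, 'a') and (3, 'c')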
def feed_line(self, line):
"""Feeds one line of input into the reader machine. This method is
a generator that yields all top-level S-expressions that have been
recognized on this line (including multi-line expressions whose last
character is on this line).
"""
self.line += 1
pos = 0
while pos < len(line):
loc_start = TextLocationSingle(self.filename, self.line, pos + 1)
if self.state is State.NORMAL:
item_re = RE_TOKEN
thing = 'token'
elif self.state is State.STRING:
item_re = RE_STRING_ITEM
thing = 'escape sequence'
elif self.state is State.BINARRAY:
item_re = RE_BINARRAY_ITEM[self.binarray_base]
thing = 'binarray item'
else:
assert 0
match = item_re.match(line, pos)
if not match:
raise ReadError(f'{loc_start}: unknown {thing}')
pos = match.end()
loc_end = TextLocationSingle(self.filename, self.line, pos + 1)
loc = loc_start - loc_end
if match['ws_error'] is not None:
raise ReadError(f'{loc_end}: no whitespace after token')
if self.state is State.NORMAL:
# Normal state -- read tokens.
if match['lparen'] is not None:
self.stack.append(StackEntryList(loc_start, []))
elif match['rparen'] is not None:
if not self.stack:
raise ReadError(f'{loc}: unmatched closing paren')
top = self.stack.pop()
if not isinstance(top, StackEntryList):
top.raise_unclosed_error()
yield from self._feed_node(top.items, top.start - loc_end)
elif match['symbol'] is not None:
value = Symbol(match['symbol'])
yield from self._feed_node(value, loc)
elif match['sexpr_comment'] is not None:
self.stack.append(StackEntryComment(loc))
elif match['bool_value'] is not None:
value = match['bool_value'] == '@true'
yield from self._feed_node(value, loc)
elif match['nil_value'] is not None:
yield from self._feed_node(None, loc)
elif match['int_or_word'] is not None:
if match['number'] is not None:
value = int(match['number'], 0)
elif match['raw_char'] is not None:
value = ord(match['raw_char'])
elif match['simple_escape'] is not None:
value = ord(ESCAPE_TO_CHAR[match['simple_escape']])
elif match['hex_code'] is not None:
value = int(match['hex_code'], 16)
if value not in range(0x110000):
raise ReadError(
f'{loc}: not a valid unicode codepoint')
else:
assert 0
if match['word_width'] is not None:
width = int(match['word_width'])
if value < 0:
value += 1 << width
if value not in range(1 << width):
raise ReadError(f'{loc}: word value out of range')
value = BinWord(width, value)
yield from self._feed_node(value, loc)
elif match['array_width'] is not None:
self.binarray_base = {
'0b': 2,
'0o': 8,
None: 10,
'0x': 16,
}[match['array_base']]
self.binarray_data = []
self.binarray_width = int(match['array_width'])
self.token_start = loc_start
self.state = State.BINARRAY
elif match['start_quote'] is not None:
self.state = State.STRING
self.token_start = loc_start
self.string_buffer = StringIO()
if match['string_width'] is not None:
self.binarray_width = int(match['string_width'])
else:
self.binarray_width = None
elif self.state is State.STRING:
# Inside a string.
if match['end_quote'] is not None:
self.state = State.NORMAL
value = self.string_buffer.getvalue()
loc = self.token_start - loc_end
if self.binarray_width is not None:
vals = [ord(x) for x in value]
for x in vals:
if x not in range(1 << self.binarray_width):
raise ReadError(
f'{loc}: character code out of range')
value = BinArray(vals, width=self.binarray_width)
yield from self._feed_node(value, loc)
elif match['raw_chars'] is not None:
self.string_buffer.write(match['raw_chars'])
elif match['simple_escape'] is not None:
c = ESCAPE_TO_CHAR[match['simple_escape']]
self.string_buffer.write(c)
elif match['hex_code'] is not None:
code = int(match['hex_code'], 16)
if code not in range(0x110000):
raise ReadError(
f'{loc}: not a valid unicode codepoint')
self.string_buffer.write(chr(code))
else:
assert 0
elif self.state is State.BINARRAY:
# In a BinArray.
if match['rparen'] is not None:
self.state = State.NORMAL
value = BinArray(self.binarray_data,
width=self.binarray_width)
loc = self.token_start - loc_end
yield from self._feed_node(value, loc)
elif match['digits'] is not None:
value = int(match['digits'], self.binarray_base)
if value < 0:
value += 1 << self.binarray_width
if value not in range(1 << self.binarray_width):
raise ReadError(f'{loc}: word value out of range')
self.binarray_data.append(value)
else:
                assert 0
def set_html(self, html, url = None):
""" Sets custom HTML in our Webkit session and allows to specify a fake URL.
Scripts and CSS is dynamically fetched as if the HTML had been loaded from
the given URL. """
if url:
self.conn.issue_command('SetHtml', html, url)
else:
        self.conn.issue_command('SetHtml', html)
def root(reference_labels, estimated_labels):
"""Compare chords according to roots.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut.
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots = encode_many(estimated_labels, False)[0]
comparison_scores = (ref_roots == est_roots).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
def get_kwargs(self, form, name):
"""
Return the keyword arguments that are used to instantiate the formset.
"""
kwargs = {
'prefix': self.get_prefix(form, name),
'initial': self.get_initial(form, name),
}
kwargs.update(self.default_kwargs)
    return kwargs
def read(fname):
"""
utility function to read and return file contents
"""
fpath = os.path.join(os.path.dirname(__file__), fname)
with codecs.open(fpath, 'r', 'utf8') as fhandle:
        return fhandle.read().strip()
def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
    return self._memcache.decr(self._prefix + key, value)
def _align_with_substrings(self, chains_to_skip = set()):
'''Simple substring-based matching'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
#colortext.pcyan(c)
#colortext.warning(self.fasta[c])
fasta_sequence = self.fasta[c]
substring_matches = {}
for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.iteritems()):
uniparc_sequence = str(uniparc_sequence)
idx = uniparc_sequence.find(fasta_sequence)
if idx != -1:
substring_matches[uniparc_id] = 0
elif len(fasta_sequence) > 30:
idx = uniparc_sequence.find(fasta_sequence[5:-5])
if idx != -1:
substring_matches[uniparc_id] = 5
else:
idx = uniparc_sequence.find(fasta_sequence[7:-7])
if idx != -1:
substring_matches[uniparc_id] = 7
elif len(fasta_sequence) > 15:
idx = uniparc_sequence.find(fasta_sequence[3:-3])
if idx != -1:
substring_matches[uniparc_id] = 3
self.substring_matches[c] = substring_matches
# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.
colortext.pcyan('*' * 100)
pprint.pprint(self.substring_matches)
if self.restrict_to_uniparc_values:
for c in self.representative_chains:
#print('HERE!')
#print(c)
if set(map(str, self.substring_matches[c].keys())).intersection(set(self.restrict_to_uniparc_values)) > 0:
# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values
# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches
# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),
# we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A
restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in self.substring_matches[c].keys() if str(k) in self.restrict_to_uniparc_values)
if len(restricted_matches) != len(self.substring_matches[c]):
removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches)))
# todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))
self.substring_matches[c] = restricted_matches
#pprint.pprint(self.substring_matches)
#colortext.pcyan('*' * 100)
# Use the representatives' alignments for their respective equivalent classes
for c_1, related_chains in self.equivalence_fiber.iteritems():
for c_2 in related_chains:
            self.substring_matches[c_2] = self.substring_matches[c_1]
async def create_vm(self, *, preset_name, image, flavor, security_groups=None,
userdata=None, key_name=None, availability_zone=None,
subnet=None):
"""
Dummy create_vm func.
"""
info = {
'id': next(self._id_it),
'name': preset_name,
'ip': ['127.0.0.1'],
'created': 0,
'state': VmState.RUNNING,
'flavor': flavor,
'image': image,
'metadata': {'test-meta': 'abctest'},
'timed_shutdown_at': 1522753481,
'tags': ['a-tag', 'b-tag', 'c-tag']
}
logging.debug('Prepare vm: %s', info)
vm = Vm(self, **info)
self._vms[vm.id] = vm
logging.debug('Create: %s', vm)
    return None
def toString(self):
""" Returns date as string. """
slist = self.toList()
sign = '' if slist[0] == '+' else '-'
string = '/'.join(['%02d' % v for v in slist[1:]])
    return sign + string
def entry_modification_time(self):
"""dfdatetime.Filetime: entry modification time or None if not set."""
timestamp = self._fsntfs_attribute.get_entry_modification_time_as_integer()
    return dfdatetime_filetime.Filetime(timestamp=timestamp)
def create_container_service(access_token, subscription_id, resource_group, service_name, \
agent_count, agent_vm_size, agent_dns, master_dns, admin_user, location, public_key=None,\
master_count=3, orchestrator='DCOS', app_id=None, app_secret=None, admin_password=None, \
ostype='Linux'):
'''Create a new container service - include app_id and app_secret if using Kubernetes.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
agent_count (int): The number of agent VMs.
agent_vm_size (str): VM size of agents, e.g. Standard_D1_v2.
agent_dns (str): A unique DNS string for the agent DNS.
master_dns (str): A unique string for the master DNS.
admin_user (str): Admin user name.
location (str): Azure data center location, e.g. westus.
public_key (str): RSA public key (utf-8).
master_count (int): Number of master VMs.
orchestrator (str): Container orchestrator. E.g. DCOS, Kubernetes.
app_id (str): Application ID for Kubernetes.
app_secret (str): Application secret for Kubernetes.
admin_password (str): Admin user password.
        ostype (str): Operating system. Windows or Linux.
Returns:
HTTP response. Container service JSON model.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices/', service_name,
'?api-version=', ACS_API])
acs_body = {'location': location}
properties = {'orchestratorProfile': {'orchestratorType': orchestrator}}
properties['masterProfile'] = {'count': master_count, 'dnsPrefix': master_dns}
ap_profile = {'name': 'AgentPool1'}
ap_profile['count'] = agent_count
ap_profile['vmSize'] = agent_vm_size
ap_profile['dnsPrefix'] = agent_dns
properties['agentPoolProfiles'] = [ap_profile]
if ostype == 'Linux':
linux_profile = {'adminUsername': admin_user}
linux_profile['ssh'] = {'publicKeys': [{'keyData': public_key}]}
properties['linuxProfile'] = linux_profile
else: # Windows
windows_profile = {'adminUsername': admin_user, 'adminPassword': admin_password}
properties['windowsProfile'] = windows_profile
if orchestrator == 'Kubernetes':
sp_profile = {'ClientID': app_id}
sp_profile['Secret'] = app_secret
properties['servicePrincipalProfile'] = sp_profile
acs_body['properties'] = properties
body = json.dumps(acs_body)
    return do_put(endpoint, body, access_token)
def is_course_complete(last_update):
"""
    Determine if the course is likely to have been terminated or not.
    We return True if the timestamp given by last_update is 30 days or older
    than today's date. Otherwise, we return False.
    The intended use case for this is to detect if a given course has not
    seen any update in the last 30 days or more. Otherwise, we return False,
    since it is probably too soon to declare the course complete.
"""
rv = False
if last_update >= 0:
delta = time.time() - last_update
max_delta = total_seconds(datetime.timedelta(days=30))
if delta > max_delta:
rv = True
    return rv
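# Usage sketch; total_seconds() above is a project helper, so a plausible
# stand-in is defined here purely for illustration (an assumption).
import datetime
import time

def total_seconds(td):
    return td.days * 86400 + td.seconds + td.microseconds / 1e6

assert is_course_complete(time.time() - 60 * 86400) is True   # stale for 60 days
assert is_course_complete(time.time() - 1 * 86400) is False   # updated yesterday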
def get_public_key(key, passphrase=None, asObj=False):
'''
Returns a string containing the public key in PEM format.
key:
A path or PEM encoded string containing a CSR, Certificate or
Private Key from which a public key can be retrieved.
CLI Example:
.. code-block:: bash
salt '*' x509.get_public_key /etc/pki/mycert.cer
'''
if isinstance(key, M2Crypto.X509.X509):
rsa = key.get_pubkey().get_rsa()
text = b''
else:
text = _text_or_file(key)
text = get_pem_entry(text)
if text.startswith(b'-----BEGIN PUBLIC KEY-----'):
if not asObj:
return text
bio = M2Crypto.BIO.MemoryBuffer()
bio.write(text)
rsa = M2Crypto.RSA.load_pub_key_bio(bio)
bio = M2Crypto.BIO.MemoryBuffer()
if text.startswith(b'-----BEGIN CERTIFICATE-----'):
cert = M2Crypto.X509.load_cert_string(text)
rsa = cert.get_pubkey().get_rsa()
if text.startswith(b'-----BEGIN CERTIFICATE REQUEST-----'):
csr = M2Crypto.X509.load_request_string(text)
rsa = csr.get_pubkey().get_rsa()
if (text.startswith(b'-----BEGIN PRIVATE KEY-----') or
text.startswith(b'-----BEGIN RSA PRIVATE KEY-----')):
rsa = M2Crypto.RSA.load_key_string(
text, callback=_passphrase_callback(passphrase))
if asObj:
evppubkey = M2Crypto.EVP.PKey()
evppubkey.assign_rsa(rsa)
return evppubkey
rsa.save_pub_key_bio(bio)
    return bio.read_all()
def get_cached_source_variable(self, source_id, variable, default=None):
""" Get the cached value of a source variable. If the variable is not
cached return the default value. """
source_id = int(source_id)
try:
return self._retrieve_cached_source_variable(
source_id, variable)
except UncachedVariable:
        return default
def remove_slug(path):
"""
    Return the remaining part of the path
    >>> remove_slug('/test/some/function/')
    'test/some'
"""
if path.endswith('/'):
path = path[:-1]
if path.startswith('/'):
path = path[1:]
if "/" not in path or not path:
return None
parts = path.split("/")[:-1]
return "/".join(parts) | Return the remainin part of the path
>>> remove_slug('/test/some/function/')
test/some |
def persistent_timer(func):
"""
Times the execution of a function. If the process is stopped and restarted then timing is continued using saved
files.
Parameters
----------
func
Some function to be timed
Returns
-------
timed_function
The same function with a timer attached.
"""
@functools.wraps(func)
def timed_function(optimizer_instance, *args, **kwargs):
start_time_path = "{}/.start_time".format(optimizer_instance.phase_output_path)
try:
with open(start_time_path) as f:
start = float(f.read())
except FileNotFoundError:
start = time.time()
with open(start_time_path, "w+") as f:
f.write(str(start))
result = func(optimizer_instance, *args, **kwargs)
execution_time = str(dt.timedelta(seconds=time.time() - start))
logger.info("{} took {} to run".format(
optimizer_instance.phase_name,
execution_time
))
with open("{}/execution_time".format(optimizer_instance.phase_output_path), "w+") as f:
f.write(execution_time)
return result
    return timed_function
def start(self):
"""Initiate server connection."""
yield from self._do_connect()
_LOGGER.info('connected to snapserver on %s:%s', self._host, self._port)
status = yield from self.status()
self.synchronize(status)
    self._on_server_connect()
def close(self):
"""Close the tough connection.
You are allowed to close a tough connection by default
and it will not complain if you close it more than once.
You can disallow closing connections by setting
the closeable parameter to something false. In this case,
closing tough connections will be silently ignored.
"""
if self._closeable:
self._close()
elif self._transaction:
        self._reset()
def load_or_create(cls, filename=None, no_input=False, create_new=False, **kwargs):
"""
Load system from a dump, if dump file exists, or create a new system if it does not exist.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--no_input', action='store_true')
parser.add_argument('--create_new', action='store_true')
args = parser.parse_args()
if args.no_input:
print('Parameter --no_input was given')
no_input = True
if args.create_new:
print('Parameter --create_new was given')
create_new = True
no_input = True
def savefile_more_recent():
time_savefile = os.path.getmtime(filename)
time_program = os.path.getmtime(sys.argv[0])
return time_savefile > time_program
def load_pickle():
with open(filename, 'rb') as of:
statefile_version, data = pickle.load(of)
if statefile_version != STATEFILE_VERSION:
raise RuntimeError(f'Wrong statefile version, please remove state file {filename}')
return data
def load():
print('Loading %s' % filename)
obj_list, config = load_pickle()
system = System(load_state=obj_list, filename=filename, **kwargs)
return system
def create():
print('Creating new system')
config = None
if filename:
try:
obj_list, config = load_pickle()
except FileNotFoundError:
config = None
return cls(filename=filename, load_config=config, **kwargs)
if filename and os.path.isfile(filename):
if savefile_more_recent() and not create_new:
return load()
else:
if no_input:
print('Program file more recent. Loading that instead.')
return create()
while True:
answer = input('Program file more recent. Do you want to load it? (y/n) ')
if answer == 'y':
return create()
elif answer == 'n':
return load()
else:
return create() | Load system from a dump, if dump file exists, or create a new system if it does not exist. |
def add_view_permissions(sender, verbosity, **kwargs):
"""
This post_syncdb/post_migrate hooks takes care of adding a view permission too all our
content types.
"""
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
for content_type in ContentType.objects.all():
codename = "view_%s" % content_type.model
_, created = Permission.objects \
.get_or_create(content_type=content_type,
codename=codename,
defaults={'name': 'Can view %s' % content_type.name})
if created and verbosity >= 1:
print('Added view permission for %s' % content_type.name) | This post_syncdb/post_migrate hooks takes care of adding a view permission too all our
content types. |
def required_arguments(func):
"""Return all arguments of a function that do not have a default value."""
defaults = default_values_of(func)
args = arguments_of(func)
if defaults:
args = args[:-len(defaults)]
return args | Return all arguments of a function that do not have a default value. |
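For context, a sketch of how the two helpers it relies on could look using the standard inspect module (these definitions are assumptions, not the project's own), together with the expected result:
import inspect

def arguments_of(func):
    """Assumed helper: ordered parameter names of func."""
    return list(inspect.signature(func).parameters)

def default_values_of(func):
    """Assumed helper: default values of the parameters that have one."""
    return [p.default for p in inspect.signature(func).parameters.values()
            if p.default is not inspect.Parameter.empty]

def example(a, b, c=1, d=2):
    pass

print(required_arguments(example))   # ['a', 'b']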
def _read_buffer(self, data_in):
"""Process the socket buffer, and direct the data to the appropriate
channel.
:rtype: bytes
"""
while data_in:
data_in, channel_id, frame_in = self._handle_amqp_frame(data_in)
if frame_in is None:
break
self.heartbeat.register_read()
if channel_id == 0:
self._channel0.on_frame(frame_in)
elif channel_id in self._channels:
self._channels[channel_id].on_frame(frame_in)
return data_in | Process the socket buffer, and direct the data to the appropriate
channel.
:rtype: bytes |
def create_negotiate_message(self, domain_name=None, workstation=None):
"""
Create an NTLM NEGOTIATE_MESSAGE
:param domain_name: The domain name of the user account we are authenticating with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:return: A base64 encoded string of the NEGOTIATE_MESSAGE
"""
self.negotiate_message = NegotiateMessage(self.negotiate_flags, domain_name, workstation)
return base64.b64encode(self.negotiate_message.get_data()) | Create an NTLM NEGOTIATE_MESSAGE
:param domain_name: The domain name of the user account we are authenticating with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:return: A base64 encoded string of the NEGOTIATE_MESSAGE |
def iscomplex(polynomial):
"""Returns whether the polynomial has complex coefficients
:param polynomial: Polynomial of noncommutative variables.
:type polynomial: :class:`sympy.core.expr.Expr`.
:returns: bool -- whether there is a complex coefficient.
"""
if isinstance(polynomial, (int, float)):
return False
if isinstance(polynomial, complex):
return True
polynomial = polynomial.expand()
for monomial in polynomial.as_coefficients_dict():
for variable in monomial.as_coeff_mul()[1]:
if isinstance(variable, complex) or variable == I:
return True
return False | Returns whether the polynomial has complex coefficients
:param polynomial: Polynomial of noncommutative variables.
:type polynomial: :class:`sympy.core.expr.Expr`.
:returns: bool -- whether there is a complex coefficient. |
def download(self, id, attid): # pylint: disable=invalid-name,redefined-builtin
"""Download a device's attachment.
:param id: Device ID as an int.
:param attid: Attachment ID as an int.
:rtype: tuple `(io.BytesIO, 'filename')`
"""
resp = self.service.get_id(self._base(id), attid, params={'format': 'download'}, stream=True)
b = io.BytesIO()
stream.stream_response_to_file(resp, path=b)
resp.close()
b.seek(0)
return (b, self.service.filename(resp)) | Download a device's attachment.
:param id: Device ID as an int.
:param attid: Attachment ID as an int.
:rtype: tuple `(io.BytesIO, 'filename')` |
def log(self, metrics_dict):
"""Print calculated metrics and optionally write to file (json/tb)"""
if self.writer:
self.write_to_file(metrics_dict)
if self.verbose:
self.print_to_screen(metrics_dict)
self.reset() | Print calculated metrics and optionally write to file (json/tb) |
def index_model(index_name, adapter):
''' Index all objects given a model'''
model = adapter.model
log.info('Indexing {0} objects'.format(model.__name__))
qs = model.objects
if hasattr(model.objects, 'visible'):
qs = qs.visible()
if adapter.exclude_fields:
qs = qs.exclude(*adapter.exclude_fields)
docs = iter_qs(qs, adapter)
docs = iter_for_index(docs, index_name)
for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
if not ok:
log.error('Unable to index %s "%s": %s', model.__name__,
info['index']['_id'], info['index']['error']) | Index all objects given a model
def conf(self):
"""Generate the Sphinx `conf.py` configuration file
Returns:
(str): the contents of the `conf.py` file.
"""
return self.env.get_template('conf.py.j2').render(
metadata=self.metadata,
package=self.package) | Generate the Sphinx `conf.py` configuration file
Returns:
(str): the contents of the `conf.py` file. |
def grid(self, dimensions=None, **kwargs):
"""Group by supplied dimension(s) and lay out groups in grid
Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a GridSpace.
Args:
dimensions: Dimension/str or list
Dimension or list of dimensions to group by
Returns:
GridSpace with supplied dimensions
"""
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return GridSpace(self, **kwargs).reindex(dimensions)
return self.groupby(dimensions, container_type=GridSpace, **kwargs) | Group by supplied dimension(s) and lay out groups in grid
Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a GridSpace.
Args:
dimensions: Dimension/str or list
Dimension or list of dimensions to group by
Returns:
GridSpace with supplied dimensions |
def computeRawAnomalyScore(activeColumns, prevPredictedColumns):
"""Computes the raw anomaly score.
The raw anomaly score is the fraction of active columns not predicted.
:param activeColumns: array of active column indices
:param prevPredictedColumns: array of columns indices predicted in prev step
:returns: anomaly score 0..1 (float)
"""
nActiveColumns = len(activeColumns)
if nActiveColumns > 0:
# Test whether each element of a 1-D array is also present in a second
# array. Sum to get the total # of columns that are active and were
# predicted.
score = numpy.in1d(activeColumns, prevPredictedColumns).sum()
# Get the percent of active columns that were NOT predicted, that is
# our anomaly score.
score = (nActiveColumns - score) / float(nActiveColumns)
else:
# There are no active columns.
score = 0.0
return score | Computes the raw anomaly score.
The raw anomaly score is the fraction of active columns not predicted.
:param activeColumns: array of active column indices
:param prevPredictedColumns: array of columns indices predicted in prev step
:returns: anomaly score 0..1 (float) |
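A small worked example (the column indices are made up): two of the four active columns were not predicted, so the raw anomaly score is 0.5.
import numpy

active = numpy.array([2, 5, 9, 12])      # currently active columns
predicted = numpy.array([2, 5, 7])       # columns predicted in the previous step
print(computeRawAnomalyScore(active, predicted))   # 0.5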
def write(models, out=None, base=None, logger=logging):
'''
models - one or more input Versa models from which output is generated.
'''
assert out is not None #Output stream required
if not isinstance(models, list): models = [models]
for m in models:
for link in m.match():
s, p, o = link[:3]
#Skip docheader statements
if s == (base or '') + '@docheader': continue
if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p]
if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o]
if p == VERSA_TYPE_REL: p = RDF_TYPE_REL
print(strconv(s), strconv(p), strconv(o), '.', file=out)
return | models - one or more input Versa models from which output is generated. |
def build_inside(input_method, input_args=None, substitutions=None):
"""
use requested input plugin to load configuration and then initiate build
"""
def process_keyvals(keyvals):
""" ["key=val", "x=y"] -> {"key": "val", "x": "y"} """
keyvals = keyvals or []
processed_keyvals = {}
for arg in keyvals:
key, value = arg.split("=", 1)
processed_keyvals[key] = value
return processed_keyvals
main = __name__.split('.', 1)[0]
log_encoding = get_logging_encoding(main)
logger.info("log encoding: %s", log_encoding)
if not input_method:
raise RuntimeError("No input method specified!")
logger.debug("getting build json from input %s", input_method)
cleaned_input_args = process_keyvals(input_args)
cleaned_input_args['substitutions'] = process_keyvals(substitutions)
input_runner = InputPluginsRunner([{'name': input_method,
'args': cleaned_input_args}])
build_json = input_runner.run()[input_method]
if isinstance(build_json, Exception):
raise RuntimeError("Input plugin raised exception: {}".format(build_json))
logger.debug("build json: %s", build_json)
if not build_json:
raise RuntimeError("No valid build json!")
if not isinstance(build_json, dict):
raise RuntimeError("Input plugin did not return valid build json: {}".format(build_json))
dbw = DockerBuildWorkflow(**build_json)
try:
build_result = dbw.build_docker_image()
except Exception as e:
logger.error('image build failed: %s', e)
raise
else:
if not build_result or build_result.is_failed():
raise RuntimeError("no image built")
else:
logger.info("build has finished successfully \\o/") | use requested input plugin to load configuration and then initiate build |
def start_system(components, bind_to, hooks={}):
"""Start all components on component map."""
deps = build_deps_graph(components)
started_components = start_components(components, deps, None)
run_hooks(hooks, started_components)
if type(bind_to) is str:
master = started_components[bind_to]
else:
master = bind_to
setattr(master, '__components', started_components)
return master | Start all components on component map. |
def make_predictor(regressor=LassoLarsIC(fit_intercept=False),
Selector=GridSearchCV, fourier_degree=(2, 25),
selector_processes=1,
use_baart=False, scoring='r2', scoring_cv=3,
**kwargs):
"""make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs)
Makes a predictor object for use in :func:`get_lightcurve`.
**Parameters**
regressor : object with "fit" and "transform" methods, optional
Regression object used for solving Fourier matrix
(default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``).
Selector : class with "fit" and "predict" methods, optional
Model selection class used for finding the best fit
(default :class:`sklearn.grid_search.GridSearchCV`).
selector_processes : positive integer, optional
Number of processes to use for *Selector* (default 1).
use_baart : boolean, optional
If True, ignores *Selector* and uses Baart's Criteria to find
the Fourier degree, within the boundaries (default False).
fourier_degree : 2-tuple, optional
Tuple containing lower and upper bounds on Fourier degree, in that
order (default (2, 25)).
scoring : str, optional
Scoring method to use for *Selector*. This parameter can be:
* "r2", in which case use :math:`R^2` (the default)
* "mse", in which case use mean square error
scoring_cv : positive integer, optional
Number of cross validation folds used in scoring (default 3).
**Returns**
out : object with "fit" and "predict" methods
The created predictor object.
"""
fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \
if use_baart else Fourier()
pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)])
if use_baart:
return pipeline
else:
params = {'Fourier__degree': list(range(fourier_degree[0],
fourier_degree[1]+1))}
return Selector(pipeline, params, scoring=scoring, cv=scoring_cv,
n_jobs=selector_processes) | make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs)
Makes a predictor object for use in :func:`get_lightcurve`.
**Parameters**
regressor : object with "fit" and "transform" methods, optional
Regression object used for solving Fourier matrix
(default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``).
Selector : class with "fit" and "predict" methods, optional
Model selection class used for finding the best fit
(default :class:`sklearn.grid_search.GridSearchCV`).
selector_processes : positive integer, optional
Number of processes to use for *Selector* (default 1).
use_baart : boolean, optional
If True, ignores *Selector* and uses Baart's Criteria to find
the Fourier degree, within the boundaries (default False).
fourier_degree : 2-tuple, optional
Tuple containing lower and upper bounds on Fourier degree, in that
order (default (2, 25)).
scoring : str, optional
Scoring method to use for *Selector*. This parameter can be:
* "r2", in which case use :math:`R^2` (the default)
* "mse", in which case use mean square error
scoring_cv : positive integer, optional
Number of cross validation folds used in scoring (default 3).
**Returns**
out : object with "fit" and "predict" methods
The created predictor object. |
def rlmb_long_stochastic_discrete_100steps():
"""Long setting with stochastic discrete model, changed ppo steps."""
hparams = rlmb_long_stochastic_discrete()
hparams.ppo_epoch_length = 100
hparams.simulated_rollout_length = 100
hparams.simulated_batch_size = 8
return hparams | Long setting with stochastic discrete model, changed ppo steps. |
def _process_genes(self, limit=None):
"""
This method processes the KEGG gene IDs.
The label for the gene is pulled as
the first symbol in the list of gene symbols;
the rest are added as synonyms.
The long-form of the gene name is added as a definition.
This is hardcoded to just process human genes.
Triples created:
<gene_id> is a SO:gene
<gene_id> rdfs:label <gene_name>
:param limit:
:return:
"""
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
family = Family(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['hsa_genes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_name) = row
gene_id = 'KEGG-'+gene_id.strip()
# the gene listing has a bunch of labels
# that are delimited, as:
# DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT,
# EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin
# it looks like the list is semicolon delimited
# (symbol, name, gene_class)
# where the symbol is a comma-delimited list
# here, we split them up.
# we will take the first abbreviation and make it the symbol
# then take the rest as synonyms
gene_stuff = re.split(r';', gene_name)
symbollist = re.split(r',', gene_stuff[0])
first_symbol = symbollist[0].strip()
if gene_id not in self.label_hash:
self.label_hash[gene_id] = first_symbol
if self.test_mode and gene_id not in self.test_ids['genes']:
continue
# Add the gene as a class.
geno.addGene(gene_id, first_symbol)
# add the long name as the description
if len(gene_stuff) > 1:
description = gene_stuff[1].strip()
model.addDefinition(gene_id, description)
# add the rest of the symbols as synonyms
for i in enumerate(symbollist, start=1):
model.addSynonym(gene_id, i[1].strip())
if len(gene_stuff) > 2:
ko_part = gene_stuff[2]
ko_match = re.search(r'(K\d+)', ko_part)
if ko_match is not None and len(ko_match.groups()) == 1:
ko = 'KEGG-ko:'+ko_match.group(1)
family.addMemberOf(gene_id, ko)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genes")
return | This method processes the KEGG gene IDs.
The label for the gene is pulled as
the first symbol in the list of gene symbols;
the rest are added as synonyms.
The long-form of the gene name is added as a definition.
This is hardcoded to just process human genes.
Triples created:
<gene_id> is a SO:gene
<gene_id> rdfs:label <gene_name>
:param limit:
:return: |
def tear_down(self):
""" Called when controller is destroyed. """
super(ReceiverController, self).tear_down()
self.status = None
self.launch_failure = None
self.app_to_launch = None
self.app_launch_event.clear()
self._status_listeners[:] = [] | Called when controller is destroyed. |
def namelist_handle(tokens):
"""Process inline nonlocal and global statements."""
if len(tokens) == 1:
return tokens[0]
elif len(tokens) == 2:
return tokens[0] + "\n" + tokens[0] + " = " + tokens[1]
else:
raise CoconutInternalException("invalid in-line nonlocal / global tokens", tokens) | Process inline nonlocal and global statements. |
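A quick check of the two supported token shapes, a bare name and a name with an assigned value; the plain lists stand in for the pyparsing tokens the parser would normally pass.
print(repr(namelist_handle(["counter"])))        # 'counter'
print(repr(namelist_handle(["counter", "0"])))   # 'counter\ncounter = 0'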
def parse_rules(data, chain):
"""
Parse the rules for the specified chain.
"""
rules = []
for line in data.splitlines(True):
m = re_rule.match(line)
if m and m.group(3) == chain:
rule = parse_rule(m.group(4))
rule.packets = int(m.group(1))
rule.bytes = int(m.group(2))
rules.append(rule)
return rules | Parse the rules for the specified chain. |
def execd_module_paths(execd_dir=None):
"""Generate a list of full paths to modules within execd_dir."""
if not execd_dir:
execd_dir = default_execd_dir()
if not os.path.exists(execd_dir):
return
for subpath in os.listdir(execd_dir):
module = os.path.join(execd_dir, subpath)
if os.path.isdir(module):
yield module | Generate a list of full paths to modules within execd_dir. |
def _render_expression(self, check):
"""Turn a mongodb-style search dict into an SQL query."""
expressions = []
args = []
skeys = set(check.keys())
skeys.difference_update(set(self._keys))
skeys.difference_update(set(['buffers', 'result_buffers']))
if skeys:
raise KeyError("Illegal testing key(s): %s"%skeys)
for name,sub_check in check.iteritems():
if isinstance(sub_check, dict):
for test,value in sub_check.iteritems():
try:
op = operators[test]
except KeyError:
raise KeyError("Unsupported operator: %r"%test)
if isinstance(op, tuple):
op, join = op
if value is None and op in null_operators:
expr = "%s %s" % (name, null_operators[op])
else:
expr = "%s %s ?"%(name, op)
if isinstance(value, (tuple,list)):
if op in null_operators and any([v is None for v in value]):
# equality tests don't work with NULL
raise ValueError("Cannot use %r test with NULL values on SQLite backend"%test)
expr = '( %s )'%( join.join([expr]*len(value)) )
args.extend(value)
else:
args.append(value)
expressions.append(expr)
else:
# it's an equality check
if sub_check is None:
expressions.append("%s IS NULL" % name)
else:
expressions.append("%s = ?"%name)
args.append(sub_check)
expr = " AND ".join(expressions)
return expr, args | Turn a mongodb-style search dict into an SQL query. |
def _validate(config):
""" Config validation
Raises:
KeyError on missing mandatory key
SyntaxError on invalid key
ValueError on invalid value for key
:param config: {dict} config to validate
:return: None
"""
for mandatory_key in _mandatory_keys:
if mandatory_key not in config:
raise KeyError(mandatory_key)
for key in config.keys():
if key not in _mandatory_keys and key not in _optional_keys:
raise SyntaxError(key)
if not isinstance(config[key], _default_config[key].__class__):
raise ValueError(key) | Config validation
Raises:
KeyError on missing mandatory key
SyntaxError on invalid key
ValueError on invalid value for key
:param config: {dict} config to validate
:return: None |
def check_has_docstring(self, api):
'''An API class must have a docstring.'''
if not api.__doc__:
msg = 'The Api class "{}" lacks a docstring.'
return [msg.format(api.__name__)] | An API class must have a docstring. |
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client."""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug('Found running nailgun server that needs updating, killing {server}'
.format(server=self._identity))
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin)
return self._create_ngclient(self.socket, stdout, stderr, stdin) | This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client. |
def to_dict(self):
"""
Return a dict that can be serialised to JSON and sent to UpCloud's API.
"""
return dict(
(attr, getattr(self, attr))
for attr in self.ATTRIBUTES
if hasattr(self, attr)
) | Return a dict that can be serialised to JSON and sent to UpCloud's API. |
async def _handle_container_timeout(self, container_id, timeout):
"""
Check timeout with docker stats
:param container_id:
:param timeout: in seconds (cpu time)
"""
try:
docker_stats = await self._docker_interface.get_stats(container_id)
source = AsyncIteratorWrapper(docker_stats)
nano_timeout = timeout * (10 ** 9)
async for upd in source:
if upd is None:
await self._kill_it_with_fire(container_id)
self._logger.debug("%i", upd['cpu_stats']['cpu_usage']['total_usage'])
if upd['cpu_stats']['cpu_usage']['total_usage'] > nano_timeout:
self._logger.info("Killing container %s as it used %i CPU seconds (max was %i)",
container_id, int(upd['cpu_stats']['cpu_usage']['total_usage'] / (10 ** 9)), timeout)
await self._kill_it_with_fire(container_id)
return
except asyncio.CancelledError:
pass
except:
self._logger.exception("Exception in _handle_container_timeout") | Check timeout with docker stats
:param container_id:
:param timeout: in seconds (cpu time) |
def next_permutation(tab):
"""find the next permutation of tab in the lexicographical order
:param tab: table with n elements from an ordered set
:modifies: table to next permutation
:returns: False if permutation is already lexicographical maximal
:complexity: O(n)
"""
n = len(tab)
pivot = None # find pivot
for i in range(n - 1):
if tab[i] < tab[i + 1]:
pivot = i
if pivot is None: # tab is already the last perm.
return False
for i in range(pivot + 1, n): # find the element to swap
if tab[i] > tab[pivot]:
swap = i
tab[swap], tab[pivot] = tab[pivot], tab[swap]
i = pivot + 1
j = n - 1 # invert suffix
while i < j:
tab[i], tab[j] = tab[j], tab[i]
i += 1
j -= 1
return True | find the next permutation of tab in the lexicographical order
:param tab: table with n elements from an ordered set
:modifies: table to next permutation
:returns: False if permutation is already lexicographical maximal
:complexity: O(n) |
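Since the function mutates the table in place and returns False once the last permutation is reached, enumerating all permutations is a simple loop:
perm = [1, 2, 3]
while True:
    print(perm)                    # visits the 6 permutations in lexicographical order
    if not next_permutation(perm):
        break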
def _filter_commands(ctx, commands=None):
"""Return list of used commands."""
lookup = getattr(ctx.command, 'commands', {})
if not lookup and isinstance(ctx.command, click.MultiCommand):
lookup = _get_lazyload_commands(ctx.command)
if commands is None:
return sorted(lookup.values(), key=lambda item: item.name)
names = [name.strip() for name in commands.split(',')]
return [lookup[name] for name in names if name in lookup] | Return list of used commands. |
def set_scene_velocity(self, scene_id, velocity):
"""reconfigure a scene by scene ID"""
if scene_id not in self.state.scenes:  # does that scene_id exist?
err_msg = "Requested to set velocity on scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
logging.info(err_msg)
return(False, 0, err_msg)
self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(velocity=velocity)
sequence_number = self.zmq_publisher.publish_scene_velocity(scene_id, velocity)
logging.debug("set velocity on scene {sceneNum}".format(sceneNum=scene_id))
if scene_id == self.state.activeSceneId:
self.state.activeAnimation.set_velocity(velocity)
self._do_next_frame() # TODO: make it more sensible, e.g. call only if static scene
return (True, sequence_number, "OK") | reconfigure a scene by scene ID |
def _has_local_storage(self, pod=None):
"""
Determines if a K8sPod has any local storage susceptible to be lost.
:param pod: The K8sPod we're interested in.
:return: a boolean.
"""
for vol in pod.volumes:
if vol.emptyDir is not None:
return True
return False | Determines if a K8sPod has any local storage susceptible to be lost.
:param pod: The K8sPod we're interested in.
:return: a boolean. |
def parse_route_name_and_version(route_repr):
"""
Parse a route representation string and return the route name and version number.
:param route_repr: Route representation string.
:return: A tuple containing route name and version number.
"""
if ':' in route_repr:
route_name, version = route_repr.split(':', 1)
try:
version = int(version)
except ValueError:
raise ValueError('Invalid route representation: {}'.format(route_repr))
else:
route_name = route_repr
version = 1
return route_name, version | Parse a route representation string and return the route name and version number.
:param route_repr: Route representation string.
:return: A tuple containing route name and version number. |
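A few illustrative calls (the route names are made up):
print(parse_route_name_and_version('users:2'))    # ('users', 2)
print(parse_route_name_and_version('users'))      # ('users', 1)
# parse_route_name_and_version('users:beta') raises ValueError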
def _execute_hooks(self, element):
"""
Executes finalize hooks
"""
if self.hooks and self.finalize_hooks:
self.param.warning(
"Supply either hooks or finalize_hooks not both, "
"using hooks and ignoring finalize_hooks.")
hooks = self.hooks or self.finalize_hooks
for hook in hooks:
try:
hook(self, element)
except Exception as e:
self.param.warning("Plotting hook %r could not be "
"applied:\n\n %s" % (hook, e)) | Executes finalize hooks |
def append(self, other, ignore_index=False):
"""Append rows of `other` to the end of this frame, returning a new object.
Wrapper around the :meth:`pandas.DataFrame.append` method.
Args:
other (Cartesian):
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
Returns:
Cartesian:
"""
if not isinstance(other, self.__class__):
raise ValueError('May only append instances of same type.')
if type(ignore_index) is bool:
new_frame = self._frame.append(other._frame,
ignore_index=ignore_index,
verify_integrity=True)
else:
new_frame = self._frame.append(other._frame,
ignore_index=True,
verify_integrity=True)
if type(ignore_index) is int:
new_frame.index = range(ignore_index,
ignore_index + len(new_frame))
else:
new_frame.index = ignore_index
return self.__class__(new_frame) | Append rows of `other` to the end of this frame, returning a new object.
Wrapper around the :meth:`pandas.DataFrame.append` method.
Args:
other (Cartesian):
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
Returns:
Cartesian: |
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names) | Filter out AST nodes that are subclasses of ``superclass``. |
def find_side(ls, side):
"""
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
"""
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)],}
return sgeom.LineString(points[side]) | Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle. |
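A usage sketch assuming shapely is imported as sgeom, as the function body expects; the rectangle coordinates are arbitrary:
import shapely.geometry as sgeom

rect = sgeom.LineString([(0, 0), (2, 0), (2, 1), (0, 1), (0, 0)])
left = find_side(rect, 'left')
print(list(left.coords))           # [(0.0, 0.0), (0.0, 1.0)]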
def _run_init_queries(self):
'''
Initialization queries
'''
for obj in (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir):
self._db.create_table_from_object(obj()) | Initialization queries |
def occurrence_halved_fingerprint(
word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG
):
"""Return the occurrence halved fingerprint.
This is a wrapper for :py:meth:`OccurrenceHalved.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The occurrence halved fingerprint
Examples
--------
>>> bin(occurrence_halved_fingerprint('hat'))
'0b1010000000010'
>>> bin(occurrence_halved_fingerprint('niall'))
'0b10010100000'
>>> bin(occurrence_halved_fingerprint('colin'))
'0b1001010000'
>>> bin(occurrence_halved_fingerprint('atcg'))
'0b10100000000000'
>>> bin(occurrence_halved_fingerprint('entreatment'))
'0b1111010000110000'
"""
return OccurrenceHalved().fingerprint(word, n_bits, most_common) | Return the occurrence halved fingerprint.
This is a wrapper for :py:meth:`OccurrenceHalved.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The occurrence halved fingerprint
Examples
--------
>>> bin(occurrence_halved_fingerprint('hat'))
'0b1010000000010'
>>> bin(occurrence_halved_fingerprint('niall'))
'0b10010100000'
>>> bin(occurrence_halved_fingerprint('colin'))
'0b1001010000'
>>> bin(occurrence_halved_fingerprint('atcg'))
'0b10100000000000'
>>> bin(occurrence_halved_fingerprint('entreatment'))
'0b1111010000110000' |
def power_corr(r=None, n=None, power=None, alpha=0.05, tail='two-sided'):
"""
Evaluate power, sample size, correlation coefficient or
significance level of a correlation test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
tail : str
Indicates whether the test is "two-sided" or "one-sided".
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must
be passed as None, and that parameter is determined from the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
This function is a mere Python translation of the original `pwr.r.test`
function implemented in the `pwr` R package.
All credit goes to the author, Stephane Champely.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80,
... tail='one-sided'))
n: 22.6091
3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
4. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80,
... alpha=None))
alpha: 0.1377
"""
# Check the number of arguments that are None
n_none = sum([v is None for v in [r, n, power, alpha]])
if n_none != 1:
raise ValueError('Exactly one of n, r, power, and alpha must be None')
# Safety checks
if r is not None:
assert -1 <= r <= 1
r = abs(r)
if alpha is not None:
assert 0 < alpha <= 1
if power is not None:
assert 0 < power <= 1
if n is not None:
assert n > 4
# Define main function
if tail == 'two-sided':
def func(r, n, power, alpha):
dof = n - 2
ttt = stats.t.ppf(1 - alpha / 2, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))
zr = np.arctanh(r) + r / (2 * (n - 1))
zrc = np.arctanh(rc)
power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) + \
stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3))
return power
else:
def func(r, n, power, alpha):
dof = n - 2
ttt = stats.t.ppf(1 - alpha, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))
zr = np.arctanh(r) + r / (2 * (n - 1))
zrc = np.arctanh(rc)
power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3))
return power
# Evaluate missing variable
if power is None and n is not None and r is not None:
# Compute achieved power given r, n and alpha
return func(r, n, power=None, alpha=alpha)
elif n is None and power is not None and r is not None:
# Compute required sample size given r, power and alpha
def _eval_n(n, r, power, alpha):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_n, 4 + 1e-10, 1e+09, args=(r, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif r is None and power is not None and n is not None:
# Compute achieved r given sample size, power and alpha level
def _eval_r(r, n, power, alpha):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_r, 1e-10, 1 - 1e-10, args=(n, power, alpha))
except ValueError: # pragma: no cover
return np.nan
else:
# Compute achieved alpha (significance) level given r, n and power
def _eval_alpha(alpha, r, n, power):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(r, n, power))
except ValueError: # pragma: no cover
return np.nan | Evaluate power, sample size, correlation coefficient or
significance level of a correlation test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
tail : str
Indicates whether the test is "two-sided" or "one-sided".
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must
be passed as None, and that parameter is determined from the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
This function is a mere Python translation of the original `pwr.r.test`
function implemented in the `pwr` R package.
All credit goes to the author, Stephane Champely.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80,
... tail='one-sided'))
n: 22.6091
3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
4. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80,
... alpha=None))
alpha: 0.1377 |
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
"""
Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class.
"""
if (typeName, schemaVersion) in _legacyTypes:
return _legacyTypes[typeName, schemaVersion]
if dummyBases:
realBases = [declareLegacyItem(*A) for A in dummyBases]
else:
realBases = (Item,)
attributes = attributes.copy()
attributes['__module__'] = 'item_dummy'
attributes['__legacy__'] = True
attributes['typeName'] = typeName
attributes['schemaVersion'] = schemaVersion
result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
realBases,
attributes)
assert result is not None, 'wtf, %r' % (type,)
_legacyTypes[(typeName, schemaVersion)] = result
return result | Generate a dummy subclass of Item that will have the given attributes,
and the base Item methods, but no methods of its own. This is for use
with upgrading.
@param typeName: a string, the Axiom TypeName to have attributes for.
@param schemaVersion: an int, the (old) version of the schema this is a proxy
for.
@param attributes: a dict mapping {columnName: attr instance} describing
the schema of C{typeName} at C{schemaVersion}.
@param dummyBases: a sequence of 4-tuples of (baseTypeName,
baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases
of this legacy class. |
def get_rc_creds():
"""
Reads ~/.rightscalerc and returns API endpoint and refresh token.
Always returns a tuple of strings even if the file is empty - in which
case, returns ``('', '')``.
"""
config = get_config()
try:
return (
config.get(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT),
config.get(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN),
)
except:
return ('', '') | Reads ~/.rightscalerc and returns API endpoint and refresh token.
Always returns a tuple of strings even if the file is empty - in which
case, returns ``('', '')``. |
def get_bucket_region(self, bucket) -> str:
"""
Get region associated with a specified bucket name.
:param bucket: the bucket to be checked.
:return: region, Note that underlying AWS API returns None for default US-East-1,
I'm replacing that with us-east-1.
"""
region = self.s3_client.get_bucket_location(Bucket=bucket)["LocationConstraint"]
return 'us-east-1' if region is None else region | Get region associated with a specified bucket name.
:param bucket: the bucket to be checked.
:return: region, Note that underlying AWS API returns None for default US-East-1,
I'm replacing that with us-east-1. |
def convert_attrs_to_uppercase(obj: Any, attrs: Iterable[str]) -> None:
"""
Converts the specified attributes of an object to upper case, modifying
the object in place.
"""
for a in attrs:
value = getattr(obj, a)
if value is None:
continue
setattr(obj, a, value.upper()) | Converts the specified attributes of an object to upper case, modifying
the object in place. |
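A small illustration with a throwaway class: None-valued attributes are skipped, and attributes not listed keep their case.
class Record:
    def __init__(self):
        self.code = "abc"
        self.label = None
        self.note = "keep me"

r = Record()
convert_attrs_to_uppercase(r, ["code", "label"])
print(r.code, r.label, r.note)     # ABC None keep me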
def calc_qdga2_v1(self):
"""Perform the runoff concentration calculation for "fast" direct runoff.
The working equation is the analytical solution of the linear storage
equation under the assumption of constant change in inflow during
the simulation time step.
Required derived parameter:
|KD2|
Required state sequence:
|QDGZ2|
Calculated state sequence:
|QDGA2|
Basic equation:
:math:`QDGA2_{neu} = QDGA2_{alt} +
(QDGZ2_{alt}-QDGA2_{alt}) \\cdot (1-exp(-KD2^{-1})) +
(QDGZ2_{neu}-QDGZ2_{alt}) \\cdot (1-KD2\\cdot(1-exp(-KD2^{-1})))`
Examples:
A normal test case:
>>> from hydpy.models.lland import *
>>> parameterstep()
>>> derived.kd2(0.1)
>>> states.qdgz2.old = 2.0
>>> states.qdgz2.new = 4.0
>>> states.qdga2.old = 3.0
>>> model.calc_qdga2_v1()
>>> states.qdga2
qdga2(3.800054)
First extreme test case (zero division is circumvented):
>>> derived.kd2(0.0)
>>> model.calc_qdga2_v1()
>>> states.qdga2
qdga2(4.0)
Second extreme test case (numerical overflow is circumvented):
>>> derived.kd2(1e500)
>>> model.calc_qdga2_v1()
>>> states.qdga2
qdga2(5.0)
"""
der = self.parameters.derived.fastaccess
old = self.sequences.states.fastaccess_old
new = self.sequences.states.fastaccess_new
if der.kd2 <= 0.:
new.qdga2 = new.qdgz2
elif der.kd2 > 1e200:
new.qdga2 = old.qdga2+new.qdgz2-old.qdgz2
else:
d_temp = (1.-modelutils.exp(-1./der.kd2))
new.qdga2 = (old.qdga2 +
(old.qdgz2-old.qdga2)*d_temp +
(new.qdgz2-old.qdgz2)*(1.-der.kd2*d_temp)) | Perform the runoff concentration calculation for "fast" direct runoff.
The working equation is the analytical solution of the linear storage
equation under the assumption of constant change in inflow during
the simulation time step.
Required derived parameter:
|KD2|
Required state sequence:
|QDGZ2|
Calculated state sequence:
|QDGA2|
Basic equation:
:math:`QDGA2_{neu} = QDGA2_{alt} +
(QDGZ2_{alt}-QDGA2_{alt}) \\cdot (1-exp(-KD2^{-1})) +
(QDGZ2_{neu}-QDGZ2_{alt}) \\cdot (1-KD2\\cdot(1-exp(-KD2^{-1})))`
Examples:
A normal test case:
>>> from hydpy.models.lland import *
>>> parameterstep()
>>> derived.kd2(0.1)
>>> states.qdgz2.old = 2.0
>>> states.qdgz2.new = 4.0
>>> states.qdga2.old = 3.0
>>> model.calc_qdga2_v1()
>>> states.qdga2
qdga2(3.800054)
First extreme test case (zero division is circumvented):
>>> derived.kd2(0.0)
>>> model.calc_qdga2_v1()
>>> states.qdga2
qdga2(4.0)
Second extreme test case (numerical overflow is circumvented):
>>> derived.kd2(1e500)
>>> model.calc_qdga2_v1()
>>> states.qdga2
qdga2(5.0) |
def walk(p, mode='all', **kw):
"""Wrapper for `os.walk`, yielding `Path` objects.
:param p: root of the directory tree to walk.
:param mode: 'all|dirs|files', defaulting to 'all'.
:param kw: Keyword arguments are passed to `os.walk`.
:return: Generator for the requested Path objects.
"""
for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
if mode in ('all', 'dirs'):
for dirname in dirnames:
yield Path(dirpath).joinpath(dirname)
if mode in ('all', 'files'):
for fname in filenames:
yield Path(dirpath).joinpath(fname) | Wrapper for `os.walk`, yielding `Path` objects.
:param p: root of the directory tree to walk.
:param mode: 'all|dirs|files', defaulting to 'all'.
:param kw: Keyword arguments are passed to `os.walk`.
:return: Generator for the requested Path objects. |
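A usage sketch (the directory argument is a placeholder, and Path/as_posix come from the surrounding module):
for path in walk('.', mode='files', topdown=True):   # extra keywords go straight to os.walk
    print(path)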
def _get_wms_wcs_url_parameters(request, date):
""" Returns parameters common dictionary for WMS and WCS request.
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:param date: acquisition date or None
:type date: datetime.datetime or None
:return: dictionary with parameters
:rtype: dict
"""
params = {
'BBOX': str(request.bbox.reverse()) if request.bbox.crs is CRS.WGS84 else str(request.bbox),
'FORMAT': MimeType.get_string(request.image_format),
'CRS': CRS.ogc_string(request.bbox.crs),
}
if date is not None:
start_date = date if request.time_difference < datetime.timedelta(
seconds=0) else date - request.time_difference
end_date = date if request.time_difference < datetime.timedelta(
seconds=0) else date + request.time_difference
params['TIME'] = '{}/{}'.format(start_date.isoformat(), end_date.isoformat())
return params | Returns parameters common dictionary for WMS and WCS request.
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:param date: acquisition date or None
:type date: datetime.datetime or None
:return: dictionary with parameters
:rtype: dict |
async def on_raw_731(self, message):
""" Someone we are monitoring got offline. """
for nick in message.params[1].split(','):
self._destroy_user(nick, monitor_override=True)
await self.on_user_offline(nick) | Someone we are monitoring got offline.
def tree(node, formatter=None, prefix=None, postfix=None, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
:param callable formatter: A callable that takes a single argument, the key,
that formats the key in the tree.
:param callable prefix: A callable that takes a single argument, the key,
that adds any additional text before the formatted key.
:param callable postfix: A callable that takes a single argument, the key,
that adds any additional text after the formatted key.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
k = formatter(key) if formatter else key
pre = prefix(key) if prefix else ''
post = postfix(key) if postfix else ''
space = elbow_joint if current == length else tee_joint
yield ' {space} {prefix}{key}{postfix}'.format(space=space, key=k, prefix=pre, postfix=post)
if value:
for e in tree(value, formatter=formatter, prefix=prefix, postfix=postfix, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e | Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
:param callable formatter: A callable that takes a single argument, the key,
that formats the key in the tree.
:param callable prefix: A callable that takes a single argument, the key,
that adds any additional text before the formatted key.
:param callable postfix: A callable that takes a single argument, the key,
that adds any additional text after the formatted key. |
def input_yn(conf_mess):
"""Print Confirmation Message and Get Y/N response from user."""
ui_erase_ln()
ui_print(conf_mess)
with term.cbreak():
input_flush()
val = input_by_key()
return bool(val.lower() == 'y') | Print Confirmation Message and Get Y/N response from user. |
def getParameters(self, postalAddress):
"""
Return a C{list} of one L{LiveForm} parameter for editing a
L{PostalAddress}.
@type postalAddress: L{PostalAddress} or C{NoneType}
@param postalAddress: If not C{None}, an existing contact item from
which to get the postal address default value.
@rtype: C{list}
@return: The parameters necessary for specifying a postal address.
"""
address = u''
if postalAddress is not None:
address = postalAddress.address
return [
liveform.Parameter('address', liveform.TEXT_INPUT,
unicode, 'Postal Address', default=address)] | Return a C{list} of one L{LiveForm} parameter for editing a
L{PostalAddress}.
@type postalAddress: L{PostalAddress} or C{NoneType}
@param postalAddress: If not C{None}, an existing contact item from
which to get the postal address default value.
@rtype: C{list}
@return: The parameters necessary for specifying a postal address. |
def _nonmatch_class_pos(self):
"""Return the position of the non-match class."""
# TODO: add notfitted warnings
if self.kernel.classes_.shape[0] != 2:
raise ValueError("Number of classes is {}, expected 2.".format(
self.kernel.classes_.shape[0]))
# # get the position of match probabilities
# classes = list(self.kernel.classes_)
# return classes.index(0)
return 0 | Return the position of the non-match class. |
def maybe_convert_values(self,
identifier: Identifier,
data: Dict[str, Any],
) -> Dict[str, Any]:
"""
Takes a dictionary of raw values for a specific identifier, as parsed
from the YAML file, and depending upon the type of db column the data
is meant for, decides what to do with the value (eg leave it alone,
convert a string to a date/time instance, or convert identifiers to
model instances by calling :meth:`self.loader.convert_identifiers`)
:param identifier: An object with :attr:`class_name` and :attr:`key`
attributes
:param data: A dictionary keyed by column name, with values being the
raw values as parsed from the YAML
:return: A dictionary keyed by column name, with values being the
converted values meant to be set on the model instance
"""
raise NotImplementedError | Takes a dictionary of raw values for a specific identifier, as parsed
from the YAML file, and depending upon the type of db column the data
is meant for, decides what to do with the value (eg leave it alone,
convert a string to a date/time instance, or convert identifiers to
model instances by calling :meth:`self.loader.convert_identifiers`)
:param identifier: An object with :attr:`class_name` and :attr:`key`
attributes
:param data: A dictionary keyed by column name, with values being the
raw values as parsed from the YAML
:return: A dictionary keyed by column name, with values being the
converted values meant to be set on the model instance |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<Property name="{0}"'.format(self.name) +\
(' dimension="{0}"'.format(self.dimension) if self.dimension else ' dimension="none"') +\
(' defaultValue = "{0}"'.format(self.default_value) if self.default_value else '') +\
'/>' | Exports this object into a LEMS XML object |