code | docstring |
---|---|
def _use_gl(objs):
''' Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool
'''
from ..models.plots import Plot
return _any(objs, lambda obj: isinstance(obj, Plot) and obj.output_backend == "webgl") | Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool |
def _on_remove_library(self, *event):
"""Callback method handling the removal of an existing library
"""
self.view['library_tree_view'].grab_focus()
if react_to_event(self.view, self.view['library_tree_view'], event):
path = self.view["library_tree_view"].get_cursor()[0]
if path is not None:
library_name = self.library_list_store[int(path[0])][0]
library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True,
default={})
del library_config[library_name]
self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config)
if len(self.library_list_store) > 0:
self.view['library_tree_view'].set_cursor(min(path[0], len(self.library_list_store) - 1))
return True | Callback method handling the removal of an existing library |
def add_element(self, elt):
"""Helper to add a element to the current section. The Element name
will be used as an identifier."""
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt | Helper to add an element to the current section. The Element name
will be used as an identifier. |
def proxy_label_for(label: str) -> str:
"""
>>> Sequence.proxy_label_for("foo")
'proxy_for.foo'
"""
label_java = _VertexLabel(label).unwrap()
proxy_label_java = k.jvm_view().SequenceBuilder.proxyLabelFor(label_java)
return proxy_label_java.getQualifiedName() | >>> Sequence.proxy_label_for("foo")
'proxy_for.foo' |
def best_policy(mdp, U):
"""Given an MDP and a utility function U, determine the best policy,
as a mapping from state to action. (Equation 17.4)"""
pi = {}
for s in mdp.states:
pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp))
return pi | Given an MDP and a utility function U, determine the best policy,
as a mapping from state to action. (Equation 17.4) |
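A minimal, self-contained sketch of what best_policy computes, run on a toy two-state MDP. The argmax and expected_utility helpers and the ToyMDP interface (states, actions, T) are assumptions here, following the usual AIMA conventions rather than this repository's exact code.
class ToyMDP:
    states = ['A', 'B']

    def actions(self, s):
        return ['stay', 'go']

    def T(self, s, a):
        # transition model: list of (probability, next_state) pairs
        if a == 'stay':
            return [(1.0, s)]
        return [(1.0, 'B' if s == 'A' else 'A')]

def argmax(seq, fn):
    return max(seq, key=fn)

def expected_utility(a, s, U, mdp):
    return sum(p * U[s1] for p, s1 in mdp.T(s, a))

U = {'A': 0.0, 'B': 1.0}
toy = ToyMDP()
pi = {s: argmax(toy.actions(s), lambda a: expected_utility(a, s, U, toy))
      for s in toy.states}
print(pi)  # expected: {'A': 'go', 'B': 'stay'}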
def get_push_pop_stack():
"""Create pop and push nodes for substacks that are linked.
Returns:
A push and pop node which have `push_func` and `pop_func` annotations
respectively, identifying them as such. They also have a `pop` and
`push` annotation respectively, which links the push node to the pop
node and vice versa.
"""
push = copy.deepcopy(PUSH_STACK)
pop = copy.deepcopy(POP_STACK)
anno.setanno(push, 'pop', pop)
anno.setanno(push, 'gen_push', True)
anno.setanno(pop, 'push', push)
op_id = _generate_op_id()
return push, pop, op_id | Create pop and push nodes for substacks that are linked.
Returns:
A push and pop node which have `push_func` and `pop_func` annotations
respectively, identifying them as such. They also have a `pop` and
`push` annotation respectively, which links the push node to the pop
node and vice versa. |
def _dry_message_received(self, msg):
"""Report a dry state."""
for callback in self._dry_wet_callbacks:
callback(LeakSensorState.DRY)
self._update_subscribers(0x11) | Report a dry state. |
def _take_values(self, item: Node) -> DictBasicType:
"""Takes snapshot of the object and replaces _parent property value on None to avoid
infitinite recursion in GPflow tree traversing.
:param item: GPflow node object.
:return: dictionary snapshot of the node object."""
values = super()._take_values(item)
values['_parent'] = None
return values | Takes a snapshot of the object and replaces the _parent property value with None to avoid
infinite recursion in GPflow tree traversal.
:param item: GPflow node object.
:return: dictionary snapshot of the node object. |
def admin_log(instances, msg: str, who: User=None, **kw):
"""
Logs an entry to admin logs of model(s).
:param instances: Model instance or list of instances
:param msg: Message to log
:param who: Who did the change
:param kw: Optional key-value attributes to append to message
:return: None
"""
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import get_content_type_for_model
from django.utils.encoding import force_text
# use system user if 'who' is missing
if not who:
username = settings.DJANGO_SYSTEM_USER if hasattr(settings, 'DJANGO_SYSTEM_USER') else 'system'
who, created = User.objects.get_or_create(username=username)
# append extra keyword attributes if any
att_str = ''
for k, v in kw.items():
if hasattr(v, 'pk'): # log only primary key for model instances, not whole str representation
v = v.pk
att_str += '{}={}'.format(k, v) if not att_str else ', {}={}'.format(k, v)
if att_str:
att_str = ' [{}]'.format(att_str)
msg = str(msg) + att_str
if not isinstance(instances, list) and not isinstance(instances, tuple):
instances = [instances]
for instance in instances:
if instance:
LogEntry.objects.log_action(
user_id=who.pk,
content_type_id=get_content_type_for_model(instance).pk,
object_id=instance.pk,
object_repr=force_text(instance),
action_flag=CHANGE,
change_message=msg,
) | Logs an entry to admin logs of model(s).
:param instances: Model instance or list of instances
:param msg: Message to log
:param who: Who did the change
:param kw: Optional key-value attributes to append to message
:return: None |
def zlist(self, name_start, name_end, limit=10):
"""
Return a list of the top ``limit`` zset names between ``name_start`` and
``name_end`` in ascending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means -inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means +inf
:param int limit: number of elements to be returned.
:return: a list of zset names
:rtype: list
>>> ssdb.zlist('zset_ ', 'zset_z', 10)
['zset_1', 'zset_2']
>>> ssdb.zlist('zset_ ', '', 3)
['zset_1', 'zset_2']
>>> ssdb.zlist('', 'aaa_not_exist', 10)
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('zlist', name_start, name_end, limit) | Return a list of the top ``limit`` zset names between ``name_start`` and
``name_end`` in ascending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means -inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means +inf
:param int limit: number of elements to be returned.
:return: a list of zset names
:rtype: list
>>> ssdb.zlist('zset_ ', 'zset_z', 10)
['zset_1', 'zset_2']
>>> ssdb.zlist('zset_ ', '', 3)
['zset_1', 'zset_2']
>>> ssdb.zlist('', 'aaa_not_exist', 10)
[] |
def apply_pre_filters(instance, html):
"""
Perform optimizations in the HTML source code.
:type instance: fluent_contents.models.ContentItem
:raise ValidationError: when one of the filters detects a problem.
"""
# Allow pre processing. Typical use-case is HTML syntax correction.
for post_func in appsettings.PRE_FILTER_FUNCTIONS:
html = post_func(instance, html)
return html | Perform optimizations in the HTML source code.
:type instance: fluent_contents.models.ContentItem
:raise ValidationError: when one of the filters detects a problem. |
def visit_importfrom(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split(".")) | check modules attribute accesses |
def p_recent(self, kind, cur_p='', with_catalog=True, with_date=True):
'''
List posts that were recently edited (partial view).
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MPost.total_number(kind) / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': 'Recent posts.',
'with_catalog': with_catalog,
'with_date': with_date,
'kind': kind,
'current_page': current_page_number,
'post_count': MPost.get_counts(),
'router': config.router_post[kind],
}
self.render('admin/post_ajax/post_list.html',
kwd=kwd,
view=MPost.query_recent(num=20, kind=kind),
infos=MPost.query_pager_by_slug(
kind=kind,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG, ) | List posts that were recently edited (partial view). |
def update_user(self, user_id, **kwargs):
"""Update a user."""
body = self._formdata(kwargs, FastlyUser.FIELDS)
content = self._fetch("/user/%s" % user_id, method="PUT", body=body)
return FastlyUser(self, content) | Update a user. |
def is_child_of(self, node):
"""
:returns: ``True`` if the node is a child of another node given as an
argument, else, returns ``False``
:param node:
The node that will be checked as a parent
"""
return node.get_children().filter(pk=self.pk).exists() | :returns: ``True`` if the node is a child of another node given as an
argument, else, returns ``False``
:param node:
The node that will be checked as a parent |
def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_stateful_set_scale # noqa: E501
partially update scale of the specified StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | patch_namespaced_stateful_set_scale # noqa: E501
partially update scale of the specified StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Scale
If the method is called asynchronously,
returns the request thread. |
def read_multi(flatten, cls, source, *args, **kwargs):
"""Read sources into a `cls` with multiprocessing
This method should be called by `cls.read` and uses the `nproc`
keyword to enable and handle pool-based multiprocessing of
multiple source files, using `flatten` to combine the
chunked data into a single object of the correct type.
Parameters
----------
flatten : `callable`
a method to take a list of ``cls`` instances, and combine them
into a single ``cls`` instance
cls : `type`
the object type to read
source : `str`, `list` of `str`, ...
the input data source; can be given in many different forms
*args
positional arguments to pass to the reader
**kwargs
keyword arguments to pass to the reader
"""
verbose = kwargs.pop('verbose', False)
# parse input as a list of files
try: # try and map to a list of file-like objects
files = file_list(source)
except ValueError: # otherwise treat as single file
files = [source]
path = None # to pass to get_read_format()
else:
path = files[0] if files else None
# determine input format (so we don't have to do it multiple times)
if kwargs.get('format', None) is None:
kwargs['format'] = get_read_format(cls, path, (source,) + args, kwargs)
# calculate maximum number of processes
nproc = min(kwargs.pop('nproc', 1), len(files))
# define multiprocessing method
def _read_single_file(fobj):
try:
return fobj, io_read(cls, fobj, *args, **kwargs)
# pylint: disable=broad-except,redefine-in-handler
except Exception as exc:
if nproc == 1:
raise
if isinstance(exc, SAXException): # SAXExceptions don't pickle
return fobj, exc.getException() # pylint: disable=no-member
return fobj, exc
# format verbosity
if verbose is True:
verbose = 'Reading ({})'.format(kwargs['format'])
# read files
output = mp_utils.multiprocess_with_queues(
nproc, _read_single_file, files, verbose=verbose, unit='files')
# raise exceptions (from multiprocessing, single process raises inline)
for fobj, exc in output:
if isinstance(exc, Exception):
exc.args = ('Failed to read %s: %s' % (fobj, str(exc)),)
raise exc
# return combined object
_, out = zip(*output)
return flatten(out) | Read sources into a `cls` with multiprocessing
This method should be called by `cls.read` and uses the `nproc`
keyword to enable and handle pool-based multiprocessing of
multiple source files, using `flatten` to combine the
chunked data into a single object of the correct type.
Parameters
----------
flatten : `callable`
a method to take a list of ``cls`` instances, and combine them
into a single ``cls`` instance
cls : `type`
the object type to read
source : `str`, `list` of `str`, ...
the input data source; can be given in many different forms
*args
positional arguments to pass to the reader
**kwargs
keyword arguments to pass to the reader |
async def delete_chat_photo(self, chat_id: typing.Union[base.Integer, base.String]) -> base.Boolean:
"""
Use this method to delete a chat photo. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#deletechatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.DELETE_CHAT_PHOTO, payload)
return result | Use this method to delete a chat photo. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#deletechatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns True on success
:rtype: :obj:`base.Boolean` |
def compare(ver1, ver2):
"""Compare two versions
:param ver1: version string 1
:param ver2: version string 2
:return: The return value is negative if ver1 < ver2,
zero if ver1 == ver2 and strictly positive if ver1 > ver2
:rtype: int
>>> import semver
>>> semver.compare("1.0.0", "2.0.0")
-1
>>> semver.compare("2.0.0", "1.0.0")
1
>>> semver.compare("2.0.0", "2.0.0")
0
"""
v1, v2 = parse(ver1), parse(ver2)
return _compare_by_keys(v1, v2) | Compare two versions
:param ver1: version string 1
:param ver2: version string 2
:return: The return value is negative if ver1 < ver2,
zero if ver1 == ver2 and strictly positive if ver1 > ver2
:rtype: int
>>> import semver
>>> semver.compare("1.0.0", "2.0.0")
-1
>>> semver.compare("2.0.0", "1.0.0")
1
>>> semver.compare("2.0.0", "2.0.0")
0 |
def alphavsks(self,autozoom=True,**kwargs):
"""
Plot alpha versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function.
"""
pylab.plot(self._alpha_values, self._xmin_kstest, '.')
pylab.errorbar(self._alpha, self._ks, xerr=self._alphaerr, fmt='+')
ax=pylab.gca()
if autozoom:
ax.set_ylim(0.8*(self._ks),3*(self._ks))
ax.set_xlim((self._alpha)-5*self._alphaerr,(self._alpha)+5*self._alphaerr)
ax.set_ylabel("KS statistic")
ax.set_xlabel(r'$\alpha$')
pylab.draw()
return ax | Plot alpha versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function. |
def edit(self, entity, id, payload, sync=True):
""" Edit a document. """
url = urljoin(self.host, entity.value + '/')
url = urljoin(url, id + '/')
params = {'sync': str(sync).lower()}
url = Utils.add_url_parameters(url, params)
r = requests.put(url, auth=self.auth, data=json.dumps(payload),
headers=self.headers)
if r.status_code == 500:
error_message = r.json()['error_message']
raise CoredataError('Error! {error}'.format(error=error_message)) | Edit a document. |
def merge_commit(commit):
"Fetches the latest code and merges up the specified commit."
with cd(env.path):
run('git fetch')
if '@' in commit:
branch, commit = commit.split('@')
run('git checkout {0}'.format(branch))
run('git merge {0}'.format(commit)) | Fetches the latest code and merges up the specified commit. |
def calcRapRperi(self,*args,**kwargs):
"""
NAME:
calcRapRperi
PURPOSE:
calculate the apocenter and pericenter radii
INPUT:
Either:
a) R,vR,vT,z,vz
b) Orbit instance: initial condition used if that's it, orbit(t)
if there is a time given as well
OUTPUT:
(rperi,rap)
HISTORY:
2013-11-27 - Written - Bovy (IAS)
"""
#Set up the actionAngleAxi object
if isinstance(self._pot,list):
thispot= [p.toPlanar() for p in self._pot if not isinstance(p,planarPotential)]
thispot.extend([p for p in self._pot if isinstance(p,planarPotential)])
elif not isinstance(self._pot,planarPotential):
thispot= self._pot.toPlanar()
else:
thispot= self._pot
aAAxi= actionAngleAxi(*args,pot=thispot,
gamma=self._gamma)
return aAAxi.calcRapRperi(**kwargs) | NAME:
calcRapRperi
PURPOSE:
calculate the apocenter and pericenter radii
INPUT:
Either:
a) R,vR,vT,z,vz
b) Orbit instance: initial condition used if that's it, orbit(t)
if there is a time given as well
OUTPUT:
(rperi,rap)
HISTORY:
2013-11-27 - Written - Bovy (IAS) |
def subclass(cls, t):
"""Change a term into a Section Term"""
t.doc = None
t.terms = []
t.__class__ = SectionTerm
return t | Change a term into a Section Term |
def save(self):
"""Format and save cells."""
# re-number cells
self.cells = list(self.renumber())
# add a newline to the last line if necessary
if not self.cells[-1].endswith('\n'):
self.cells[-1] += '\n'
# save the rejoined list of cells
with open(self.filename, 'w') as file_open:
file_open.write('\n\n'.join(self.cells)) | Format and save cells. |
def _add_embedding_config(file_path, data_dir, has_metadata=False, label_img_shape=None):
"""Creates a config file used by the embedding projector.
Adapted from the TensorFlow function `visualize_embeddings()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorboard/plugins/projector/__init__.py"""
with open(os.path.join(file_path, 'projector_config.pbtxt'), 'a') as f:
s = 'embeddings {\n'
s += 'tensor_name: "{}"\n'.format(data_dir)
s += 'tensor_path: "{}"\n'.format(os.path.join(data_dir, 'tensors.tsv'))
if has_metadata:
s += 'metadata_path: "{}"\n'.format(os.path.join(data_dir, 'metadata.tsv'))
if label_img_shape is not None:
if len(label_img_shape) != 4:
logging.warning('expected 4D sprite image in the format NCHW, while received image'
' ndim=%d, skipping saving sprite'
' image info', len(label_img_shape))
else:
s += 'sprite {\n'
s += 'image_path: "{}"\n'.format(os.path.join(data_dir, 'sprite.png'))
s += 'single_image_dim: {}\n'.format(label_img_shape[3])
s += 'single_image_dim: {}\n'.format(label_img_shape[2])
s += '}\n'
s += '}\n'
f.write(s) | Creates a config file used by the embedding projector.
Adapted from the TensorFlow function `visualize_embeddings()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorboard/plugins/projector/__init__.py |
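A hedged usage sketch of the helper above, assuming _add_embedding_config is in scope: it writes a projector_config.pbtxt into a temporary directory and prints the generated text. The data_dir value 'my_embedding' is an illustrative name only.
import os
import tempfile

log_dir = tempfile.mkdtemp()
_add_embedding_config(log_dir, 'my_embedding', has_metadata=True)
with open(os.path.join(log_dir, 'projector_config.pbtxt')) as f:
    print(f.read())
# Roughly expected output (paths joined under 'my_embedding'):
# embeddings {
# tensor_name: "my_embedding"
# tensor_path: "my_embedding/tensors.tsv"
# metadata_path: "my_embedding/metadata.tsv"
# }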
def fixtags(self, text):
"""Clean up special characters, only run once, next-to-last before doBlockLevels"""
# french spaces, last one Guillemet-left
# only if there is something before the space
text = _guillemetLeftPat.sub(ur'\1 \2', text)
# french spaces, Guillemet-right
text = _guillemetRightPat.sub(ur'\1 ', text)
return text | Clean up special characters, only run once, next-to-last before doBlockLevels |
def _to_ascii(s):
""" Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
"""
# TODO: Always use unicode within ambry.
from six import text_type, binary_type
if isinstance(s, text_type):
ascii_ = s.encode('ascii', 'ignore')
elif isinstance(s, binary_type):
ascii_ = s.decode('utf-8').encode('ascii', 'ignore')
else:
raise Exception('Unknown text type - {}'.format(type(s)))
return ascii_ | Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str: |
def generic_visit(self, node):
"""TODO: docstring in public method."""
if node.__class__.__name__ == 'Name':
if node.ctx.__class__ == ast.Load and node.id not in self.names:
self.names.append(node.id)
ast.NodeVisitor.generic_visit(self, node) | TODO: docstring in public method. |
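A small self-contained illustration of the visitor logic above: a standalone NodeVisitor subclass with the same generic_visit body, collecting the names that are loaded in an expression. The _NameCollector class is hypothetical and exists only for this sketch.
import ast

class _NameCollector(ast.NodeVisitor):
    def __init__(self):
        self.names = []

    def generic_visit(self, node):
        # same logic as above: record identifiers read (Load context), once each
        if node.__class__.__name__ == 'Name':
            if node.ctx.__class__ == ast.Load and node.id not in self.names:
                self.names.append(node.id)
        ast.NodeVisitor.generic_visit(self, node)

collector = _NameCollector()
collector.visit(ast.parse("total = price * quantity"))
print(collector.names)  # -> ['price', 'quantity']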
def findAnyBracketBackward(self, block, column):
"""Search for a needle and return (block, column)
Raise ValueError, if not found
NOTE: this method ignores strings and comments
"""
depth = {'()': 1,
'[]': 1,
'{}': 1
}
for foundBlock, foundColumn, char in self.iterateCharsBackwardFrom(block, column):
if self._qpart.isCode(foundBlock.blockNumber(), foundColumn):
for brackets in depth.keys():
opening, closing = brackets
if char == opening:
depth[brackets] -= 1
if depth[brackets] == 0:
return foundBlock, foundColumn
elif char == closing:
depth[brackets] += 1
else:
raise ValueError('Not found') | Search for a needle and return (block, column)
Raise ValueError, if not found
NOTE: this method ignores strings and comments |
def _helpful_failure(method):
"""
Decorator for eval_ that prints a helpful error message
if an exception is generated in a Q expression
"""
@wraps(method)
def wrapper(self, val):
try:
return method(self, val)
except:
exc_cls, inst, tb = sys.exc_info()
if hasattr(inst, '_RERAISE'):
_, expr, _, inner_val = Q.__debug_info__
Q.__debug_info__ = QDebug(self, expr, val, inner_val)
raise
if issubclass(exc_cls, KeyError): # Overrides formatting
exc_cls = QKeyError
# Show val, unless it's too long
prettyval = repr(val)
if len(prettyval) > 150:
prettyval = "<%s instance>" % (type(val).__name__)
msg = "{0}\n\n\tEncountered when evaluating {1}{2}".format(
inst, prettyval, self)
new_exc = exc_cls(msg)
new_exc._RERAISE = True
Q.__debug_info__ = QDebug(self, self, val, val)
six.reraise(exc_cls, new_exc, tb)
return wrapper | Decorator for eval_ that prints a helpful error message
if an exception is generated in a Q expression |
def get_version():
"Returns a PEP 386-compliant version number from VERSION."
assert len(VERSION) == 5
assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if VERSION[2] == 0 else 3
main = '.'.join(str(x) for x in VERSION[:parts])
sub = ''
if VERSION[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[VERSION[3]] + str(VERSION[4])
return str(main + sub) | Returns a PEP 386-compliant version number from VERSION. |
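A worked sketch of the mapping performed by get_version(), using a local copy of the same logic so it can run without the module-level VERSION constant; the _version_string helper is hypothetical.
def _version_string(version):
    # mirrors get_version(): main = X.Y[.Z], sub = {a|b|c}N for pre-releases
    assert len(version) == 5
    parts = 2 if version[2] == 0 else 3
    main = '.'.join(str(x) for x in version[:parts])
    sub = ''
    if version[3] != 'final':
        sub = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[version[3]] + str(version[4])
    return main + sub

print(_version_string((1, 7, 2, 'final', 0)))  # -> '1.7.2'
print(_version_string((1, 8, 0, 'beta', 1)))   # -> '1.8b1'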
def debug(self, value):
"""
Turn on debug logging if necessary.
:param value: Value of debug flag
"""
self._debug = value
if self._debug:
# Turn on debug logging
logging.getLogger().setLevel(logging.DEBUG) | Turn on debug logging if necessary.
:param value: Value of debug flag |
def translate(self, body, params=None):
"""
`<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element.
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_sql/translate", params=params, body=body
) | `<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element. |
def wait(self):
"wait for a message, respecting timeout"
data=self.getcon().recv(256) # this can raise socket.timeout
if not data: raise PubsubDisco
if self.reset:
self.reset=False # i.e. ack it. reset is used to tell the wait-thread there was a reconnect (though it's plausible that this never happens)
raise PubsubDisco
self.buf+=data
msg,self.buf=complete_message(self.buf)
return msg | wait for a message, respecting timeout |
def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
"""
Generate a new private ECDSA key. This factory function can be used to
generate a new host key or authentication key.
:param progress_func: Not used for this type of key.
:returns: A new private key (`.ECDSAKey`) object
"""
if bits is not None:
curve = cls._ECDSA_CURVES.get_by_key_length(bits)
if curve is None:
raise ValueError("Unsupported key length: {:d}".format(bits))
curve = curve.curve_class()
private_key = ec.generate_private_key(curve, backend=default_backend())
return ECDSAKey(vals=(private_key, private_key.public_key())) | Generate a new private ECDSA key. This factory function can be used to
generate a new host key or authentication key.
:param progress_func: Not used for this type of key.
:returns: A new private key (`.ECDSAKey`) object |
def replay_position(position, result):
"""
Wrapper for a go.Position which replays its history.
Assumes an empty start position! (i.e. no handicap, and history must be exhaustive.)
Result must be passed in, since a resign cannot be inferred from position
history alone.
for position_w_context in replay_position(position, result):
print(position_w_context.position)
"""
assert position.n == len(position.recent), "Position history is incomplete"
pos = Position(komi=position.komi)
for player_move in position.recent:
color, next_move = player_move
yield PositionWithContext(pos, next_move, result)
pos = pos.play_move(next_move, color=color) | Wrapper for a go.Position which replays its history.
Assumes an empty start position! (i.e. no handicap, and history must be exhaustive.)
Result must be passed in, since a resign cannot be inferred from position
history alone.
for position_w_context in replay_position(position, result):
print(position_w_context.position) |
def _iop(self, operation, other, *allowed):
"""An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution.
"""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._iop(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
# Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(
self=self, op=operation))
def _t(o):
for value in o:
yield None if value is None else f.transformer.foreign(value, (f, self._document))
other = other if len(other) > 1 else other[0]
values = list(_t(other))
return Filter({self._name: {operation: values}}) | An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution. |
def MergeAttributeContainers(
self, callback=None, maximum_number_of_containers=0):
"""Reads attribute containers from a task storage file into the writer.
Args:
callback (function[StorageWriter, AttributeContainer]): function to call
after each attribute container is deserialized.
maximum_number_of_containers (Optional[int]): maximum number of
containers to merge, where 0 represents no limit.
Returns:
bool: True if the entire task storage file has been merged.
Raises:
RuntimeError: if the add method for the active attribute container
type is missing.
OSError: if the task storage file cannot be deleted.
ValueError: if the maximum number of containers is a negative value.
"""
if maximum_number_of_containers < 0:
raise ValueError('Invalid maximum number of containers')
if not self._cursor:
self._Open()
self._ReadStorageMetadata()
self._container_types = self._GetContainerTypes()
number_of_containers = 0
while self._active_cursor or self._container_types:
if not self._active_cursor:
self._PrepareForNextContainerType()
if maximum_number_of_containers == 0:
rows = self._active_cursor.fetchall()
else:
number_of_rows = maximum_number_of_containers - number_of_containers
rows = self._active_cursor.fetchmany(size=number_of_rows)
if not rows:
self._active_cursor = None
continue
for row in rows:
identifier = identifiers.SQLTableIdentifier(
self._active_container_type, row[0])
if self._compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
serialized_data = zlib.decompress(row[1])
else:
serialized_data = row[1]
attribute_container = self._DeserializeAttributeContainer(
self._active_container_type, serialized_data)
attribute_container.SetIdentifier(identifier)
if self._active_container_type == self._CONTAINER_TYPE_EVENT_TAG:
event_identifier = identifiers.SQLTableIdentifier(
self._CONTAINER_TYPE_EVENT,
attribute_container.event_row_identifier)
attribute_container.SetEventIdentifier(event_identifier)
del attribute_container.event_row_identifier
if callback:
callback(self._storage_writer, attribute_container)
self._add_active_container_method(attribute_container)
number_of_containers += 1
if (maximum_number_of_containers != 0 and
number_of_containers >= maximum_number_of_containers):
return False
self._Close()
os.remove(self._path)
return True | Reads attribute containers from a task storage file into the writer.
Args:
callback (function[StorageWriter, AttributeContainer]): function to call
after each attribute container is deserialized.
maximum_number_of_containers (Optional[int]): maximum number of
containers to merge, where 0 represents no limit.
Returns:
bool: True if the entire task storage file has been merged.
Raises:
RuntimeError: if the add method for the active attribute container
type is missing.
OSError: if the task storage file cannot be deleted.
ValueError: if the maximum number of containers is a negative value. |
def parseWord(word):
"""
Split given attribute word to key, value pair.
Values are cast to Python equivalents.
:param word: API word.
:returns: Key, value pair.
"""
mapping = {'yes': True, 'true': True, 'no': False, 'false': False}
_, key, value = word.split('=', 2)
try:
value = int(value)
except ValueError:
value = mapping.get(value, value)
return (key, value) | Split given attribute word to key, value pair.
Values are cast to Python equivalents.
:param word: API word.
:returns: Key, value pair. |
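A quick hedged usage sketch of parseWord() on RouterOS-style attribute words; the sample words are illustrative.
# Attribute words have the form '=key=value'; values are cast where possible.
print(parseWord('=disabled=no'))   # -> ('disabled', False)
print(parseWord('=mtu=1500'))      # -> ('mtu', 1500)
print(parseWord('=name=ether1'))   # -> ('name', 'ether1')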
def set_output_fields(self, output_fields):
"""Defines where to put the dictionary output of the extractor in the doc, but renames
the fields of the extracted output for the document or just filters the keys"""
if isinstance(output_fields, dict) or isinstance(output_fields, list):
self.output_fields = output_fields
elif isinstance(output_fields, basestring):
self.output_field = output_fields
else:
raise ValueError("set_output_fields requires a dictionary of "
+ "output fields to remap, a list of keys to filter, or a scalar string")
return self | Defines where to put the dictionary output of the extractor in the doc, but renames
the fields of the extracted output for the document or just filters the keys |
def get_random(self):
"""
Returns a random statement from the database
"""
Statement = self.get_model('statement')
statement = Statement.objects.order_by('?').first()
if statement is None:
raise self.EmptyDatabaseException()
return statement | Returns a random statement from the database |
def get_prtfmt_list(self, flds, add_nl=True):
"""Get print format, given fields."""
fmts = []
for fld in flds:
if fld[:2] == 'p_':
fmts.append('{{{FLD}:8.2e}}'.format(FLD=fld))
elif fld in self.default_fld2fmt:
fmts.append(self.default_fld2fmt[fld])
else:
raise Exception("UNKNOWN FORMAT: {FLD}".format(FLD=fld))
if add_nl:
fmts.append("\n")
return fmts | Get print format, given fields. |
def _request_bulk(self, urls: List[str]) -> List:
"""Batch the requests going out."""
if not urls:
raise Exception("No results were found")
session: FuturesSession = FuturesSession(max_workers=len(urls))
self.log.info("Bulk requesting: %d" % len(urls))
futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]
done, incomplete = wait(futures)
results: List = list()
for response in done:
try:
results.append(response.result())
except Exception as err:
self.log.warn("Failed result: %s" % err)
return results | Batch the requests going out. |
def remove(self, removeItems=False):
"""
Removes this layer from the scene. If the removeItems flag is set to \
True, then all the items on this layer will be removed as well. \
Otherwise, they will be transferred to another layer from the scene.
:param removeItems | <bool>
:return <bool>
"""
# makes sure this can be removed
if not self.prepareToRemove():
return False
items = self.items()
# grabs the next layer
if self._scene._layers:
new_layer = self._scene._layers[0]
else:
new_layer = None
# removes the items from the scene if flagged
if removeItems:
self.scene().removeItems(items)
# otherwise assign to the next layer
else:
for item in items:
item.setLayer(new_layer)
# remove the layer from the scenes reference
if self in self._scene._layers:
self._scene._layers.remove(self)
if new_layer:
new_layer.setCurrent()
self._scene.setModified()
return True | Removes this layer from the scene. If the removeItems flag is set to \
True, then all the items on this layer will be removed as well. \
Otherwise, they will be transferred to another layer from the scene.
:param removeItems | <bool>
:return <bool> |
def update_house(self, complex: str, id: str, **kwargs):
"""
Update the existing house
"""
self.check_house(complex, id)
self.put('developers/{developer}/complexes/{complex}/houses/{id}'.format(
developer=self.developer,
complex=complex,
id=id,
), data=kwargs) | Update the existing house |
def default(self, obj):
'''
Converts an object and returns a ``JSON``-friendly structure.
:param obj: object or structure to be converted into a
``JSON``-ifiable structure
Considers the following special cases in order:
* object has a callable __json__() attribute defined
returns the result of the call to __json__()
* date and datetime objects
returns the object cast to str
* Decimal objects
returns the object cast to float
* SQLAlchemy objects
returns a copy of the object.__dict__ with internal SQLAlchemy
parameters removed
* SQLAlchemy ResultProxy objects
Casts the iterable ResultProxy into a list of tuples containing
the entire resultset data, returns the list in a dictionary
along with the resultset "row" count.
.. note:: {'count': 5, 'rows': [('Ed Jones',), ('Pete Jones',),
('Wendy Williams',), ('Mary Contrary',), ('Fred Smith',)]}
* SQLAlchemy RowProxy objects
Casts the RowProxy cursor object into a dictionary, probably
losing its ordered dictionary behavior in the process but
making it JSON-friendly.
* webob_dicts objects
returns webob_dicts.mixed() dictionary, which is guaranteed
to be JSON-friendly.
'''
if hasattr(obj, '__json__') and six.callable(obj.__json__):
return obj.__json__()
elif isinstance(obj, (date, datetime)):
return str(obj)
elif isinstance(obj, Decimal):
# XXX What to do about JSONEncoder crappy handling of Decimals?
# SimpleJSON has better Decimal encoding than the std lib
# but only in recent versions
return float(obj)
elif is_saobject(obj):
props = {}
for key in obj.__dict__:
if not key.startswith('_sa_'):
props[key] = getattr(obj, key)
return props
elif isinstance(obj, ResultProxy):
props = dict(rows=list(obj), count=obj.rowcount)
if props['count'] < 0:
props['count'] = len(props['rows'])
return props
elif isinstance(obj, RowProxy):
return dict(obj)
elif isinstance(obj, webob_dicts):
return obj.mixed()
else:
return JSONEncoder.default(self, obj) | Converts an object and returns a ``JSON``-friendly structure.
:param obj: object or structure to be converted into a
``JSON``-ifiable structure
Considers the following special cases in order:
* object has a callable __json__() attribute defined
returns the result of the call to __json__()
* date and datetime objects
returns the object cast to str
* Decimal objects
returns the object cast to float
* SQLAlchemy objects
returns a copy of the object.__dict__ with internal SQLAlchemy
parameters removed
* SQLAlchemy ResultProxy objects
Casts the iterable ResultProxy into a list of tuples containing
the entire resultset data, returns the list in a dictionary
along with the resultset "row" count.
.. note:: {'count': 5, 'rows': [('Ed Jones',), ('Pete Jones',),
('Wendy Williams',), ('Mary Contrary',), ('Fred Smith',)]}
* SQLAlchemy RowProxy objects
Casts the RowProxy cursor object into a dictionary, probably
losing its ordered dictionary behavior in the process but
making it JSON-friendly.
* webob_dicts objects
returns webob_dicts.mixed() dictionary, which is guaranteed
to be JSON-friendly. |
def _solNa2SO4(T, mH2SO4, mNaCl):
"""Equation for the solubility of sodium sulfate in aqueous mixtures of
sodium chloride and sulfuric acid
Parameters
----------
T : float
Temperature, [K]
mH2SO4 : float
Molality of sulfuric acid, [mol/kg(water)]
mNaCl : float
Molality of sodium chloride, [mol/kg(water)]
Returns
-------
S : float
Molal solubility of sodium sulfate, [mol/kg(water)]
Notes
------
Raise :class:`NotImplementedError` if input isn't within limits:
* 523.15 ≤ T ≤ 623.15
* 0 ≤ mH2SO4 ≤ 0.75
* 0 ≤ mNaCl ≤ 2.25
Examples
--------
>>> _solNa2SO4(523.15, 0.25, 0.75)
2.68
References
----------
IAPWS, Solubility of Sodium Sulfate in Aqueous Mixtures of Sodium Chloride
and Sulfuric Acid from Water to Concentrated Solutions,
http://www.iapws.org/relguide/na2so4.pdf
"""
# Check input parameters
if T < 523.15 or T > 623.15 or mH2SO4 < 0 or mH2SO4 > 0.75 or \
mNaCl < 0 or mNaCl > 2.25:
raise NotImplementedError("Incoming out of bound")
A00 = -0.8085987*T+81.4613752+0.10537803*T*log(T)
A10 = 3.4636364*T-281.63322-0.46779874*T*log(T)
A20 = -6.0029634*T+480.60108+0.81382854*T*log(T)
A30 = 4.4540258*T-359.36872-0.60306734*T*log(T)
A01 = 0.4909061*T-46.556271-0.064612393*T*log(T)
A02 = -0.002781314*T+1.722695+0.0000013319698*T*log(T)
A03 = -0.014074108*T+0.99020227+0.0019397832*T*log(T)
A11 = -0.87146573*T+71.808756+0.11749585*T*log(T)
S = A00 + A10*mH2SO4 + A20*mH2SO4**2 + A30*mH2SO4**3 + A01*mNaCl + \
A02*mNaCl**2 + A03*mNaCl**3 + A11*mH2SO4*mNaCl
return S | Equation for the solubility of sodium sulfate in aqueous mixtures of
sodium chloride and sulfuric acid
Parameters
----------
T : float
Temperature, [K]
mH2SO4 : float
Molality of sulfuric acid, [mol/kg(water)]
mNaCl : float
Molality of sodium chloride, [mol/kg(water)]
Returns
-------
S : float
Molal solubility of sodium sulfate, [mol/kg(water)]
Notes
------
Raise :class:`NotImplementedError` if input isn't within limits:
* 523.15 ≤ T ≤ 623.15
* 0 ≤ mH2SO4 ≤ 0.75
* 0 ≤ mNaCl ≤ 2.25
Examples
--------
>>> _solNa2SO4(523.15, 0.25, 0.75)
2.68
References
----------
IAPWS, Solubility of Sodium Sulfate in Aqueous Mixtures of Sodium Chloride
and Sulfuric Acid from Water to Concentrated Solutions,
http://www.iapws.org/relguide/na2so4.pdf |
def refresh(self, leave_clean=False):
"""Attempt to pull-with-rebase from upstream. This is implemented as fetch-plus-rebase
so that we can distinguish between errors in the fetch stage (likely network errors)
and errors in the rebase stage (conflicts). If leave_clean is true, then in the event
of a rebase failure, the branch will be rolled back. Otherwise, it will be left in the
conflicted state.
"""
remote, merge = self._get_upstream()
self._check_call(['fetch', '--tags', remote, merge], raise_type=Scm.RemoteException)
try:
self._check_call(['rebase', 'FETCH_HEAD'], raise_type=Scm.LocalException)
except Scm.LocalException as e:
if leave_clean:
logger.debug('Cleaning up after failed rebase')
try:
self._check_call(['rebase', '--abort'], raise_type=Scm.LocalException)
except Scm.LocalException as abort_exc:
logger.debug('Failed to clean up after failed rebase')
logger.debug(traceback.format_exc())
# But let the original exception propagate, since that's the more interesting one
raise e | Attempt to pull-with-rebase from upstream. This is implemented as fetch-plus-rebase
so that we can distinguish between errors in the fetch stage (likely network errors)
and errors in the rebase stage (conflicts). If leave_clean is true, then in the event
of a rebase failure, the branch will be rolled back. Otherwise, it will be left in the
conflicted state. |
def operation_recorder_enabled(self, value):
"""Setter method; for a description see the getter method."""
for recorder in self._operation_recorders:
if value:
recorder.enable()
else:
recorder.disable() | Setter method; for a description see the getter method. |
def get_item(env, name, default=None):
""" Get an item from a dictionary, handling nested lookups with dotted notation.
Args:
env: the environment (dictionary) to use to look up the name.
name: the name to look up, in dotted notation.
default: the value to return if the name is not found.
Returns:
The result of looking up the name, if found; else the default.
"""
# TODO: handle attributes
for key in name.split('.'):
if isinstance(env, dict) and key in env:
env = env[key]
elif isinstance(env, types.ModuleType) and key in env.__dict__:
env = env.__dict__[key]
else:
return default
return env | Get an item from a dictionary, handling nested lookups with dotted notation.
Args:
env: the environment (dictionary) to use to look up the name.
name: the name to look up, in dotted notation.
default: the value to return if the name is not found.
Returns:
The result of looking up the name, if found; else the default. |
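A brief usage sketch of get_item() with a plain nested dictionary; the config values are illustrative.
config = {'server': {'port': 8080, 'host': 'localhost'}}
print(get_item(config, 'server.port'))        # -> 8080
print(get_item(config, 'server.missing', 0))  # -> 0 (the default)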
def set_position(self, position):
"""Set media position."""
if position > self._duration():
return
position_ns = position * _NANOSEC_MULT
self._manager[ATTR_POSITION] = position
self._player.seek_simple(_FORMAT_TIME, Gst.SeekFlags.FLUSH, position_ns) | Set media position. |
def compute_Wp(self, Epmin=None, Epmax=None):
""" Total energy in protons between energies Epmin and Epmax
Parameters
----------
Epmin : :class:`~astropy.units.Quantity` float, optional
Minimum proton energy for energy content calculation.
Epmax : :class:`~astropy.units.Quantity` float, optional
Maximum proton energy for energy content calculation.
"""
if Epmin is None and Epmax is None:
Wp = self.Wp
else:
if Epmax is None:
Epmax = self.Epmax
if Epmin is None:
Epmin = self.Epmin
log10Epmin = np.log10(Epmin.to("GeV").value)
log10Epmax = np.log10(Epmax.to("GeV").value)
Ep = (
np.logspace(
log10Epmin,
log10Epmax,
int(self.nEpd * (log10Epmax - log10Epmin)),
)
* u.GeV
)
pdist = self.particle_distribution(Ep)
Wp = trapz_loglog(Ep * pdist, Ep).to("erg")
return Wp | Total energy in protons between energies Epmin and Epmax
Parameters
----------
Epmin : :class:`~astropy.units.Quantity` float, optional
Minimum proton energy for energy content calculation.
Epmax : :class:`~astropy.units.Quantity` float, optional
Maximum proton energy for energy content calculation. |
def filter(args):
"""
%prog filter frgfile idsfile
Removes the reads from frgfile that are indicated as duplicates in the
clstrfile (generated by CD-HIT-454). `idsfile` includes a set of names to
include in the filtered frgfile. See apps.cdhit.ids().
"""
p = OptionParser(filter.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
frgfile, idsfile = args
assert frgfile.endswith(".frg")
fp = open(idsfile)
allowed = set(x.strip() for x in fp)
logging.debug("A total of {0} allowed ids loaded.".format(len(allowed)))
newfrgfile = frgfile.replace(".frg", ".filtered.frg")
fp = open(frgfile)
fw = open(newfrgfile, "w")
nfrags, discarded_frags = 0, 0
nmates, discarded_mates = 0, 0
for rec in iter_records(fp):
if rec.type == "FRG":
readname = rec.get_field("acc")
readname = readname.rstrip("ab")
nfrags += 1
if readname not in allowed:
discarded_frags += 1
continue
if rec.type == "LKG":
readname = rec.get_field("frg")
readname = readname.rstrip("ab")
nmates += 1
if readname not in allowed:
discarded_mates += 1
continue
print(rec, file=fw)
# Print out a summary
survived_frags = nfrags - discarded_frags
survived_mates = nmates - discarded_mates
print("Survived fragments: {0}".\
format(percentage(survived_frags, nfrags)), file=sys.stderr)
print("Survived mates: {0}".\
format(percentage(survived_mates, nmates)), file=sys.stderr) | %prog filter frgfile idsfile
Removes the reads from frgfile that are indicated as duplicates in the
clstrfile (generated by CD-HIT-454). `idsfile` includes a set of names to
include in the filtered frgfile. See apps.cdhit.ids(). |
def pick(self, *props):
"""
Picks select parameters from this Parameters and returns them as a new Parameters object.
:param props: keys to be picked and copied over to new Parameters.
:return: a new Parameters object.
"""
result = Parameters()
for prop in props:
if self.contains_key(prop):
result.put(prop, self.get(prop))
return result | Picks select parameters from this Parameters and returns them as a new Parameters object.
:param props: keys to be picked and copied over to new Parameters.
:return: a new Parameters object. |
def check(self, topic, value):
""" Checking the value if it fits into the given specification """
datatype_key = topic.meta.get('datatype', 'none')
self._datatypes[datatype_key].check(topic, value)
validate_dt = topic.meta.get('validate', None)
if validate_dt:
self._datatypes[validate_dt].check(topic, value) | Check whether the value fits the given specification |
def run_iterations(cls, the_callable, iterations=1, label=None, schedule='* * * * * *', userdata = None, run_immediately=False, delay_until=None):
"""Class method to run a callable with a specified number of iterations"""
task = task_with_callable(the_callable, label=label, schedule=schedule, userdata=userdata)
task.iterations = iterations
if delay_until is not None:
if isinstance(delay_until, datetime):
if delay_until > timezone.now():
task.start_running = delay_until
else:
raise ValueError("Task cannot start running in the past")
else:
raise ValueError("delay_until must be a datetime.datetime instance")
if run_immediately:
task.next_run = timezone.now()
else:
task.calc_next_run()
task.save() | Class method to run a callable with a specified number of iterations |
def exportable(self):
"""
``False`` if this signature is marked as being not exportable. Otherwise, ``True``.
"""
if 'ExportableCertification' in self._signature.subpackets:
return bool(next(iter(self._signature.subpackets['ExportableCertification'])))
return True | ``False`` if this signature is marked as being not exportable. Otherwise, ``True``. |
def deploy(self, initial_instance_count, instance_type, accelerator_type=None, endpoint_name=None,
use_compiled_model=False, update_endpoint=False, **kwargs):
"""Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object.
More information:
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Args:
initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction.
instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
for example, 'ml.c4.xlarge'.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of
the training job is used.
use_compiled_model (bool): Flag to select whether to use compiled (optimized) model. Default: False.
update_endpoint (bool): Flag to update the model in an existing Amazon SageMaker endpoint.
If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources
corresponding to the previous EndpointConfig. Default: False
tags(List[dict[str, str]]): Optional. The list of tags to attach to this specific endpoint. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see https://boto3.amazonaws.com/v1/documentation\
/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method,
which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences.
"""
self._ensure_latest_training_job()
endpoint_name = endpoint_name or self.latest_training_job.name
self.deploy_instance_type = instance_type
if use_compiled_model:
family = '_'.join(instance_type.split('.')[:-1])
if family not in self._compiled_models:
raise ValueError("No compiled model for {}. "
"Please compile one with compile_model before deploying.".format(family))
model = self._compiled_models[family]
else:
model = self.create_model(**kwargs)
return model.deploy(
instance_type=instance_type,
initial_instance_count=initial_instance_count,
accelerator_type=accelerator_type,
endpoint_name=endpoint_name,
update_endpoint=update_endpoint,
tags=self.tags) | Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object.
More information:
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Args:
initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction.
instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
for example, 'ml.c4.xlarge'.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of
the training job is used.
use_compiled_model (bool): Flag to select whether to use compiled (optimized) model. Default: False.
update_endpoint (bool): Flag to update the model in an existing Amazon SageMaker endpoint.
If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources
corresponding to the previous EndpointConfig. Default: False
tags(List[dict[str, str]]): Optional. The list of tags to attach to this specific endpoint. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see https://boto3.amazonaws.com/v1/documentation\
/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method,
which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences. |
def _apply_advanced_config(config_spec, advanced_config, vm_extra_config=None):
'''
Sets configuration parameters for the vm
config_spec
vm.ConfigSpec object
advanced_config
config key value pairs
vm_extra_config
Virtual machine vm_ref.config.extraConfig object
'''
log.trace('Configuring advanced configuration '
'parameters %s', advanced_config)
if isinstance(advanced_config, str):
raise salt.exceptions.ArgumentValueError(
'The specified \'advanced_configs\' configuration '
'option cannot be parsed, please check the parameters')
for key, value in six.iteritems(advanced_config):
if vm_extra_config:
for option in vm_extra_config:
if option.key == key and option.value == str(value):
continue
else:
option = vim.option.OptionValue(key=key, value=value)
config_spec.extraConfig.append(option) | Sets configuration parameters for the vm
config_spec
vm.ConfigSpec object
advanced_config
config key value pairs
vm_extra_config
Virtual machine vm_ref.config.extraConfig object |
def render_image(self, rgbobj, dst_x, dst_y):
"""Render the image represented by (rgbobj) at dst_x, dst_y
in the pixel space.
"""
pos = (0, 0)
arr = self.viewer.getwin_array(order=self.rgb_order, alpha=1.0,
dtype=np.uint8)
#pos = (dst_x, dst_y)
#print('dst', pos)
#pos = self.tform['window_to_native'].to_(pos)
#print('dst(c)', pos)
self.gl_set_image(arr, pos) | Render the image represented by (rgbobj) at dst_x, dst_y
in the pixel space. |
def is_containerized() -> bool:
'''
Check if I am running inside a Linux container.
'''
try:
cginfo = Path('/proc/self/cgroup').read_text()
except IOError:
return False
return '/docker/' in cginfo or '/lxc/' in cginfo | Check if I am running inside a Linux container. |
def format_row(self, row):
""" Apply overflow, justification and padding to a row. Returns lines
(plural) of rendered text for the row. """
assert all(isinstance(x, VTMLBuffer) for x in row)
raw = (fn(x) for x, fn in zip(row, self.formatters))
for line in itertools.zip_longest(*raw):
line = list(line)
for i, col in enumerate(line):
if col is None:
line[i] = self._get_blank_cell(i)
yield line | Apply overflow, justification and padding to a row. Returns lines
(plural) of rendered text for the row. |
def get_connections(self):
"""
:returns: list of dicts, or an empty list if there are no connections.
"""
path = Client.urls['all_connections']
conns = self._call(path, 'GET')
return conns | :returns: list of dicts, or an empty list if there are no connections. |
def to_dict(self):
"""
Since Collection.to_dict() returns a state dictionary with an
'elements' field we have to rename it to 'variants'.
"""
return dict(
variants=self.variants,
distinct=self.distinct,
sort_key=self.sort_key,
sources=self.sources,
source_to_metadata_dict=self.source_to_metadata_dict) | Since Collection.to_dict() returns a state dictionary with an
'elements' field we have to rename it to 'variants'. |
def character_set(instance):
"""Ensure certain properties of cyber observable objects come from the IANA
Character Set list.
"""
char_re = re.compile(r'^[a-zA-Z0-9_\(\)-]+$')
for key, obj in instance['objects'].items():
if ('type' in obj and obj['type'] == 'directory' and 'path_enc' in obj):
if enums.char_sets():
if obj['path_enc'] not in enums.char_sets():
yield JSONError("The 'path_enc' property of object '%s' "
"('%s') must be an IANA registered "
"character set."
% (key, obj['path_enc']), instance['id'])
else:
info("Can't reach IANA website; using regex for character_set.")
if not char_re.match(obj['path_enc']):
yield JSONError("The 'path_enc' property of object '%s' "
"('%s') must be an IANA registered "
"character set."
% (key, obj['path_enc']), instance['id'])
if ('type' in obj and obj['type'] == 'file' and 'name_enc' in obj):
if enums.char_sets():
if obj['name_enc'] not in enums.char_sets():
yield JSONError("The 'name_enc' property of object '%s' "
"('%s') must be an IANA registered "
"character set."
% (key, obj['name_enc']), instance['id'])
else:
info("Can't reach IANA website; using regex for character_set.")
if not char_re.match(obj['name_enc']):
yield JSONError("The 'name_enc' property of object '%s' "
"('%s') must be an IANA registered "
"character set."
% (key, obj['name_enc']), instance['id']) | Ensure certain properties of cyber observable objects come from the IANA
Character Set list. |
def _replace(self, feature, cursor):
"""
Insert a feature into the database.
"""
try:
cursor.execute(
constants._UPDATE,
list(feature.astuple()) + [feature.id])
except sqlite3.ProgrammingError:
cursor.execute(
constants._INSERT,
list(feature.astuple(self.default_encoding)) + [feature.id]) | Insert a feature into the database. |
def doQuery(self, url, method='GET', getParmeters=None, postParameters=None, files=None, extraHeaders={}, session={}):
"""Send a request to the server and return the result"""
# Build headers
headers = {}
if not postParameters:
postParameters = {}
for key, value in extraHeaders.iteritems():
# Fixes #197 for values with utf-8 chars to be passed into plugit
if isinstance(value, basestring):
headers['X-Plugit-' + key] = value.encode('utf-8')
else:
headers['X-Plugit-' + key] = value
for key, value in session.iteritems():
headers['X-Plugitsession-' + key] = value
if 'Cookie' not in headers:
headers['Cookie'] = ''
headers['Cookie'] += key + '=' + str(value) + '; '
if method == 'POST':
if not files:
r = requests.post(self.baseURI + '/' + url, params=getParmeters, data=postParameters, stream=True, headers=headers)
else:
# Special way, for big files
# Requests is not usable: https://github.com/shazow/urllib3/issues/51
from poster.encode import multipart_encode, MultipartParam
from poster.streaminghttp import register_openers
import urllib2
import urllib
# Register the streaming http handlers with urllib2
register_openers()
# headers contains the necessary Content-Type and Content-Length
# datagen is a generator object that yields the encoded parameters
data = []
for x in postParameters:
if isinstance(postParameters[x], list):
for elem in postParameters[x]:
data.append((x, elem))
else:
data.append((x, postParameters[x]))
for f in files:
data.append((f, MultipartParam(f, fileobj=open(files[f].temporary_file_path(), 'rb'), filename=files[f].name)))
datagen, headers_multi = multipart_encode(data)
headers.update(headers_multi)
if getParmeters:
get_uri = '?' + urllib.urlencode(getParmeters)
else:
get_uri = ''
# Create the Request object
request = urllib2.Request(self.baseURI + '/' + url + get_uri, datagen, headers)
re = urllib2.urlopen(request)
from requests import Response
r = Response()
r.status_code = re.getcode()
r.headers = dict(re.info())
r.encoding = "application/json"
r.raw = re.read()
r._content = r.raw
return r
else:
# Call the function based on the method.
r = requests.request(method.upper(), self.baseURI + '/' + url, params=getParmeters, stream=True, headers=headers, allow_redirects=True)
return r | Send a request to the server and return the result |
def add_dnc(
self,
obj_id,
channel='email',
reason=MANUAL,
channel_id=None,
comments='via API'
):
"""
Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str
"""
data = {
'reason': reason,
'channelId': channel_id,
'comments': comments
}
response = self._client.session.post(
'{url}/{id}/dnc/add/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
),
data=data
)
return self.process_response(response) | Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str |
def get_prinz_pot(nstep, x0=0., nskip=1, dt=0.01, kT=10.0, mass=1.0, damping=1.0):
r"""wrapper for the Prinz model generator"""
pw = PrinzModel(dt, kT, mass=mass, damping=damping)
return pw.sample(x0, nstep, nskip=nskip) | r"""wrapper for the Prinz model generator |
def fetch_table_names(self, include_system_table=False):
"""
:return: List of table names in the database.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_table_names())
:Output:
.. code-block:: python
['hoge']
"""
self.check_connection()
return self.schema_extractor.fetch_table_names(include_system_table) | :return: List of table names in the database.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_table_names())
:Output:
.. code-block:: python
['hoge'] |
def recv(self, bufsiz, flags=None):
"""
Receive data on the connection.
:param bufsiz: The maximum number of bytes to read
:param flags: (optional) The only supported flag is ``MSG_PEEK``,
all other flags are ignored.
:return: The string read from the Connection
"""
buf = _no_zero_allocator("char[]", bufsiz)
if flags is not None and flags & socket.MSG_PEEK:
result = _lib.SSL_peek(self._ssl, buf, bufsiz)
else:
result = _lib.SSL_read(self._ssl, buf, bufsiz)
self._raise_ssl_error(self._ssl, result)
return _ffi.buffer(buf, result)[:] | Receive data on the connection.
:param bufsiz: The maximum number of bytes to read
:param flags: (optional) The only supported flag is ``MSG_PEEK``,
all other flags are ignored.
:return: The string read from the Connection |
def p_const_expression_stringliteral(self, p):
'const_expression : stringliteral'
p[0] = StringConst(p[1], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | const_expression : stringliteral |
def eventFilter(self, watchedObject, event):
""" Deletes an item from an editable combobox when the delete or backspace key is pressed
in the list of items, or when ctrl-delete or ctrl-back space is pressed in the
line-edit.
When the combobox is not editable the filter does nothing.
"""
if self.comboBox.isEditable() and event.type() == QtCore.QEvent.KeyPress:
key = event.key()
if key in (Qt.Key_Delete, Qt.Key_Backspace):
if (watchedObject == self._comboboxListView
or (watchedObject == self.comboBox
and event.modifiers() == Qt.ControlModifier)):
index = self._comboboxListView.currentIndex()
if index.isValid():
row = index.row()
logger.debug("Removing item {} from the combobox: {}"
.format(row, self._comboboxListView.model().data(index)))
self.cti.removeValueByIndex(row)
self.comboBox.removeItem(row)
return True
# Calling parent event filter, which may filter out other events.
return super(ChoiceCtiEditor, self).eventFilter(watchedObject, event) | Deletes an item from an editable combobox when the delete or backspace key is pressed
in the list of items, or when ctrl-delete or ctrl-back space is pressed in the
line-edit.
When the combobox is not editable the filter does nothing. |
def insert(self, key, obj, future_expiration_minutes=15):
"""
Insert item into cache.
:param key: key to look up in cache.
:type key: ``object``
:param obj: item to store in cache.
:type obj: varies
:param future_expiration_minutes: number of minutes item is valid
:type param: ``int``
:returns: True
:rtype: ``bool``
"""
expiration_time = self._calculate_expiration(future_expiration_minutes)
self._CACHE[key] = (expiration_time, obj)
return True | Insert item into cache.
:param key: key to look up in cache.
:type key: ``object``
:param obj: item to store in cache.
:type obj: varies
:param future_expiration_minutes: number of minutes item is valid
:type param: ``int``
:returns: True
:rtype: ``bool`` |
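A minimal usage sketch for the expiring-cache insert above (the owning class name and constructor are hypothetical placeholders, not from the source):
cache = ExpiringCache()                                    # hypothetical constructor
cache.insert('user:42', {'name': 'Ada'}, future_expiration_minutes=5)
# The entry is stored internally as (expiration_time, obj) under the key 'user:42'.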
def present(name,
vname=None,
vdata=None,
vtype='REG_SZ',
use_32bit_registry=False,
win_owner=None,
win_perms=None,
win_deny_perms=None,
win_inheritance=True,
win_perms_reset=False):
r'''
Ensure a registry key or value is present.
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (``vname``) is passed,
this will be the data for that value name. If not, this will be the
``(Default)`` value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary automatically. To pass binary data, use the built-in
yaml tag ``!!binary`` to denote the actual binary
characters. For example, the following lines will both set
the same data in the registry:
- ``vdata: Salty Test``
- ``vdata: !!binary U2FsdHkgVGVzdA==\n``
For more information about the ``!!binary`` tag see
`here <http://yaml.org/type/binary.html>`_
.. note::
The type for the ``(Default)`` value is always REG_SZ and cannot
be changed. This parameter is optional. If not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type for the data you wish to store in the registry. Valid
values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ (Default)
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
win_owner (str):
The owner of the registry key. If this is not passed, the account
under which Salt is running will be used.
.. note::
Owner is set for the key that contains the value/data pair. You
cannot set ownership on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms (dict):
A dictionary containing permissions to grant and their propagation.
If not passed the 'Grant` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
For each user specify the account name, with a sub dict for the
permissions to grant and the 'Applies to' setting. For example:
``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_key_subkeys'}}``. ``perms`` must be specified.
Registry permissions are specified using the ``perms`` key. You can
specify a single basic permission or a list of advanced perms. The
following are valid perms:
Basic (passed as a string):
- full_control
- read
- write
Advanced (passed as a list):
- delete
- query_value
- set_value
- create_subkey
- enum_subkeys
- notify
- create_link
- read_control
- write_dac
- write_owner
The 'Applies to' setting is optional. It is specified using the
``applies_to`` key. If not specified ``this_key_subkeys`` is used.
Valid options are:
Applies to settings:
- this_key_only
- this_key_subkeys
- subkeys_only
.. versionadded:: 2019.2.0
win_deny_perms (dict):
A dictionary containing permissions to deny and their propagation.
If not passed the `Deny` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
Valid options are the same as those specified in ``win_perms``
.. note::
'Deny' permissions always take precedence over 'grant'
permissions.
.. versionadded:: 2019.2.0
win_inheritance (bool):
``True`` to inherit permissions from the parent key. ``False`` to
disable inheritance. Default is ``True``.
.. note::
Inheritance is set for the key that contains the value/data
pair. You cannot set inheritance on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms_reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``
.. note::
Perms are reset for the key that contains the value/data pair.
You cannot set permissions on value/data pairs themselves.
.. versionadded:: 2019.2.0
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
Example:
The following example will set the value for the ``version`` entry under
the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``. The value will be reflected in ``Wow6432Node``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
Example:
Binary data can be set in two ways. The following two examples will set
a binary value of ``Salty Test``
.. code-block:: yaml
no_conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state
- vdata: Salty Test
- vtype: REG_BINARY
conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state_with_tag
- vdata: !!binary U2FsdHkgVGVzdA==\n
- vtype: REG_BINARY
Example:
To set a ``REG_MULTI_SZ`` value:
.. code-block:: yaml
reg_multi_sz:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: reg_multi_sz
- vdata:
- list item 1
- list item 2
Example:
To ensure a key is present and has permissions:
.. code-block:: yaml
set_key_permissions:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: version
- vdata: 2016.3.1
- win_owner: Administrators
- win_perms:
jsnuffy:
perms: full_control
sjones:
perms:
- read_control
- enum_subkeys
- query_value
applies_to:
- this_key_only
- win_deny_perms:
bsimpson:
perms: full_control
applies_to: this_key_subkeys
- win_inheritance: True
- win_perms_reset: True
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_current = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
# Check if the key already exists
# If so, check perms
# We check `vdata` and `success` because `vdata` can be None
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already present' \
''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)',
salt.utils.stringutils.to_unicode(name, 'utf-8'))
return __utils__['dacl.check_perms'](
obj_name='\\'.join([hive, key]),
obj_type='registry32' if use_32bit_registry else 'registry',
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
# Cast the vdata according to the vtype
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'),
'Value': vdata_decoded,
'Owner': win_owner,
'Perms': {'Grant': win_perms,
'Deny': win_deny_perms},
'Inheritance': win_inheritance}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
# Configure the value
ret['result'] = __utils__['reg.set_value'](hive=hive,
key=key,
vname=vname,
vdata=vdata,
vtype=vtype,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
if ret['result']:
ret = __utils__['dacl.check_perms'](
obj_name='\\'.join([hive, key]),
obj_type='registry32' if use_32bit_registry else 'registry',
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
return ret | r'''
Ensure a registry key or value is present.
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (``vname``) is passed,
this will be the data for that value name. If not, this will be the
``(Default)`` value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary automatically. To pass binary data, use the built-in
yaml tag ``!!binary`` to denote the actual binary
characters. For example, the following lines will both set
the same data in the registry:
- ``vdata: Salty Test``
- ``vdata: !!binary U2FsdHkgVGVzdA==\n``
For more information about the ``!!binary`` tag see
`here <http://yaml.org/type/binary.html>`_
.. note::
The type for the ``(Default)`` value is always REG_SZ and cannot
be changed. This parameter is optional. If not passed, the Key
will be created with no associated item/value pairs.
vtype (str):
The value type for the data you wish to store in the registry. Valid
values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ (Default)
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
win_owner (str):
The owner of the registry key. If this is not passed, the account
under which Salt is running will be used.
.. note::
Owner is set for the key that contains the value/data pair. You
cannot set ownership on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms (dict):
A dictionary containing permissions to grant and their propagation.
If not passed the 'Grant` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
For each user specify the account name, with a sub dict for the
permissions to grant and the 'Applies to' setting. For example:
``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_key_subkeys'}}``. ``perms`` must be specified.
Registry permissions are specified using the ``perms`` key. You can
specify a single basic permission or a list of advanced perms. The
following are valid perms:
Basic (passed as a string):
- full_control
- read
- write
Advanced (passed as a list):
- delete
- query_value
- set_value
- create_subkey
- enum_subkeys
- notify
- create_link
- read_control
- write_dac
- write_owner
The 'Applies to' setting is optional. It is specified using the
``applies_to`` key. If not specified ``this_key_subkeys`` is used.
Valid options are:
Applies to settings:
- this_key_only
- this_key_subkeys
- subkeys_only
.. versionadded:: 2019.2.0
win_deny_perms (dict):
A dictionary containing permissions to deny and their propagation.
If not passed the `Deny` permissions will not be modified.
.. note::
Permissions are set for the key that contains the value/data
pair. You cannot set permissions on value/data pairs themselves.
Valid options are the same as those specified in ``win_perms``
.. note::
'Deny' permissions always take precedence over 'grant'
permissions.
.. versionadded:: 2019.2.0
win_inheritance (bool):
``True`` to inherit permissions from the parent key. ``False`` to
disable inheritance. Default is ``True``.
.. note::
Inheritance is set for the key that contains the value/data
pair. You cannot set inheritance on value/data pairs themselves.
.. versionadded:: 2019.2.0
win_perms_reset (bool):
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``
.. note::
Perms are reset for the key that contains the value/data pair.
You cannot set permissions on value/data pairs themselves.
.. versionadded:: 2019.2.0
Returns:
dict: A dictionary showing the results of the registry operation.
Example:
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
Example:
The following example will set the value for the ``version`` entry under
the ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to
``2016.3.1``. The value will be reflected in ``Wow6432Node``:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
Example:
Binary data can be set in two ways. The following two examples will set
a binary value of ``Salty Test``
.. code-block:: yaml
no_conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state
- vdata: Salty Test
- vtype: REG_BINARY
conversion:
reg.present:
- name: HKLM\SOFTWARE\SaltTesting
- vname: test_reg_binary_state_with_tag
- vdata: !!binary U2FsdHkgVGVzdA==\n
- vtype: REG_BINARY
Example:
To set a ``REG_MULTI_SZ`` value:
.. code-block:: yaml
reg_multi_sz:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: reg_multi_sz
- vdata:
- list item 1
- list item 2
Example:
To ensure a key is present and has permissions:
.. code-block:: yaml
set_key_permissions:
reg.present:
- name: HKLM\SOFTWARE\Salt
- vname: version
- vdata: 2016.3.1
- win_owner: Administrators
- win_perms:
jsnuffy:
perms: full_control
sjones:
perms:
- read_control
- enum_subkeys
- query_value
applies_to:
- this_key_only
- win_deny_perms:
bsimpson:
perms: full_control
applies_to: this_key_subkeys
- win_inheritance: True
- win_perms_reset: True |
def generate_scalar_constant(output_name, tensor_name, scalar):
"""Convert a scalar value to a Constant buffer.
This is mainly used for xxScalar operators."""
t = onnx.helper.make_tensor(tensor_name,
data_type=TensorProto.FLOAT,
dims=[1], vals=[scalar])
c = onnx.helper.make_node("Constant",
[],
[output_name],
value=t)
return c | Convert a scalar value to a Constant buffer.
This is mainly used for xxScalar operators. |
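A small sketch of how the helper above might be called to emit a Constant node for an xxScalar operator (assumes the onnx package is installed for the helper's own imports; the output and tensor names are illustrative):
node = generate_scalar_constant('mul_scalar_out', 'mul_scalar_value', 2.5)
print(node)  # a Constant node whose value is a one-element FLOAT tensor named 'mul_scalar_value'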
def binaryEntropy(x):
"""
Calculate entropy for a list of binary random variables
:param x: (torch tensor) the probability of the variable to be 1.
:return: entropy: (torch tensor) entropy, sum(entropy)
"""
entropy = - x*x.log2() - (1-x)*(1-x).log2()
entropy[x*(1 - x) == 0] = 0
return entropy, entropy.sum() | Calculate entropy for a list of binary random variables
:param x: (torch tensor) the probability of the variable to be 1.
:return: entropy: (torch tensor) entropy, sum(entropy) |
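A worked example for the entropy helper above (assumes PyTorch; values chosen so the masking of deterministic units is visible):
import torch
probs = torch.tensor([0.5, 0.25, 1.0])
per_unit, total = binaryEntropy(probs)
# per_unit ~= [1.0000, 0.8113, 0.0000]; p = 1.0 would produce NaN but is masked to 0.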
def copy(self, dest, symlinks=False):
""" Copy to destination directory recursively.
If symlinks is true, symbolic links in the source tree are represented
as symbolic links in the new tree, but the metadata of the original
links is NOT copied; if false or omitted, the contents and metadata of
the linked files are copied to the new tree.
"""
if isinstance(dest, Directory):
dest = dest.get_name()
        shutil.copytree(self.dirname, dest, symlinks=symlinks) | Copy to destination directory recursively.
If symlinks is true, symbolic links in the source tree are represented
as symbolic links in the new tree, but the metadata of the original
links is NOT copied; if false or omitted, the contents and metadata of
the linked files are copied to the new tree. |
def _l2rgb(self, mode):
"""Convert from L (black and white) to RGB.
"""
self._check_modes(("L", "LA"))
self.channels.append(self.channels[0].copy())
self.channels.append(self.channels[0].copy())
if self.fill_value is not None:
self.fill_value = self.fill_value[:1] * 3 + self.fill_value[1:]
if self.mode == "LA":
self.channels[1], self.channels[3] = \
self.channels[3], self.channels[1]
self.mode = mode | Convert from L (black and white) to RGB. |
def filtany(entities, **kw):
"""Filter a set of entities based on method return. Use keyword arguments.
Example:
    filtany(entities, id='123')
    filtany(entities, name='bart')
Multiple filters are 'OR'.
"""
ret = set()
for k,v in kw.items():
for entity in entities:
if getattr(entity, k)() == v:
ret.add(entity)
return ret | Filter a set of entities based on method return. Use keyword arguments.
Example:
filtany(entities, id='123')
filtany(entities, name='bart')
Multiple filters are 'OR'. |
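A short sketch of the filter above with hypothetical entities whose attributes are methods, as filtany expects:
class Stop(object):
    def __init__(self, ident, name):
        self._ident, self._name = ident, name
    def id(self):
        return self._ident
    def name(self):
        return self._name
stops = [Stop('123', 'bart'), Stop('456', 'muni')]
matches = filtany(stops, name='bart')   # set containing only the first Stop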
def objwalk(obj, path=(), memo=None):
"""
    Walks an arbitrary python object.
:param mixed obj: Any python object
:param tuple path: A tuple of the set attributes representing the path to the value
:param set memo: The list of attributes traversed thus far
:rtype <tuple<tuple>, <mixed>>: The path to the value on the object, the value.
"""
if len( path ) > MAX_DEPTH + 1:
yield path, obj # Truncate it!
if memo is None:
memo = set()
iterator = None
if isinstance(obj, Mapping):
iterator = iteritems
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):
iterator = enumerate
elif hasattr( obj, '__class__' ) and hasattr( obj, '__dict__' ) and type(obj) not in primitives: # If type(obj) == <instance>
iterator = class_iterator
elif hasattr(obj, '__iter__') or isinstance(obj, types.GeneratorType):
obj = [o for o in obj]
else:
pass
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for path_component, value in iterator(obj):
for result in objwalk(value, path + (path_component,), memo):
yield result
memo.remove(id(obj))
else:
        yield path, obj | Walks an arbitrary python object.
:param mixed obj: Any python object
:param tuple path: A tuple of the set attributes representing the path to the value
:param set memo: The list of attributes traversed thus far
:rtype <tuple<tuple>, <mixed>>: The path to the value on the object, the value. |
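An illustrative walk over a small nested structure (the module's own iteritems/string_types helpers are assumed to be in scope; dict key order may vary):
doc = {'a': [1, 2], 'b': {'c': 'x'}}
for path, value in objwalk(doc):
    print(path, value)
# e.g. ('a', 0) 1 / ('a', 1) 2 / ('b', 'c') x -- one (path, leaf) pair per scalar value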
def nb_r_deriv(r, data_row):
"""
Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
        r (float): the R parameter in the NB distribution
data_row (array): 1d array of length cells
"""
n = len(data_row)
d = sum(digamma(data_row + r)) - n*digamma(r) + n*np.log(r/(r+np.mean(data_row)))
return d | Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
    r (float): the R parameter in the NB distribution
data_row (array): 1d array of length cells |
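A quick numeric sketch of the derivative above (assumes numpy and scipy.special.digamma, which the function itself relies on; the counts are made up):
import numpy as np
counts = np.array([0, 1, 3, 2, 5])   # illustrative expression counts for one gene across five cells
print(nb_r_deriv(2.0, counts))       # the sign tells a root-finder which way to move r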
def delete_webhook(self, ):
"""
Use this method to remove webhook integration if you decide to switch back to getUpdates. Returns True on success. Requires no parameters.
https://core.telegram.org/bots/api#deletewebhook
Returns:
:return: Returns True on success
:rtype: bool
"""
result = self.do("deleteWebhook", )
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
try:
return from_array_list(bool, result, list_level=0, is_builtin=True)
except TgApiParseException:
logger.debug("Failed parsing as primitive bool", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | Use this method to remove webhook integration if you decide to switch back to getUpdates. Returns True on success. Requires no parameters.
https://core.telegram.org/bots/api#deletewebhook
Returns:
:return: Returns True on success
:rtype: bool |
def generate_report(
self,
components,
output_folder=None,
iface=None,
ordered_layers_uri=None,
legend_layers_uri=None,
use_template_extent=False):
"""Generate Impact Report independently by the Impact Function.
:param components: Report components to be generated.
:type components: list
:param output_folder: The output folder.
:type output_folder: str
:param iface: A QGIS App interface
:type iface: QgsInterface
:param ordered_layers_uri: A list of layers uri for map.
:type ordered_layers_uri: list
:param legend_layers_uri: A list of layers uri for map legend.
:type legend_layers_uri: list
:param use_template_extent: A condition for using template extent.
:type use_template_extent: bool
:returns: Tuple of error code and message
:type: tuple
.. versionadded:: 4.3
"""
# iface set up, in case IF run from test
if not iface:
iface = iface_object
# don't generate infographic if exposure is not population
exposure_type = definition(
self.provenance['exposure_keywords']['exposure'])
map_overview_layer = None
generated_components = deepcopy(components)
# remove unnecessary components
if standard_multi_exposure_impact_report_metadata_pdf in (
generated_components):
generated_components.remove(
standard_multi_exposure_impact_report_metadata_pdf)
if exposure_type != exposure_population and (
infographic_report in generated_components):
generated_components.remove(infographic_report)
else:
map_overview_layer = QgsRasterLayer(
map_overview['path'], 'Overview')
add_layer_to_canvas(
map_overview_layer, map_overview['id'])
"""Map report layers preparation"""
# preparing extra layers
extra_layers = []
print_atlas = setting('print_atlas_report', False, bool)
aggregation_summary_layer = self.aggregation_summary
# Define the layers for layer order and legend
ordered_layers = None
legend_layers = None
if ordered_layers_uri:
ordered_layers = [
load_layer_from_registry(layer_path) for (
layer_path) in ordered_layers_uri]
if legend_layers_uri:
legend_layers = [
load_layer_from_registry(layer_path) for (
layer_path) in legend_layers_uri]
if print_atlas:
extra_layers.append(aggregation_summary_layer)
error_code = None
message = None
for component in generated_components:
# create impact report instance
if component['key'] == map_report['key']:
report_metadata = ReportMetadata(
metadata_dict=component)
else:
report_metadata = ReportMetadata(
metadata_dict=update_template_component(component))
self._report_metadata.append(report_metadata)
self._impact_report = ImpactReport(
iface,
report_metadata,
impact_function=self,
extra_layers=extra_layers,
ordered_layers=ordered_layers,
legend_layers=legend_layers,
use_template_extent=use_template_extent)
# Get other setting
logo_path = setting('organisation_logo_path', None, str)
self._impact_report.inasafe_context.organisation_logo = logo_path
disclaimer_text = setting('reportDisclaimer', None, str)
self._impact_report.inasafe_context.disclaimer = disclaimer_text
north_arrow_path = setting('north_arrow_path', None, str)
self._impact_report.inasafe_context.north_arrow = north_arrow_path
# get the extent of impact layer
self._impact_report.qgis_composition_context.extent = (
self.impact.extent())
# generate report folder
# no other option for now
# TODO: retrieve the information from data store
if isinstance(self.datastore.uri, QDir):
layer_dir = self.datastore.uri.absolutePath()
else:
# No other way for now
return
# We will generate it on the fly without storing it after datastore
# supports
if output_folder:
self._impact_report.output_folder = output_folder
else:
self._impact_report.output_folder = join(layer_dir, 'output')
error_code, message = self._impact_report.process_components()
if error_code == ImpactReport.REPORT_GENERATION_FAILED:
break
if map_overview_layer:
QgsProject.instance().removeMapLayer(map_overview_layer)
# Create json file for report urls
report_path = self._impact_report.output_folder
filename = join(report_path, 'report_metadata.json')
write_json(report_urls(self), filename)
return error_code, message | Generate Impact Report independently by the Impact Function.
:param components: Report components to be generated.
:type components: list
:param output_folder: The output folder.
:type output_folder: str
:param iface: A QGIS App interface
:type iface: QgsInterface
:param ordered_layers_uri: A list of layers uri for map.
:type ordered_layers_uri: list
:param legend_layers_uri: A list of layers uri for map legend.
:type legend_layers_uri: list
:param use_template_extent: A condition for using template extent.
:type use_template_extent: bool
:returns: Tuple of error code and message
:type: tuple
.. versionadded:: 4.3 |
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {
'None': None,
'True': True,
'False': False,
'dict': dict,
'list': list,
'sorted': sorted
}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, ast.List):
return list(map(_convert, node.elts))
elif isinstance(node, ast.Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, ast.Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, ast.BinOp):
left = _convert(node.left)
right = _convert(node.right)
op = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.div,
ast.Mod: operator.mod
}.get(type(node.op), None)
if op:
return op(left, right)
elif isinstance(node, ast.Call):
func = _convert(node.func)
args = map(_convert, node.args)
kwargs = dict((kw.arg, _convert(kw.value)) for kw in node.keywords)
if node.starargs:
args.extend(_convert(node.starargs))
if node.kwargs:
kwargs.update(_convert(node.kwargs))
return func(*args, **kwargs)
elif isinstance(node, ast.Attribute):
if not node.attr.startswith('_'):
return getattr(_convert(node.value), node.attr)
raise ValueError('malformed string: %r' % node)
return _convert(node_or_string) | Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None. |
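A small example of what this extended literal_eval accepts beyond the standard ast.literal_eval (note the function targets Python 2: basestring, operator.div):
literal_eval("{'a': 1 + 2, 'b': sorted([3, 1, 2])}")
# -> {'a': 3, 'b': [1, 2, 3]}; arithmetic and the whitelisted dict/list/sorted calls are allowed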
def _apply_odf_properties(df, headers, model):
"""
Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type
"""
df.headers = headers
df.model = model | Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type |
def get_times_from_utterance(utterance: str,
char_offset_to_token_index: Dict[int, int],
indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
"""
Given an utterance, we get the numbers that correspond to times and convert them to
values that may appear in the query. For example: convert ``7pm`` to ``1900``.
"""
pm_linking_dict = _time_regex_match(r'\d+pm',
utterance,
char_offset_to_token_index,
pm_map_match_to_query_value,
indices_of_approximate_words)
am_linking_dict = _time_regex_match(r'\d+am',
utterance,
char_offset_to_token_index,
am_map_match_to_query_value,
indices_of_approximate_words)
oclock_linking_dict = _time_regex_match(r"\d+ o'clock",
utterance,
char_offset_to_token_index,
lambda match: digit_to_query_time(match.rstrip(" o'clock")),
indices_of_approximate_words)
hours_linking_dict = _time_regex_match(r"\d+ hours",
utterance,
char_offset_to_token_index,
lambda match: [int(match.rstrip(" hours"))],
indices_of_approximate_words)
times_linking_dict: Dict[str, List[int]] = defaultdict(list)
linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]
for linking_dict in linking_dicts:
for key, value in linking_dict.items():
times_linking_dict[key].extend(value)
return times_linking_dict | Given an utterance, we get the numbers that correspond to times and convert them to
values that may appear in the query. For example: convert ``7pm`` to ``1900``. |
def changiling(self, infile):
        '''Changiling: replace an arbitrary byte character with another arbitrary byte character
'''
gf = infile[31:]
baby, fetch = (self.word_toaster() for _ in range(2))
gf = [g.replace(baby, fetch) for g in gf]
        return infile[:31] + gf | Changiling: replace an arbitrary byte character with another arbitrary byte character
def patch_project(self, owner, id, **kwargs):
"""
Update a project
Update an existing project. Note that only elements, files or linked datasets included in the request will be updated. All omitted elements, files or linked datasets will remain untouched.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_project(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required)
:param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required)
:param ProjectPatchRequest body:
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.patch_project_with_http_info(owner, id, **kwargs)
else:
(data) = self.patch_project_with_http_info(owner, id, **kwargs)
return data | Update a project
Update an existing project. Note that only elements, files or linked datasets included in the request will be updated. All omitted elements, files or linked datasets will remain untouched.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_project(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), government is the unique identifier of the owner. (required)
:param str id: Project unique identifier. For example, in the URL:[https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), how-to-add-depth-to-your-data-with-the-us-census-acs is the unique identifier of the project. (required)
:param ProjectPatchRequest body:
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread. |
def findScopedPar(theDict, scope, name):
""" Find the given par. Return tuple: (its own (sub-)dict, its value). """
# Do not search (like findFirstPar), but go right to the correct
# sub-section, and pick it up. Assume it is there as stated.
if len(scope):
theDict = theDict[scope] # ! only goes one level deep - enhance !
return theDict, theDict[name] | Find the given par. Return tuple: (its own (sub-)dict, its value). |
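A tiny illustration of the scoped lookup above with a hypothetical parameter dict:
pars = {'camera': {'exposure': 0.01, 'gain': 2}}
sub, value = findScopedPar(pars, 'camera', 'exposure')
# sub is pars['camera'] and value is 0.01; an empty scope string looks up name at the top level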
def trace(self, name, chain=-1):
"""Return the trace of a tallyable object stored in the database.
:Parameters:
name : string
The name of the tallyable object.
chain : int
The trace index. Setting `chain=i` will return the trace created by
the ith call to `sample`.
"""
trace = copy.copy(self._traces[name])
trace._chain = chain
return trace | Return the trace of a tallyable object stored in the database.
:Parameters:
name : string
The name of the tallyable object.
chain : int
The trace index. Setting `chain=i` will return the trace created by
the ith call to `sample`. |
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e | Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file |
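A brief sketch of the property fallback in get_line (run from a saved .py file so inspect can read the source):
class Reading(object):
    @property
    def celsius(self):
        return 21.0
print(get_line(Reading.__dict__['celsius']))  # reports the line of `def celsius` via the fget fallback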
def _process(self, project, build_system, job_priorities):
'''Return list of ref_data_name for job_priorities'''
jobs = []
# we cache the reference data names in order to reduce API calls
cache_key = '{}-{}-ref_data_names_cache'.format(project, build_system)
ref_data_names_map = cache.get(cache_key)
if not ref_data_names_map:
# cache expired so re-build the reference data names map; the map
# contains the ref_data_name of every treeherder *test* job for this project
ref_data_names_map = self._build_ref_data_names(project, build_system)
# update the cache
cache.set(cache_key, ref_data_names_map, SETA_REF_DATA_NAMES_CACHE_TIMEOUT)
# now check the JobPriority table against the list of valid runnable
for jp in job_priorities:
# if this JobPriority entry is no longer supported in SETA then ignore it
if not valid_platform(jp.platform):
continue
if is_job_blacklisted(jp.testtype):
continue
key = jp.unique_identifier()
if key in ref_data_names_map:
# e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
jobs.append(ref_data_names_map[key])
else:
logger.warning('Job priority (%s) not found in accepted jobs list', jp)
return jobs | Return list of ref_data_name for job_priorities |
def vote_count(self):
"""
Returns the total number of votes cast for this
        poll option.
"""
return Vote.objects.filter(
content_type=ContentType.objects.get_for_model(self),
object_id=self.id
).aggregate(Sum('vote'))['vote__sum'] or 0 | Returns the total number of votes cast for this
poll option.
def get_symbol_train(network, num_classes, from_layers, num_filters, strides, pads,
sizes, ratios, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network symbol for training SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers, use '' for add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
label = mx.sym.Variable('label')
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
tmp = mx.symbol.contrib.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out | Build network symbol for training SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers, use '' for add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol |
def average_patterson_f3(acc, aca, acb, blen, normed=True):
"""Estimate F3(C; A, B) and standard error using the block-jackknife.
Parameters
----------
acc : array_like, int, shape (n_variants, 2)
Allele counts for the test population (C).
aca : array_like, int, shape (n_variants, 2)
Allele counts for the first source population (A).
acb : array_like, int, shape (n_variants, 2)
Allele counts for the second source population (B).
blen : int
Block size (number of variants).
normed : bool, optional
If False, use un-normalised f3 values.
Returns
-------
f3 : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
z : float
Z-score (number of standard errors from zero).
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
Notes
-----
See Patterson (2012), main text and Appendix A.
See Also
--------
allel.stats.admixture.patterson_f3
"""
# calculate per-variant values
T, B = patterson_f3(acc, aca, acb)
# N.B., nans can occur if any of the populations have completely missing
# genotype calls at a variant (i.e., allele number is zero). Here we
# assume that is rare enough to be negligible.
# calculate overall value of statistic
if normed:
f3 = np.nansum(T) / np.nansum(B)
else:
f3 = np.nanmean(T)
# calculate value of statistic within each block
if normed:
T_bsum = moving_statistic(T, statistic=np.nansum, size=blen)
B_bsum = moving_statistic(B, statistic=np.nansum, size=blen)
vb = T_bsum / B_bsum
_, se, vj = jackknife((T_bsum, B_bsum),
statistic=lambda t, b: np.sum(t) / np.sum(b))
else:
vb = moving_statistic(T, statistic=np.nanmean, size=blen)
_, se, vj = jackknife(vb, statistic=np.mean)
# compute Z score
z = f3 / se
return f3, se, z, vb, vj | Estimate F3(C; A, B) and standard error using the block-jackknife.
Parameters
----------
acc : array_like, int, shape (n_variants, 2)
Allele counts for the test population (C).
aca : array_like, int, shape (n_variants, 2)
Allele counts for the first source population (A).
acb : array_like, int, shape (n_variants, 2)
Allele counts for the second source population (B).
blen : int
Block size (number of variants).
normed : bool, optional
If False, use un-normalised f3 values.
Returns
-------
f3 : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
z : float
Z-score (number of standard errors from zero).
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
Notes
-----
See Patterson (2012), main text and Appendix A.
See Also
--------
allel.stats.admixture.patterson_f3 |
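A minimal sketch of calling the block-jackknife estimator above (the allele-count arrays are fabricated; in practice they would come from scikit-allel allele counts of shape (n_variants, 2)):
import numpy as np
aca = np.array([[6, 0], [5, 1], [4, 2], [3, 3]] * 25)   # source population A
acb = np.array([[0, 6], [1, 5], [2, 4], [3, 3]] * 25)   # source population B
acc = np.array([[3, 3], [2, 4], [4, 2], [3, 3]] * 25)   # test population C
f3, se, z, vb, vj = average_patterson_f3(acc, aca, acb, blen=20)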
def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
        Plots the vibrational entropy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$S$ (J/K/mol)"
else:
ylabel = r"$S$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
        return fig | Plots the vibrational entropy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure |
def nl_send_iovec(sk, msg, iov, _):
"""Transmit Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L342
This function is identical to nl_send().
This function triggers the `NL_CB_MSG_OUT` callback.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
iov -- data payload to be sent (bytearray).
Returns:
Number of bytes sent on success or a negative error code.
"""
hdr = msghdr(msg_name=sk.s_peer, msg_iov=iov)
# Overwrite destination if specified in the message itself, defaults to the peer address of the socket.
dst = nlmsg_get_dst(msg)
if dst.nl_family == socket.AF_NETLINK:
hdr.msg_name = dst
# Add credentials if present.
creds = nlmsg_get_creds(msg)
if creds:
raise NotImplementedError # TODO https://github.com/Robpol86/libnl/issues/2
return nl_sendmsg(sk, msg, hdr) | Transmit Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L342
This function is identical to nl_send().
This function triggers the `NL_CB_MSG_OUT` callback.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
iov -- data payload to be sent (bytearray).
Returns:
Number of bytes sent on success or a negative error code. |
def bucket_exists(self, bucket_name):
"""
Check if the bucket exists and if the user has access to it.
:param bucket_name: To test the existence and user access.
:return: True on success.
"""
is_valid_bucket_name(bucket_name)
try:
self._url_open('HEAD', bucket_name=bucket_name)
# If the bucket has not been created yet, MinIO will return a "NoSuchBucket" error.
except NoSuchBucket:
return False
except ResponseError:
raise
return True | Check if the bucket exists and if the user has access to it.
:param bucket_name: To test the existence and user access.
:return: True on success. |
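A hedged usage sketch for the bucket check above (assumes the MinIO Python client; the endpoint and credentials are placeholders):
from minio import Minio
client = Minio('play.min.io', access_key='<ACCESS_KEY>', secret_key='<SECRET_KEY>')
if not client.bucket_exists('my-bucket'):
    client.make_bucket('my-bucket')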