code | docstring
---|---|
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf | Walk an unpacked egg's contents, skipping the metadata directory |
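A minimal usage sketch for `walk_egg`, assuming the generator above is importable and that `dist/example.egg` (a hypothetical path) is an already-unpacked egg directory:

```python
import os

# list every file in the unpacked egg, skipping EGG-INFO (hypothetical path)
egg_dir = "dist/example.egg"
for base, dirs, files in walk_egg(egg_dir):
    for name in files:
        print(os.path.join(base, name))
```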
def log(self, uuid=None, organization=None, from_date=None, to_date=None):
""""List enrollment information available in the registry.
Method that returns a list of enrollments. If <uuid> parameter is set,
it will return the enrollments related to that unique identity;
if <organization> parameter is given, it will return the enrollments
related to that organization; if both parameters are set, the function
will return the list of enrollments of <uuid> on the <organization>.
Enrollments between a period can also be listed using <from_date> and
<to_date> parameters. When these are set, the method will return
all those enrollments where Enrollment.start >= from_date AND
Enrollment.end <= to_date. Default values for these dates are
1900-01-01 and 2100-01-01.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
"""
try:
enrollments = api.enrollments(self.db, uuid, organization,
from_date, to_date)
self.display('log.tmpl', enrollments=enrollments)
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
return e.code
return CMD_SUCCESS | List enrollment information available in the registry.
Method that returns a list of enrollments. If <uuid> parameter is set,
it will return the enrollments related to that unique identity;
if <organization> parameter is given, it will return the enrollments
related to that organization; if both parameters are set, the function
will return the list of enrollments of <uuid> on the <organization>.
Enrollments between a period can also be listed using <from_date> and
<to_date> parameters. When these are set, the method will return
all those enrollments where Enrollment.start >= from_date AND
Enrollment.end <= to_date. Default values for these dates are
1900-01-01 and 2100-01-01.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends |
def if_has_delegate(delegate):
"""Wrap a delegated instance attribute function.
Creates a decorator for methods that are delegated in the presence of a
results wrapper. This enables duck-typing by ``hasattr`` returning True
according to the sub-estimator.
This function was adapted from scikit-learn, which defines
``if_delegate_has_method``, but operates differently by injecting methods
not based on method presence, but by delegate presence.
Examples
--------
>>> from pmdarima.utils.metaestimators import if_has_delegate
>>>
>>> class A(object):
... @if_has_delegate('d')
... def func(self):
... return True
>>>
>>> a = A()
>>> # the delegate does not exist yet
>>> assert not hasattr(a, 'func')
>>> # inject the attribute
>>> a.d = None
>>> assert hasattr(a, 'func') and a.func()
Parameters
----------
delegate : string, list of strings or tuple of strings
Name of the sub-estimator that can be accessed as an attribute of the
base object. If a list or a tuple of names are provided, the first
sub-estimator that is an attribute of the base object will be used.
"""
if isinstance(delegate, list):
delegate = tuple(delegate)
if not isinstance(delegate, tuple):
delegate = (delegate,)
return lambda fn: _IffHasDelegate(fn, delegate) | Wrap a delegated instance attribute function.
Creates a decorator for methods that are delegated in the presence of a
results wrapper. This enables duck-typing by ``hasattr`` returning True
according to the sub-estimator.
This function was adapted from scikit-learn, which defines
``if_delegate_has_method``, but operates differently by injecting methods
not based on method presence, but by delegate presence.
Examples
--------
>>> from pmdarima.utils.metaestimators import if_has_delegate
>>>
>>> class A(object):
... @if_has_delegate('d')
... def func(self):
... return True
>>>
>>> a = A()
>>> # the delegate does not exist yet
>>> assert not hasattr(a, 'func')
>>> # inject the attribute
>>> a.d = None
>>> assert hasattr(a, 'func') and a.func()
Parameters
----------
delegate : string, list of strings or tuple of strings
Name of the sub-estimator that can be accessed as an attribute of the
base object. If a list or a tuple of names are provided, the first
sub-estimator that is an attribute of the base object will be used. |
def ping(self):
""" Notify the queue that this task is still active. """
if self.finished is not None:
raise AlreadyFinished()
with self._db_conn() as conn:
success = conn.query('''
UPDATE %s
SET
last_contact=%%(now)s,
update_count=update_count + 1
WHERE
id = %%(task_id)s
AND execution_id = %%(execution_id)s
AND last_contact > %%(now)s - INTERVAL %%(ttl)s SECOND
''' % self._queue.table_name,
now=datetime.utcnow(),
task_id=self.task_id,
execution_id=self.execution_id,
ttl=self._queue.execution_ttl)
if success != 1:
raise TaskDoesNotExist() | Notify the queue that this task is still active. |
def isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):
"""
Python 3.4 does not have math.isclose, so we need to steal it and add it here.
"""
try:
return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
except AttributeError:
# Running on older version of python, fall back to hand-rolled implementation
if (rel_tol < 0.0) or (abs_tol < 0.0):
raise ValueError("Tolerances must be non-negative, but are rel_tol: {} and abs_tol: {}".format(rel_tol, abs_tol))
if math.isnan(a) or math.isnan(b):
return False # NaNs are never close to anything, even other NaNs
if (a == b):
return True
if math.isinf(a) or math.isinf(b):
return False # Infinity is only close to itself, and we already handled that case
diff = abs(a - b)
return (diff <= rel_tol * abs(b)) or (diff <= rel_tol * abs(a)) or (diff <= abs_tol) | Python 3.4 does not have math.isclose, so we need to steal it and add it here. |
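A few worked cases for the fallback above (the same results come from `math.isclose` on Python 3.5+); they illustrate how the relative tolerance scales with the operands while `abs_tol` handles comparisons against zero:

```python
print(isclose(1.0, 1.0 + 1e-10))            # True:  diff 1e-10 <= rel_tol * 1.0
print(isclose(1.0, 1.001))                  # False: 1e-3 exceeds the default rel_tol=1e-9
print(isclose(1.0, 1.001, rel_tol=1e-2))    # True:  1e-3 <= 1e-2 * 1.001
print(isclose(0.0, 1e-12))                  # False: a relative tolerance is useless against zero...
print(isclose(0.0, 1e-12, abs_tol=1e-9))    # ...which is what abs_tol is for
```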
def run_foreach_or_conditional(self, context):
"""Run the foreach sequence or the conditional evaluation.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# friendly reminder [] list obj (i.e empty) evals False
if self.foreach_items:
self.foreach_loop(context)
else:
# since no looping required, don't pollute output with looping info
self.run_conditional_decorators(context)
logger.debug("done") | Run the foreach sequence or the conditional evaluation.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. |
def toggle_item(self, item, test_func, field_name=None):
"""
Toggles the section based on test_func.
test_func takes an item and returns a boolean. If it returns True, the
item will be added to the given section. It will be removed from the
section otherwise.
Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL.
Behavior on other items is undefined.
"""
if test_func(item):
self.add_item(item, field_name)
return True
else:
self.remove_item(item, field_name)
return False | Toggles the section based on test_func.
test_func takes an item and returns a boolean. If it returns True, the
item will be added to the given section. It will be removed from the
section otherwise.
Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL.
Behavior on other items is undefined. |
def get_beam(header):
"""
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header.
BPA may be missing but will be assumed to be zero.
If BMAJ or BMIN are missing then return None instead of a beam object.
Parameters
----------
header : HDUHeader
The fits header.
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
Beam object, with a, b, and pa in degrees.
"""
if "BPA" not in header:
log.warning("BPA not present in fits header, using 0")
bpa = 0
else:
bpa = header["BPA"]
if "BMAJ" not in header:
log.warning("BMAJ not present in fits header.")
bmaj = None
else:
bmaj = header["BMAJ"]
if "BMIN" not in header:
log.warning("BMIN not present in fits header.")
bmin = None
else:
bmin = header["BMIN"]
if None in [bmaj, bmin, bpa]:
return None
beam = Beam(bmaj, bmin, bpa)
return beam | Create a :class:`AegeanTools.fits_image.Beam` object from a fits header.
BPA may be missing but will be assumed to be zero.
If BMAJ or BMIN are missing then return None instead of a beam object.
Parameters
----------
header : HDUHeader
The fits header.
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
Beam object, with a, b, and pa in degrees. |
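Because `get_beam` only looks keys up in the header, any mapping works for a quick check; a hedged sketch assuming the function and its `Beam`/`log` dependencies are importable from AegeanTools (the header values are made up):

```python
header = {"BMAJ": 0.01, "BMIN": 0.005}   # degrees; BPA deliberately missing
beam = get_beam(header)                  # logs a warning, then assumes BPA = 0
print(beam.a, beam.b, beam.pa)           # a/b/pa attributes per the docstring above

assert get_beam({"BPA": 30.0}) is None   # missing BMAJ/BMIN -> None
```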
def _elect_source_replication_group(
self,
over_replicated_rgs,
partition,
):
"""Decide source replication-group based as group with highest replica
count.
"""
return max(
over_replicated_rgs,
key=lambda rg: rg.count_replica(partition),
) | Decide the source replication group as the group with the highest replica
count. |
def _remove_qs(self, url):
'''
Removes a query string from a URL before signing.
:param url: The URL to strip.
:type url: str
'''
scheme, netloc, path, query, fragment = urlsplit(url)
return urlunsplit((scheme, netloc, path, '', fragment)) | Removes a query string from a URL before signing.
:param url: The URL to strip.
:type url: str |
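The effect of `_remove_qs` is easy to reproduce standalone with the standard library, since the method keeps no state; a self-contained sketch of the same query-stripping step:

```python
from urllib.parse import urlsplit, urlunsplit

def remove_qs(url):
    # standalone equivalent of the _remove_qs method above
    scheme, netloc, path, query, fragment = urlsplit(url)
    return urlunsplit((scheme, netloc, path, '', fragment))

print(remove_qs("https://example.com/api/v1/items?page=2&sort=asc#top"))
# -> https://example.com/api/v1/items#top
```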
def config(self, config):
"""Set config values from config dictionary."""
for section, data in config.items():
for variable, value in data.items():
self.set_value(section, variable, value) | Set config values from config dictionary. |
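The expected shape of the `config` argument is a two-level mapping of section to variable to value; a minimal self-contained sketch with a hypothetical `set_value` implementation:

```python
class Settings:
    """Toy object showing the section -> variable -> value shape config() expects."""

    def __init__(self):
        self.values = {}

    def set_value(self, section, variable, value):
        self.values[(section, variable)] = value

    def config(self, config):
        """Set config values from config dictionary."""
        for section, data in config.items():
            for variable, value in data.items():
                self.set_value(section, variable, value)

s = Settings()
s.config({"display": {"theme": "dark", "font_size": 12},
          "network": {"timeout": 30}})
print(s.values[("display", "theme")])   # -> dark
```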
def _Load(self,location):
"""Load all networks associated with the given location.
https://www.centurylinkcloud.com/api-docs/v2/#get-network-list#request
"""
# https://api.ctl.io/v2-experimental/networks/ALIAS/WA1
for network in clc.v2.API.Call('GET','/v2-experimental/networks/%s/%s' % (self.alias,location),{},session=self.session):
self.networks.append(Network(id=network['id'],alias=self.alias,network_obj=network,session=self.session)) | Load all networks associated with the given location.
https://www.centurylinkcloud.com/api-docs/v2/#get-network-list#request |
def update_store_credit_by_id(cls, store_credit_id, store_credit, **kwargs):
"""Update StoreCredit
Update attributes of StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_store_credit_by_id(store_credit_id, store_credit, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to update. (required)
:param StoreCredit store_credit: Attributes of storeCredit to update. (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs)
else:
(data) = cls._update_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs)
return data | Update StoreCredit
Update attributes of StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_store_credit_by_id(store_credit_id, store_credit, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to update. (required)
:param StoreCredit store_credit: Attributes of storeCredit to update. (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread. |
def compile_foreign_key(line, context, attributes, primary_key, attr_sql, foreign_key_sql, index_sql):
"""
:param line: a line from a table definition
:param context: namespace containing referenced objects
:param attributes: list of attribute names already in the declaration -- to be updated by this function
:param primary_key: None if the current foreign key is made from the dependent section. Otherwise it is the list
of primary key attributes thus far -- to be updated by the function
:param attr_sql: list of sql statements defining attributes -- to be updated by this function.
:param foreign_key_sql: list of sql statements specifying foreign key constraints -- to be updated by this function.
:param index_sql: list of INDEX declaration statements, duplicate or redundant indexes are ok.
"""
# Parse and validate
from .table import Table
from .expression import Projection
new_style = True # See issue #436. Old style to be deprecated in a future release
try:
result = foreign_key_parser.parseString(line)
except pp.ParseException:
try:
result = foreign_key_parser_old.parseString(line)
except pp.ParseBaseException as err:
raise DataJointError('Parsing error in line "%s". %s.' % (line, err)) from None
else:
new_style = False
try:
ref = eval(result.ref_table, context)
except Exception if new_style else NameError:
raise DataJointError('Foreign key reference %s could not be resolved' % result.ref_table)
options = [opt.upper() for opt in result.options]
for opt in options: # check for invalid options
if opt not in {'NULLABLE', 'UNIQUE'}:
raise DataJointError('Invalid foreign key option "{opt}"'.format(opt=opt))
is_nullable = 'NULLABLE' in options
is_unique = 'UNIQUE' in options
if is_nullable and primary_key is not None:
raise DataJointError('Primary dependencies cannot be nullable in line "{line}"'.format(line=line))
if not new_style:
if not isinstance(ref, type) or not issubclass(ref, Table):
raise DataJointError('Foreign key reference %r must be a valid query' % result.ref_table)
if isinstance(ref, type) and issubclass(ref, Table):
ref = ref()
# check that dependency is of supported type
if (not isinstance(ref, (Table, Projection)) or len(ref.restriction) or
(isinstance(ref, Projection) and (not isinstance(ref._arg, Table) or len(ref._arg.restriction)))):
raise DataJointError('Dependency "%s" is not supported (yet). Use a base table or its projection.' %
result.ref_table)
if not new_style:
# for backward compatibility with old-style dependency declarations. See issue #436
if not isinstance(ref, Table):
raise DataJointError('Dependency "%s" is not supported. Check documentation.' % result.ref_table)
if not all(r in ref.primary_key for r in result.ref_attrs):
raise DataJointError('Invalid foreign key attributes in "%s"' % line)
try:
raise DataJointError('Duplicate attributes "{attr}" in "{line}"'.format(
attr=next(attr for attr in result.new_attrs if attr in attributes),
line=line))
except StopIteration:
pass # the normal outcome
# Match the primary attributes of the referenced table to local attributes
new_attrs = list(result.new_attrs)
ref_attrs = list(result.ref_attrs)
# special case, the renamed attribute is implicit
if new_attrs and not ref_attrs:
if len(new_attrs) != 1:
raise DataJointError('Renamed foreign key must be mapped to the primary key in "%s"' % line)
if len(ref.primary_key) == 1:
# if the primary key has one attribute, allow implicit renaming
ref_attrs = ref.primary_key
else:
# if only one primary key attribute remains, then allow implicit renaming
ref_attrs = [attr for attr in ref.primary_key if attr not in attributes]
if len(ref_attrs) != 1:
raise DataJointError('Could not resolve which primary key attribute should be referenced in "%s"' % line)
if len(new_attrs) != len(ref_attrs):
raise DataJointError('Mismatched attributes in foreign key "%s"' % line)
if ref_attrs:
# convert to projected dependency
ref = ref.proj(**dict(zip(new_attrs, ref_attrs)))
# declare new foreign key attributes
base = ref._arg if isinstance(ref, Projection) else ref # base reference table
for attr, ref_attr in zip(ref.primary_key, base.primary_key):
if attr not in attributes:
attributes.append(attr)
if primary_key is not None:
primary_key.append(attr)
attr_sql.append(
base.heading[ref_attr].sql.replace(ref_attr, attr, 1).replace('NOT NULL ', '', int(is_nullable)))
# declare the foreign key
foreign_key_sql.append(
'FOREIGN KEY (`{fk}`) REFERENCES {ref} (`{pk}`) ON UPDATE CASCADE ON DELETE RESTRICT'.format(
fk='`,`'.join(ref.primary_key),
pk='`,`'.join(base.primary_key),
ref=base.full_table_name))
# declare unique index
if is_unique:
index_sql.append('UNIQUE INDEX ({attrs})'.format(attrs='`,`'.join(ref.primary_key))) | :param line: a line from a table definition
:param context: namespace containing referenced objects
:param attributes: list of attribute names already in the declaration -- to be updated by this function
:param primary_key: None if the current foreign key is made from the dependent section. Otherwise it is the list
of primary key attributes thus far -- to be updated by the function
:param attr_sql: list of sql statements defining attributes -- to be updated by this function.
:param foreign_key_sql: list of sql statements specifying foreign key constraints -- to be updated by this function.
:param index_sql: list of INDEX declaration statements, duplicate or redundant indexes are ok. |
def gen_table(self, inner_widths, inner_heights, outer_widths):
"""Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
"""
# Yield top border.
if self.outer_border:
yield self.horizontal_border('top', outer_widths)
# Yield table body.
row_count = len(self.table_data)
last_row_index, before_last_row_index = row_count - 1, row_count - 2
for i, row in enumerate(self.table_data):
# Yield the row line by line (e.g. multi-line rows).
if self.inner_heading_row_border and i == 0:
style = 'heading'
elif self.inner_footing_row_border and i == last_row_index:
style = 'footing'
else:
style = 'row'
for line in self.gen_row_lines(row, style, inner_widths, inner_heights[i]):
yield line
# If this is the last row then break. No separator needed.
if i == last_row_index:
break
# Yield heading separator.
if self.inner_heading_row_border and i == 0:
yield self.horizontal_border('heading', outer_widths)
# Yield footing separator.
elif self.inner_footing_row_border and i == before_last_row_index:
yield self.horizontal_border('footing', outer_widths)
# Yield row separator.
elif self.inner_row_border:
yield self.horizontal_border('row', outer_widths)
# Yield bottom border.
if self.outer_border:
yield self.horizontal_border('bottom', outer_widths) | Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return: |
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
.. versionchanged:: 2015.8.0
Added support for SunOS
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' network.traceroute archlinux.org
'''
ret = []
if not salt.utils.path.which('traceroute'):
log.info('This minion does not have traceroute installed')
return ret
cmd = 'traceroute {0}'.format(salt.utils.network.sanitize_host(host))
out = __salt__['cmd.run'](cmd)
# Parse version of traceroute
if salt.utils.platform.is_sunos() or salt.utils.platform.is_aix():
traceroute_version = [0, 0, 0]
else:
cmd2 = 'traceroute --version'
out2 = __salt__['cmd.run'](cmd2)
try:
# Linux traceroute version looks like:
# Modern traceroute for Linux, version 2.0.19, Dec 10 2012
# Darwin and FreeBSD traceroute version looks like: Version 1.4a12+[FreeBSD|Darwin]
traceroute_version_raw = re.findall(r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', out2)[0]
log.debug('traceroute_version_raw: %s', traceroute_version_raw)
traceroute_version = []
for t in traceroute_version_raw:
try:
traceroute_version.append(int(t))
except ValueError:
traceroute_version.append(t)
if len(traceroute_version) < 3:
traceroute_version.append(0)
log.debug('traceroute_version: %s', traceroute_version)
except IndexError:
traceroute_version = [0, 0, 0]
for line in out.splitlines():
if ' ' not in line:
continue
if line.startswith('traceroute'):
continue
if salt.utils.platform.is_aix():
if line.startswith('trying to get source for'):
continue
if line.startswith('source should be'):
continue
if line.startswith('outgoing MTU'):
continue
if line.startswith('fragmentation required'):
continue
if 'Darwin' in six.text_type(traceroute_version[1]) or \
'FreeBSD' in six.text_type(traceroute_version[1]) or \
__grains__['kernel'] in ('SunOS', 'AIX'):
try:
traceline = re.findall(r'\s*(\d*)\s+(.*)\s+\((.*)\)\s+(.*)$', line)[0]
except IndexError:
traceline = re.findall(r'\s*(\d*)\s+(\*\s+\*\s+\*)', line)[0]
log.debug('traceline: %s', traceline)
delays = re.findall(r'(\d+\.\d+)\s*ms', six.text_type(traceline))
try:
if traceline[1] == '* * *':
result = {
'count': traceline[0],
'hostname': '*'
}
else:
result = {
'count': traceline[0],
'hostname': traceline[1],
'ip': traceline[2],
}
for idx in range(0, len(delays)):
result['ms{0}'.format(idx + 1)] = delays[idx]
except IndexError:
result = {}
elif (traceroute_version[0] >= 2 and traceroute_version[2] >= 14
or traceroute_version[0] >= 2 and traceroute_version[1] > 0):
comps = line.split(' ')
if comps[1] == '* * *':
result = {
'count': int(comps[0]),
'hostname': '*'}
else:
result = {
'count': int(comps[0]),
'hostname': comps[1].split()[0],
'ip': comps[1].split()[1].strip('()'),
'ms1': float(comps[2].split()[0]),
'ms2': float(comps[3].split()[0]),
'ms3': float(comps[4].split()[0])}
else:
comps = line.split()
result = {
'count': comps[0],
'hostname': comps[1],
'ip': comps[2],
'ms1': comps[4],
'ms2': comps[6],
'ms3': comps[8],
'ping1': comps[3],
'ping2': comps[5],
'ping3': comps[7]}
ret.append(result)
return ret | Performs a traceroute to a 3rd party host
.. versionchanged:: 2015.8.0
Added support for SunOS
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' network.traceroute archlinux.org |
def create(self, create_missing=None):
"""Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1381129
<https://bugzilla.redhat.com/show_bug.cgi?id=1381129>`_.
"""
return type(self)(
self._server_config,
id=self.create_json(create_missing)['id'],
).read() | Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1381129
<https://bugzilla.redhat.com/show_bug.cgi?id=1381129>`_. |
def compile(self, session=None):
"""
Before calling the standard compile function, check to see if the size
of the data has changed and add variational parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data.
"""
if not self.num_data == self.X.shape[0]:
self.num_data = self.X.shape[0]
self.q_alpha = Parameter(np.zeros((self.num_data, self.num_latent)))
self.q_lambda = Parameter(np.ones((self.num_data, self.num_latent)),
transforms.positive)
return super(VGP_opper_archambeau, self).compile(session=session) | Before calling the standard compile function, check to see if the size
of the data has changed and add variational parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data. |
def config_xml_to_dict(contents, result, parse_job=True):
"""
Convert the contents of a XML config file
into the corresponding dictionary ::
dictionary[key_1] = value_1
dictionary[key_2] = value_2
...
dictionary[key_n] = value_n
:param bytes contents: the XML configuration contents
:param bool parse_job: if ``True``, parse the job properties;
if ``False``, parse the tasks properties
:rtype: dict (``parse_job=True``) or list of dict (``parse_job=False``)
"""
from lxml import etree
try:
root = etree.fromstring(contents)
pairs = []
if parse_job:
# parse job
for elem in root:
if (elem.tag != gc.CONFIG_XML_TASKS_TAG) and (elem.text is not None):
pairs.append(u"%s%s%s" % (
safe_unicode(elem.tag),
gc.CONFIG_STRING_ASSIGNMENT_SYMBOL,
safe_unicode(elem.text.strip())
))
return pairs_to_dict(pairs)
else:
# parse tasks
output_list = []
for task in root.find(gc.CONFIG_XML_TASKS_TAG):
if task.tag == gc.CONFIG_XML_TASK_TAG:
pairs = []
for elem in task:
if elem.text is not None:
pairs.append(u"%s%s%s" % (
safe_unicode(elem.tag),
gc.CONFIG_STRING_ASSIGNMENT_SYMBOL,
safe_unicode(elem.text.strip())
))
output_list.append(pairs_to_dict(pairs))
return output_list
except:
if result is not None:
result.passed = False
result.add_error("An error occurred while parsing XML file")
if parse_job:
return {}
else:
return [] | Convert the contents of a XML config file
into the corresponding dictionary ::
dictionary[key_1] = value_1
dictionary[key_2] = value_2
...
dictionary[key_n] = value_n
:param bytes contents: the XML configuration contents
:param bool parse_job: if ``True``, parse the job properties;
if ``False``, parse the tasks properties
:rtype: dict (``parse_job=True``) or list of dict (``parse_job=False``) |
def authorized_purchase_object(self, oid, price, huid):
"""Does delegated (pre-authorized) purchase of `oid` in the name of
`huid`, at price `price` (vingd transferred from `huid` to consumer's
acc).
:raises GeneralException:
:resource: ``objects/<oid>/purchases``
:access: authorized users with ACL flag ``purchase.object.authorize`` +
delegate permission required for the requester to charge the
user: ``purchase.object``
"""
return self.request(
'post',
safeformat('objects/{:int}/purchases', oid),
json.dumps({
'price': price,
'huid': huid,
'autocommit': True
})) | Does delegated (pre-authorized) purchase of `oid` in the name of
`huid`, at price `price` (vingd transferred from `huid` to consumer's
acc).
:raises GeneralException:
:resource: ``objects/<oid>/purchases``
:access: authorized users with ACL flag ``purchase.object.authorize`` +
delegate permission required for the requester to charge the
user: ``purchase.object`` |
def move_editorstack_data(self, start, end):
"""Reorder editorstack.data so it is synchronized with the tab bar when
tabs are moved."""
if start < 0 or end < 0:
return
else:
steps = abs(end - start)
direction = (end-start) // steps # +1 for right, -1 for left
data = self.data
self.blockSignals(True)
for i in range(start, end, direction):
data[i], data[i+direction] = data[i+direction], data[i]
self.blockSignals(False)
self.refresh() | Reorder editorstack.data so it is synchronized with the tab bar when
tabs are moved. |
def groups_setPurpose(self, *, channel: str, purpose: str, **kwargs) -> SlackResponse:
"""Sets the purpose for a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
purpose (str): The new purpose for the channel. e.g. 'My Purpose'
"""
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("groups.setPurpose", json=kwargs) | Sets the purpose for a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
purpose (str): The new purpose for the channel. e.g. 'My Purpose' |
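A hedged usage sketch, assuming the 2.x `slackclient` package layout where this method lives on `slack.WebClient`; the token and channel id are placeholders:

```python
import slack

client = slack.WebClient(token="xoxb-your-token")        # placeholder token
response = client.groups_setPurpose(channel="G1234567890",
                                    purpose="Coordinate the Q3 release")
assert response["ok"]
```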
def optimize(function, x0, cons=[], ftol=0.2, disp=0, plot=False):
"""
**Optimization method based on Brent's method**
First, a bracket (a b c) is sought that contains the minimum (b value is
smaller than both a or c).
The bracket is then recursively halved. Here we apply some modifications
to ensure our suggested point is not too close to either a or c,
because that could be problematic with the local approximation.
Also, if the bracket does not seem to include the minimum,
it is expanded generously in the right direction until it covers it.
Thus, this function is fail safe, and will always find a local minimum.
"""
if disp > 0:
print
print ' ===== custom 1d optimization routine ==== '
print
print 'initial suggestion on', function, ':', x0
points = []
values = []
def recordfunction(x):
v = function(x)
points.append(x)
values.append(v)
return v
(a, b, c), (va, vb, vc) = seek_minimum_bracket(recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot)
if disp > 0:
print '---------------------------------------------------'
print 'found usable minimum bracket after %d evaluations:' % len(points), (a, b, c), (va, vb, vc)
if disp > 2:
if plot:
plot_values(values, points, lastpoint=-1, ftol=ftol)
pause()
result = brent(recordfunction, a, b, c, va, vb, vc, cons=cons, ftol=ftol, disp=disp, plot=plot)
if disp > 0:
print '---------------------------------------------------'
print 'found minimum after %d evaluations:' % len(points), result
if disp > 1 or len(points) > 20:
if plot:
plot_values(values, points, lastpoint=-1, ftol=ftol)
if disp > 2:
pause()
if disp > 0:
print '---------------------------------------------------'
print
print ' ===== end of custom 1d optimization routine ==== '
print
global neval
neval += len(points)
return result | **Optimization method based on Brent's method**
First, a bracket (a b c) is sought that contains the minimum (b value is
smaller than both a or c).
The bracket is then recursively halved. Here we apply some modifications
to ensure our suggested point is not too close to either a or c,
because that could be problematic with the local approximation.
Also, if the bracket does not seem to include the minimum,
it is expanded generously in the right direction until it covers it.
Thus, this function is fail safe, and will always find a local minimum. |
def __process_equalities(self, equalities, momentequalities):
"""Generate localizing matrices
Arguments:
equalities -- list of equality constraints
momentequalities -- list of moment equality constraints
"""
monomial_sets = []
n_rows = 0
le = 0
if equalities is not None:
for equality in equalities:
le += 1
# Find the order of the localizing matrix
if equality.is_Relational:
equality = convert_relational(equality)
eq_order = ncdegree(equality)
if eq_order > 2 * self.level:
raise Exception("An equality constraint has degree %d. "
"Choose a higher level of relaxation."
% eq_order)
localization_order = (2 * self.level - eq_order)//2
index = find_variable_set(self.variables, equality)
localizing_monomials = \
pick_monomials_up_to_degree(self.monomial_sets[index],
localization_order)
if len(localizing_monomials) == 0:
localizing_monomials = [S.One]
localizing_monomials = unique(localizing_monomials)
monomial_sets.append(localizing_monomials)
n_rows += len(localizing_monomials) * \
(len(localizing_monomials) + 1) // 2
if momentequalities is not None:
for _ in momentequalities:
le += 1
monomial_sets.append([S.One])
n_rows += 1
A = np.zeros((n_rows, self.n_vars + 1), dtype=self.F.dtype)
n_rows = 0
if self._parallel:
pool = Pool()
for i, equality in enumerate(flatten([equalities, momentequalities])):
func = partial(moment_of_entry, monomials=monomial_sets[i],
ineq=equality, substitutions=self.substitutions)
lm = len(monomial_sets[i])
if self._parallel and lm > 1:
chunksize = max(int(np.sqrt(lm*lm/2) /
cpu_count()), 1)
iter_ = pool.map(func, ([row, column] for row in range(lm)
for column in range(row, lm)),
chunksize)
else:
iter_ = imap(func, ([row, column] for row in range(lm)
for column in range(row, lm)))
# Process M_y(gy)(u,w) entries
for row, column, polynomial in iter_:
# Calculate the moments of polynomial entries
if isinstance(polynomial, str):
self.__parse_expression(equality, -1, A[n_rows])
else:
A[n_rows] = self._get_facvar(polynomial)
n_rows += 1
if self.verbose > 0:
sys.stdout.write("\r\x1b[KProcessing %d/%d equalities..." %
(i+1, le))
sys.stdout.flush()
if self._parallel:
pool.close()
pool.join()
if self.verbose > 0:
sys.stdout.write("\n")
return A | Generate localizing matrices
Arguments:
equalities -- list of equality constraints
momentequalities -- list of moment equality constraints |
def create_topology(self, topologyName, topology):
""" crate topology """
if not topology or not topology.IsInitialized():
raise_(StateException("Topology protobuf not init properly",
StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])
path = self.get_topology_path(topologyName)
LOG.info("Adding topology: {0} to path: {1}".format(
topologyName, path))
topologyString = topology.SerializeToString()
try:
self.client.create(path, value=topologyString, makepath=True)
return True
except NoNodeError:
raise_(StateException("NoNodeError while creating topology",
StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
except NodeExistsError:
raise_(StateException("NodeExistsError while creating topology",
StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
except ZookeeperError:
raise_(StateException("Zookeeper while creating topology",
StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
except Exception:
# Just re raise the exception.
raise | create topology |
def _collect_block_lines(self, msgs_store, node, msg_state):
"""Recursively walk (depth first) AST to collect block level options
line numbers.
"""
for child in node.get_children():
self._collect_block_lines(msgs_store, child, msg_state)
first = node.fromlineno
last = node.tolineno
# first child line number used to distinguish between disable
# which are the first child of scoped node with those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. # pylint: disable=E1102
# 4. print self.blip
# 5. # pylint: disable=E1101
# 6. print self.bla
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to disable locally messages applying to class /
# function using their fromlineno
if (
isinstance(node, (nodes.Module, nodes.ClassDef, nodes.FunctionDef))
and node.body
):
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in msg_state.items():
for lineno, state in list(lines.items()):
original_lineno = lineno
if first > lineno or last < lineno:
continue
# Set state for all lines for this block, if the
# warning is applied to nodes.
message_definitions = msgs_store.get_message_definitions(msgid)
for message_definition in message_definitions:
if message_definition.scope == WarningScope.NODE:
if lineno > firstchildlineno:
state = True
first_, last_ = node.block_range(lineno)
else:
first_ = lineno
last_ = last
for line in range(first_, last_ + 1):
# do not override existing entries
if line in self._module_msgs_state.get(msgid, ()):
continue
if line in lines: # state change in the same block
state = lines[line]
original_lineno = line
if not state:
self._suppression_mapping[(msgid, line)] = original_lineno
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno] | Recursively walk (depth first) AST to collect block level options
line numbers. |
def api_version(self, v):
"""Set the api_version and associated configurations."""
self._api_version = v
if (self._api_version >= '2.0'):
self.default_quality = 'default'
self.allowed_qualities = ['default', 'color', 'bitonal', 'gray']
else: # versions 1.0 and 1.1
self.default_quality = 'native'
self.allowed_qualities = ['native', 'color', 'bitonal', 'grey'] | Set the api_version and associated configurations. |
def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
#create an evenly spaced linear space to map [0,1]-interval
#to actual values, giving each value an equal chance
#pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5*eps #use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))
#snap value to nearest point in space, store index
idx = numpy.abs(linspace-x[i]).argmin()
#safeguard that should not be needed
idx = min(max(idx, 0), len(v)-1)
#use index into array of actual values
x_u[i] = v[idx]
return x_u | helper func that snaps a scaled variable to the nearest config |
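A worked example of the snapping, with two hypothetical tunable parameters and `eps = 1.0` so that each value owns a unit-width slot of the scaled interval:

```python
from collections import OrderedDict
import numpy

tune_params = OrderedDict()
tune_params["block_size"] = [32, 64, 128, 256]   # hypothetical parameter values
tune_params["tile"] = [1, 2, 4]

eps = 1.0
x = [1.7, 2.2]   # a point in the scaled [0, eps*len(v)] space per parameter

print(unscale_and_snap_to_nearest(x, tune_params, eps))
# -> [64, 4]: 1.7 is nearest the 2nd of 4 slot centres, 2.2 the 3rd of 3
```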
def add_item(self, item, replace = False):
"""
Add an item to the roster.
This will not automatically update the roster on the server.
:Parameters:
- `item`: the item to add
- `replace`: if `True` then existing item will be replaced,
otherwise a `ValueError` will be raised on conflict
:Types:
- `item`: `RosterItem`
- `replace`: `bool`
"""
if item.jid in self._jids:
if replace:
self.remove_item(item.jid)
else:
raise ValueError("JID already in the roster")
index = len(self._items)
self._items.append(item)
self._jids[item.jid] = index | Add an item to the roster.
This will not automatically update the roster on the server.
:Parameters:
- `item`: the item to add
- `replace`: if `True` then existing item will be replaced,
otherwise a `ValueError` will be raised on conflict
:Types:
- `item`: `RosterItem`
- `replace`: `bool` |
def get_colors(n, cmap='viridis', start=0., stop=1., alpha=1., return_hex=False):
"""
Return n-length list of RGBa colors from the passed colormap name and alpha.
Parameters
----------
n : int
number of colors
cmap : string
name of a colormap
start : float
where to start in the colorspace
stop : float
where to end in the colorspace
alpha : float
opacity, the alpha channel for the RGBa colors
return_hex : bool
if True, convert RGBa colors to a hexadecimal string
Returns
-------
colors : list
"""
colors = [cm.get_cmap(cmap)(x) for x in np.linspace(start, stop, n)]
colors = [(r, g, b, alpha) for r, g, b, _ in colors]
if return_hex:
colors = rgb_color_list_to_hex(colors)
return colors | Return n-length list of RGBa colors from the passed colormap name and alpha.
Parameters
----------
n : int
number of colors
cmap : string
name of a colormap
start : float
where to start in the colorspace
stop : float
where to end in the colorspace
alpha : float
opacity, the alpha channel for the RGBa colors
return_hex : bool
if True, convert RGBa colors to a hexadecimal string
Returns
-------
colors : list |
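A short usage sketch (matplotlib and numpy are the only requirements; `return_hex` is left at its default because `rgb_color_list_to_hex` is the module's own helper):

```python
colors = get_colors(5, cmap='viridis', alpha=0.8)   # uses get_colors() from above
print(len(colors))   # -> 5
print(colors[0])     # an (r, g, b, 0.8) tuple from the start of the colormap
```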
def check_known_host(user=None, hostname=None, key=None, fingerprint=None,
config=None, port=None, fingerprint_hash_type=None):
'''
Check the record in known_hosts file, either by its value or by fingerprint
(it's enough to set up either key or fingerprint, you don't need to set up
both).
If provided key or fingerprint doesn't match with stored value, return
"update", if no value is found for a given host, return "add", otherwise
return "exists".
If neither key, nor fingerprint is defined, then additional validation is
not performed.
CLI Example:
.. code-block:: bash
salt '*' ssh.check_known_host <user> <hostname> key='AAAA...FAaQ=='
'''
if not hostname:
return {'status': 'error',
'error': 'hostname argument required'}
if not user:
config = config or '/etc/ssh/ssh_known_hosts'
else:
config = config or '.ssh/known_hosts'
known_host_entries = get_known_host_entries(user,
hostname,
config=config,
port=port,
fingerprint_hash_type=fingerprint_hash_type)
known_keys = [h['key'] for h in known_host_entries] if known_host_entries else []
known_fingerprints = [h['fingerprint'] for h in known_host_entries] if known_host_entries else []
if not known_host_entries:
return 'add'
if key:
return 'exists' if key in known_keys else 'update'
elif fingerprint:
return ('exists' if fingerprint in known_fingerprints
else 'update')
else:
return 'exists' | Check the record in known_hosts file, either by its value or by fingerprint
(it's enough to set up either key or fingerprint, you don't need to set up
both).
If provided key or fingerprint doesn't match with stored value, return
"update", if no value is found for a given host, return "add", otherwise
return "exists".
If neither key, nor fingerprint is defined, then additional validation is
not performed.
CLI Example:
.. code-block:: bash
salt '*' ssh.check_known_host <user> <hostname> key='AAAA...FAaQ==' |
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs) | A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior. |
def get_es_label(obj, def_obj):
"""
Returns object with label for an object that goes into the elasticsearch
'label' field
args:
obj: data object to update
def_obj: the class instance that has definition values
"""
label_flds = LABEL_FIELDS
if def_obj.es_defs.get('kds_esLabel'):
label_flds = def_obj.es_defs['kds_esLabel'] + LABEL_FIELDS
try:
for label in label_flds:
if def_obj.cls_defs.get(label):
obj['label'] = def_obj.cls_defs[label][0]
break
if not obj.get('label'):
obj['label'] = def_obj.__class__.__name__.split("_")[-1]
except AttributeError:
# an attribute error is caused when the class is only
# an instance of the BaseRdfClass. We will search the rdf_type
# property and construct a label from rdf_type value
if def_obj.get('rdf_type'):
obj['label'] = def_obj['rdf_type'][-1].value[-1]
else:
obj['label'] = "no_label"
return obj | Returns object with label for an object that goes into the elasticsearch
'label' field
args:
obj: data object to update
def_obj: the class instance that has definition values |
def p_param_def_type(p):
""" param_def : ID typedef
"""
if p[2] is not None:
api.check.check_type_is_explicit(p.lineno(1), p[1], p[2])
p[0] = make_param_decl(p[1], p.lineno(1), p[2]) | param_def : ID typedef |
def save_stream(self, key, binary=False):
"""
Return a managed file-like object into which the calling code can write
arbitrary data.
:param key:
:return: A managed stream-like object
"""
s = io.BytesIO() if binary else io.StringIO()
yield s
self.save_value(key, s.getvalue()) | Return a managed file-like object into which the calling code can write
arbitrary data.
:param key:
:return: A managed stream-like object |
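The `yield` in the middle suggests `save_stream` is meant to be wrapped as a context manager (e.g. with `contextlib.contextmanager`); a self-contained stand-in that demonstrates the same pattern with an in-memory store (the class and key are hypothetical):

```python
import io
from contextlib import contextmanager

class MemoryStore:
    """Toy store reproducing the save_stream/save_value pattern above."""

    def __init__(self):
        self.saved = {}

    def save_value(self, key, value):
        self.saved[key] = value

    @contextmanager
    def save_stream(self, key, binary=False):
        s = io.BytesIO() if binary else io.StringIO()
        yield s
        self.save_value(key, s.getvalue())

store = MemoryStore()
with store.save_stream("report.txt") as stream:
    stream.write("hello\n")
print(store.saved["report.txt"])   # -> hello
```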
def get_placeholders(self, format_string):
"""
Parses the format_string and returns a set of placeholders.
"""
placeholders = set()
# Tokenize the format string and process them
for token in self.tokens(format_string):
if token.group("placeholder"):
placeholders.add(token.group("key"))
elif token.group("command"):
# get any placeholders used in commands
commands = dict(parse_qsl(token.group("command")))
# placeholders only used in `if`
if_ = commands.get("if")
if if_:
placeholders.add(Condition(if_).variable)
return placeholders | Parses the format_string and returns a set of placeholders. |
def reset(self, value=None):
"""
Resets the start time of the interval to now or the specified value.
"""
if value is None:
value = time.clock()
self.start = value
if self.value_on_reset:
self.value = self.value_on_reset | Resets the start time of the interval to now or the specified value. |
def featurewise_norm(x, mean=None, std=None, epsilon=1e-7):
"""Normalize every pixels by the same given mean and std, which are usually
compute from all examples.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
mean : float
Value for subtraction.
std : float
Value for division.
epsilon : float
A small positive value added to the standard deviation before dividing.
Returns
-------
numpy.array
A processed image.
"""
if mean:
x = x - mean
if std:
x = x / (std + epsilon)
return x | Normalize every pixel by the same given mean and std, which are usually
computed from all examples.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
mean : float
Value for subtraction.
std : float
Value for division.
epsilon : float
A small positive value added to the standard deviation before dividing.
Returns
-------
numpy.array
A processed image. |
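A small numpy example, with the mean and std computed over a toy dataset as the docstring suggests (array shapes and values are made up):

```python
import numpy as np

rng = np.random.RandomState(0)
images = rng.uniform(0, 255, size=(10, 8, 8, 1))     # toy dataset of 10 images

mean, std = images.mean(), images.std()              # statistics over all examples
x = featurewise_norm(images[0], mean=mean, std=std)  # uses the function above
print(round(float(x.mean()), 2), round(float(x.std()), 2))   # roughly 0 and 1
```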
def _getStrippedValue(value, strip):
"""Like the strip() string method, except the strip argument describes
different behavior:
If strip is None, whitespace is stripped.
If strip is a string, the characters in the string are stripped.
If strip is False, nothing is stripped."""
if strip is None:
value = value.strip() # Call strip() with no arguments to strip whitespace.
elif isinstance(strip, str):
value = value.strip(strip) # Call strip(), passing the strip argument.
elif strip is False:
pass # Don't strip anything.
return value | Like the strip() string method, except the strip argument describes
different behavior:
If strip is None, whitespace is stripped.
If strip is a string, the characters in the string are stripped.
If strip is False, nothing is stripped. |
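The three `strip` modes behave as follows (using the helper above):

```python
print(repr(_getStrippedValue("  hello  ", None)))    # 'hello'      (whitespace stripped)
print(repr(_getStrippedValue("xxhelloxx", "x")))     # 'hello'      (given chars stripped)
print(repr(_getStrippedValue("  hello  ", False)))   # '  hello  '  (nothing stripped)
```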
def clean_helper(B, obj, clean_func):
"""
Clean object, intercepting and collecting any missing-relation or
unique-constraint errors and returning the relevant resource ids/fields.
Returns:
- tuple: (<dict of non-unique fields>, <dict of missing refs>)
"""
try:
clean_func(obj)
except B.validation_error() as e:
# _debug.log_validation_errors(B, e, obj, k)
# Check if it's a uniqueness or missing relation error
fields = B.detect_uniqueness_error(e)
missing = B.detect_missing_relations(obj, e)
return fields, missing
return (None, None) | Clean object, intercepting and collecting any missing-relation or
unique-constraint errors and returning the relevant resource ids/fields.
Returns:
- tuple: (<dict of non-unique fields>, <dict of missing refs>) |
def _generate_event_listener_caller(executables: List[str]) -> LockEventListener:
"""
Build a single callable that invokes each of the given executables with the
lock key as its only argument, logging (but not propagating) any failures.
:param executables: paths of the executables to call when a lock event fires
:return: a callable that takes the lock key and runs every executable with it
"""
def event_listener_caller(key: str):
for executable in executables:
try:
process = subprocess.Popen([executable, key], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
output, stderr = process.communicate()
if len(stderr) > 0:
logger.info(f"stderr from executing \"{executable}\": {stderr.decode('utf-8').strip()}")
if process.returncode != 0:
logger.error(f"Error when executing \"{executable}\": return code was {process.returncode}")
# Not falling over if event listener does!
except OSError as e:
common_error_string = f"Could not execute \"{executable}\":"
if e.errno == errno.ENOEXEC:
logger.warning(f"{common_error_string} {e} (perhaps the executable needs a shebang?)")
else:
logger.warning(f"{common_error_string} {e}")
return event_listener_caller | Build a single callable that invokes each of the given executables with the
lock key as its only argument, logging (but not propagating) any failures.
:param executables: paths of the executables to call when a lock event fires
:return: a callable that takes the lock key and runs every executable with it |
def skip(self, content):
"""
Get whether to skip this I{content}.
Should be skipped when the content is optional and value is either None
or an empty list.
@param content: Content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool
"""
if self.optional(content):
v = content.value
if v is None:
return True
if isinstance(v, (list, tuple)) and not v:
return True
return False | Get whether to skip this I{content}.
Should be skipped when the content is optional and value is either None
or an empty list.
@param content: Content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool |
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / (prod_i n_i!)```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / sum(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with tf.name_scope(name):
n = tf.convert_to_tensor(value=n, name="n")
counts = tf.convert_to_tensor(value=counts, name="counts")
total_permutations = tf.math.lgamma(n + 1)
counts_factorial = tf.math.lgamma(counts + 1)
redundant_permutations = tf.reduce_sum(
input_tensor=counts_factorial, axis=[-1])
return total_permutations - redundant_permutations | Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / (prod_i n_i!)```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`. |
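A worked check of the formula: for `n = 3` and `counts = [1, 2]` the multinomial coefficient is 3!/(1!·2!) = 3, so the result should be log 3 ≈ 1.0986. The same lgamma arithmetic can be verified in pure Python without TensorFlow:

```python
import math

n, counts = 3.0, [1.0, 2.0]
log_coeff = math.lgamma(n + 1) - sum(math.lgamma(c + 1) for c in counts)
print(log_coeff)             # ~1.0986, i.e. log(3), since 3!/(1!*2!) = 3
print(math.exp(log_coeff))   # ~3.0
```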
def report_error(title=None, data={}, caught=None, is_fatal=False):
"""Format a crash report and send it somewhere relevant. There are two
types of crashes: fatal crashes (backend errors) or non-fatal ones (just
reporting a glitch, but the api call did not fail)"""
# Don't report errors if NO_ERROR_REPORTING set to 1 (set by run_acceptance_tests)
if os.environ.get('DO_REPORT_ERROR', None):
# Force error reporting
pass
elif os.environ.get('NO_ERROR_REPORTING', '') == '1':
log.info("NO_ERROR_REPORTING is set: not reporting error!")
return
elif 'is_ec2_instance' in data:
if not data['is_ec2_instance']:
# Not running on amazon: no reporting
log.info("DATA[is_ec2_instance] is False: not reporting error!")
return
elif not is_ec2_instance():
log.info("Not running on an EC2 instance: not reporting error!")
return
# Fill error report with tons of useful data
if 'user' not in data:
populate_error_report(data)
# Add the message
data['title'] = title
data['is_fatal_error'] = is_fatal
# Add the error caught, if any:
if caught:
data['error_caught'] = "%s" % caught
# Add a trace - Formatting traceback may raise a UnicodeDecodeError...
data['stack'] = []
try:
data['stack'] = [l for l in traceback.format_stack()]
except Exception:
data['stack'] = 'Skipped trace - contained non-ascii chars'
# inspect may raise a UnicodeDecodeError...
fname = ''
try:
fname = inspect.stack()[1][3]
except Exception as e:
fname = 'unknown-method'
# Format the error's title
status, code = 'unknown_status', 'unknown_error_code'
if 'response' in data:
status = data['response'].get('status', status)
code = data['response'].get('error_code', code)
title_details = "%s %s %s" % (ApiPool().current_server_name, status, code)
else:
title_details = "%s %s()" % (ApiPool().current_server_name, fname)
if is_fatal:
title_details = 'FATAL ERROR %s' % title_details
else:
title_details = 'NON-FATAL ERROR %s' % title_details
if title:
title = "%s: %s" % (title_details, title)
else:
title = title_details
global error_reporter
log.info("Reporting crash...")
try:
error_reporter(title, json.dumps(data, sort_keys=True, indent=4))
except Exception as e:
# Don't block on replying to api caller
log.error("Failed to send email report: %s" % str(e)) | Format a crash report and send it somewhere relevant. There are two
types of crashes: fatal crashes (backend errors) or non-fatal ones (just
reporting a glitch, but the api call did not fail) |
def get_response_handler(self):
"""Return the Endpoints defined :attr:`Endpoint.response_handler`.
:returns: An instance of the Endpoint's specified :class:`ResponseHandler`.
:rtype: :class:`ResponseHandler`
"""
assert self.response_handler is not None, \
'Please define a response_handler ' \
' for Endpoint: %s' % self.__class__.__name__
return self.response_handler(self, **self.get_response_handler_params()) | Return the Endpoints defined :attr:`Endpoint.response_handler`.
:returns: An instance of the Endpoint's specified :class:`ResponseHandler`.
:rtype: :class:`ResponseHandler` |
def load_HEP_data(
ROOT_filename = "output.root",
tree_name = "nominal",
maximum_number_of_events = None
):
"""
Load HEP data and return dataset.
"""
ROOT_file = open_ROOT_file(ROOT_filename)
tree = ROOT_file.Get(tree_name)
number_of_events = tree.GetEntries()
data = datavision.Dataset()
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
# counters
number_of_events_loaded = 0
log.info("")
index = 0
for event in tree:
if maximum_number_of_events is not None and\
number_of_events_loaded >= int(maximum_number_of_events):
log.info(
"loaded maximum requested number of events " +
"({maximum_number_of_events})\r".format(
maximum_number_of_events = maximum_number_of_events
)
)
break
print progress.add_datum(fraction = (index + 2) / number_of_events),
if select_event(event):
index += 1
#event.GetReadEntry()
#data.variable(index = index, name = "eventNumber", value = event.eventNumber)
data.variable(index = index, name = "el_1_pt", value = event.el_pt[0])
#data.variable(index = index, name = "el_1_eta", value = event.el_eta[0])
#data.variable(index = index, name = "el_1_phi", value = event.el_phi[0])
##data.variable(index = index, name = "jet_1_pt", value = event.jet_pt[0])
#data.variable(index = index, name = "jet_1_eta", value = event.jet_eta[0])
#data.variable(index = index, name = "jet_1_phi", value = event.jet_phi[0])
##data.variable(index = index, name = "jet_1_e", value = event.jet_e[0])
##data.variable(index = index, name = "jet_2_pt", value = event.jet_pt[1])
#data.variable(index = index, name = "jet_2_eta", value = event.jet_eta[1])
#data.variable(index = index, name = "jet_2_phi", value = event.jet_phi[1])
##data.variable(index = index, name = "jet_2_e", value = event.jet_e[1])
#data.variable(index = index, name = "nJets", value = event.nJets)
##data.variable(index = index, name = "nBTags", value = event.nBTags)
##data.variable(index = index, name = "nLjets", value = event.nLjets)
##data.variable(index = index, name = "ljet_1_m", value = event.ljet_m[0])
#data.variable(index = index, name = "met", value = event.met_met)
#data.variable(index = index, name = "met_phi", value = event.met_phi)
#data.variable(index = index, name = "Centrality_all", value = event.Centrality_all)
#data.variable(index = index, name = "Mbb_MindR", value = event.Mbb_MindR)
#data.variable(index = index, name = "ljet_tau21", value = event.ljet_tau21),
#data.variable(index = index, name = "ljet_tau32", value = event.ljet_tau32),
#data.variable(index = index, name = "Aplan_bjets", value = event.Aplan_bjets),
#data.variable(index = index, name = "H4_all", value = event.H4_all),
#data.variable(index = index, name = "NBFricoNN_6jin4bin", value = event.NBFricoNN_6jin4bin),
#data.variable(index = index, name = "NBFricoNN_6jin3bex", value = event.NBFricoNN_6jin3bex),
#data.variable(index = index, name = "NBFricoNN_5jex4bin", value = event.NBFricoNN_5jex4bin),
#data.variable(index = index, name = "NBFricoNN_3jex3bex", value = event.NBFricoNN_3jex3bex),
#data.variable(index = index, name = "NBFricoNN_4jin3bex", value = event.NBFricoNN_4jin3bex),
#data.variable(index = index, name = "NBFricoNN_4jin4bin", value = event.NBFricoNN_4jin4bin)
number_of_events_loaded += 1
log.info("")
return data | Load HEP data and return dataset. |
def refresh_ip(self, si, logger, session, vcenter_data_model, resource_model, cancellation_context,
app_request_json):
"""
Refreshes IP address of virtual machine and updates Address property on the resource
:param vim.ServiceInstance si: py_vmomi service instance
:param logger:
:param vCenterShell.driver.SecureCloudShellApiSession session: cloudshell session
:param GenericDeployedAppResourceModel resource_model: UUID of Virtual Machine
:param VMwarevCenterResourceModel vcenter_data_model: the vcenter data model attributes
:param cancellation_context:
"""
self._do_not_run_on_static_vm(app_request_json=app_request_json)
default_network = VMLocation.combine(
[vcenter_data_model.default_datacenter, vcenter_data_model.holding_network])
match_function = self.ip_manager.get_ip_match_function(
self._get_ip_refresh_ip_regex(resource_model.vm_custom_params))
timeout = self._get_ip_refresh_timeout(resource_model.vm_custom_params)
vm = self.pyvmomi_service.find_by_uuid(si, resource_model.vm_uuid)
ip_res = self.ip_manager.get_ip(vm, default_network, match_function, cancellation_context, timeout, logger)
if ip_res.reason == IpReason.Timeout:
raise ValueError('IP address of VM \'{0}\' could not be obtained during {1} seconds'
.format(resource_model.fullname, timeout))
if ip_res.reason == IpReason.Success:
self._update_resource_address_with_retry(session=session,
resource_name=resource_model.fullname,
ip_address=ip_res.ip_address)
return ip_res.ip_address | Refreshes IP address of virtual machine and updates Address property on the resource
:param vim.ServiceInstance si: py_vmomi service instance
:param logger:
:param vCenterShell.driver.SecureCloudShellApiSession session: cloudshell session
:param GenericDeployedAppResourceModel resource_model: the deployed app resource model (carries the VM UUID)
:param VMwarevCenterResourceModel vcenter_data_model: the vcenter data model attributes
:param cancellation_context: |
def _npy_num2fits(d, table_type='binary', write_bitcols=False):
"""
d is the full element from the descr
For vector,array columns the form is the total counts
followed by the code.
For array columns with dimension greater than 1, the dim is set to
(dim1, dim2, ...)
So it is treated like an extra dimension
"""
dim = None
name = d[0]
npy_dtype = d[1][1:]
if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
raise ValueError("got S or U type: use _npy_string2fits")
if npy_dtype not in _table_npy2fits_form:
raise ValueError("unsupported type '%s'" % npy_dtype)
if table_type == 'binary':
form = _table_npy2fits_form[npy_dtype]
else:
form = _table_npy2fits_form_ascii[npy_dtype]
# now the dimensions
if len(d) > 2:
if table_type == 'ascii':
raise ValueError(
"Ascii table columns must be scalar, got %s" % str(d))
if write_bitcols and npy_dtype == 'b1':
# multi-dimensional boolean
form = 'X'
# Note, depending on numpy version, even 1-d can be a tuple
if isinstance(d[2], tuple):
count = reduce(lambda x, y: x*y, d[2])
form = '%d%s' % (count, form)
if len(d[2]) > 1:
# this is multi-dimensional array column. the form
# should be total elements followed by A
dim = list(reversed(d[2]))
dim = [str(e) for e in dim]
dim = '(' + ','.join(dim)+')'
else:
# this is a vector (1d array) column
count = d[2]
form = '%d%s' % (count, form)
return name, form, dim | d is the full element from the descr
For vector,array columns the form is the total counts
followed by the code.
For array columns with dimension greater than 1, the dim is set to
(dim1, dim2, ...)
So it is treated like an extra dimension |
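A minimal, self-contained sketch of the count-and-dim logic above: the repeat count is the product of the array dimensions, and the TDIM string lists those dimensions in reversed order. The tiny type-code mapping used here is an assumption for illustration only, not the module's real _table_npy2fits_form table.

from functools import reduce

# Hypothetical subset of the numpy->FITS type-code mapping (illustration only)
npy2fits = {'f8': 'D', 'i4': 'J'}

def demo_form_and_dim(descr_entry):
    # descr_entry looks like ('colname', '>f8', (2, 3))
    name, npy_dtype, dims = descr_entry[0], descr_entry[1][1:], descr_entry[2]
    count = reduce(lambda x, y: x * y, dims)       # total number of elements
    form = '%d%s' % (count, npy2fits[npy_dtype])   # e.g. '6D'
    dim = '(' + ','.join(str(e) for e in reversed(dims)) + ')'  # reversed, e.g. '(3,2)'
    return name, form, dim

print(demo_form_and_dim(('x', '>f8', (2, 3))))  # ('x', '6D', '(3,2)')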
def hwstatus_send(self, Vcc, I2Cerr, force_mavlink1=False):
'''
Status of key hardware
Vcc : board voltage (mV) (uint16_t)
I2Cerr : I2C error count (uint8_t)
'''
return self.send(self.hwstatus_encode(Vcc, I2Cerr), force_mavlink1=force_mavlink1) | Status of key hardware
Vcc : board voltage (mV) (uint16_t)
I2Cerr : I2C error count (uint8_t) |
def delta(x_i, j, s, N):
""" delta_i_j_s """
flag = j == EMMMixPLAggregator.c(x_i, s)
if flag and s < len(x_i):
return 1
elif s == N:
found_equal = False
for l in range(len(x_i)):
if j == EMMMixPLAggregator.c(x_i, l):
found_equal = True
break
if not found_equal:
return 1
return 0 | delta_i_j_s |
def setAnimation(self,obj,animation,transition=None,force=False):
"""
Sets the animation to be used by the object.
See :py:meth:`Actor.setAnimation()` for more information.
"""
self.ensureModelData(obj)
data = obj._modeldata
# Validity check
if animation not in self.modeldata["animations"]:
raise ValueError("There is no animation of name '%s' for model '%s'"%(animation,self.modelname))
if data.get("_anidata",{}).get("anitype",None)==animation and not force:
return # animation is already running
# Cache the obj to improve readability
anim = self.modeldata["animations"][animation]
# Set to default if not set
if transition is None:
transition = anim.default_jt
# Notify the animation to allow it to initialize itself
anim.startAnimation(data,transition)
# initialize animation data
if "_anidata" not in data:
data["_anidata"]={}
adata = data["_anidata"]
adata["anitype"]=animation
if "_schedfunc" in adata:
# unschedule the old animation, if any
# prevents clashing and crashes
pyglet.clock.unschedule(adata["_schedfunc"])
# Schedule the animation function
def schedfunc(*args):
# This function is defined locally to create a closure
# The closure stores the local variables, e.g. anim and data even after the parent function has finished
# Note that this may also prevent the garbage collection of any objects defined in the parent scope
anim.tickEntity(data)
# register the function to pyglet
pyglet.clock.schedule_interval(schedfunc,1./(anim.kps if anim.atype=="keyframes" else 60))
# save it for later for de-initialization
adata["_schedfunc"] = schedfunc | Sets the animation to be used by the object.
See :py:meth:`Actor.setAnimation()` for more information. |
def get_detail_view(self, request, object, opts=None):
"""
Instantiates and returns the view class that will generate the actual
context for this plugin.
"""
view = self.get_view(request, self.view_class, opts)
view.object = object
return view | Instantiates and returns the view class that will generate the actual
context for this plugin. |
def coef_(self):
"""
Coefficients property
.. note:: Coefficients are defined only for linear learners
Coefficients are only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types, such
as tree learners (`booster=gbtree`).
Returns
-------
coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``
"""
if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
raise AttributeError('Coefficients are not defined for Booster type {}'
.format(self.booster))
b = self.get_booster()
coef = np.array(json.loads(b.get_dump(dump_format='json')[0])['weight'])
# Logic for multiclass classification
n_classes = getattr(self, 'n_classes_', None)
if n_classes is not None:
if n_classes > 2:
assert len(coef.shape) == 1
assert coef.shape[0] % n_classes == 0
coef = coef.reshape((n_classes, -1))
return coef | Coefficients property
.. note:: Coefficients are defined only for linear learners
Coefficients are only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types, such
as tree learners (`booster=gbtree`).
Returns
-------
coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]`` |
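The multiclass branch above only reshapes one flat weight vector into an (n_classes, n_features) matrix. A small numpy sketch of that step with made-up numbers (no real booster involved):

import numpy as np

n_classes, n_features = 3, 4
coef = np.arange(n_classes * n_features, dtype=float)  # flat dump, made-up values
assert len(coef.shape) == 1
assert coef.shape[0] % n_classes == 0
coef = coef.reshape((n_classes, -1))
print(coef.shape)  # (3, 4)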
def url_for(self, endpoint, explicit=False, **items):
'''Returns a valid XBMC plugin URL for the given endpoint name.
endpoint can be the literal name of a function, or it can
correspond to the name keyword argument passed to the route
decorator.
Currently, view names must be unique across all plugins and
modules. There are no namespace prefixes for modules.
'''
# TODO: Enable items to be passed with keywords of other var names
# such as endpoint and explicit
# TODO: Figure out how to handle the case where a module wants to
# call a parent plugin view.
if not explicit and not endpoint.startswith(self._namespace):
endpoint = '%s.%s' % (self._namespace, endpoint)
return self._plugin.url_for(endpoint, **items) | Returns a valid XBMC plugin URL for the given endpoint name.
endpoint can be the literal name of a function, or it can
correspond to the name keyword argument passed to the route
decorator.
Currently, view names must be unique across all plugins and
modules. There are no namespace prefixes for modules. |
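The only non-obvious step above is the namespace prefixing. A stand-alone sketch of that rule, with a made-up namespace value:

def qualify(endpoint, namespace='addon.main', explicit=False):
    # Prefix the endpoint with the module namespace unless the caller
    # asked for an explicit name or it is already qualified.
    if not explicit and not endpoint.startswith(namespace):
        endpoint = '%s.%s' % (namespace, endpoint)
    return endpoint

print(qualify('show_items'))                        # addon.main.show_items
print(qualify('other.module.view', explicit=True))  # unchanged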
def matchToString(dnaMatch, read1, read2, matchAmbiguous=True, indent='',
offsets=None):
"""
Format a DNA match as a string.
@param dnaMatch: A C{dict} returned by C{compareDNAReads}.
@param read1: A C{Read} instance or an instance of one of its subclasses.
@param read2: A C{Read} instance or an instance of one of its subclasses.
@param matchAmbiguous: If C{True}, ambiguous nucleotides that are
possibly correct were counted as actually being correct. Otherwise,
the match was done strictly, insisting that only non-ambiguous
nucleotides could contribute to the matching nucleotide count.
@param indent: A C{str} to indent all returned lines with.
@param offsets: If not C{None}, a C{set} of offsets of interest that were
only considered when making C{match}.
@return: A C{str} describing the match.
"""
match = dnaMatch['match']
identicalMatchCount = match['identicalMatchCount']
ambiguousMatchCount = match['ambiguousMatchCount']
gapMismatchCount = match['gapMismatchCount']
gapGapMismatchCount = match['gapGapMismatchCount']
nonGapMismatchCount = match['nonGapMismatchCount']
if offsets:
len1 = len2 = len(offsets)
else:
len1, len2 = map(len, (read1, read2))
result = []
append = result.append
append(countPrint('%sExact matches' % indent, identicalMatchCount,
len1, len2))
append(countPrint('%sAmbiguous matches' % indent, ambiguousMatchCount,
len1, len2))
if ambiguousMatchCount and identicalMatchCount:
anyMatchCount = identicalMatchCount + ambiguousMatchCount
append(countPrint('%sExact or ambiguous matches' % indent,
anyMatchCount, len1, len2))
mismatchCount = (gapMismatchCount + gapGapMismatchCount +
nonGapMismatchCount)
append(countPrint('%sMismatches' % indent, mismatchCount, len1, len2))
conflicts = 'conflicts' if matchAmbiguous else 'conflicts or ambiguities'
append(countPrint('%s Not involving gaps (i.e., %s)' % (indent,
conflicts), nonGapMismatchCount, len1, len2))
append(countPrint('%s Involving a gap in one sequence' % indent,
gapMismatchCount, len1, len2))
append(countPrint('%s Involving a gap in both sequences' % indent,
gapGapMismatchCount, len1, len2))
for read, key in zip((read1, read2), ('read1', 'read2')):
append('%s Id: %s' % (indent, read.id))
length = len(read)
append('%s Length: %d' % (indent, length))
gapCount = len(dnaMatch[key]['gapOffsets'])
append(countPrint('%s Gaps' % indent, gapCount, length))
if gapCount:
append(
'%s Gap locations (1-based): %s' %
(indent,
', '.join(map(lambda offset: str(offset + 1),
sorted(dnaMatch[key]['gapOffsets'])))))
ambiguousCount = len(dnaMatch[key]['ambiguousOffsets'])
append(countPrint('%s Ambiguous' % indent, ambiguousCount, length))
extraCount = dnaMatch[key]['extraCount']
if extraCount:
append(countPrint('%s Extra nucleotides at end' % indent,
extraCount, length))
return '\n'.join(result) | Format a DNA match as a string.
@param dnaMatch: A C{dict} returned by C{compareDNAReads}.
@param read1: A C{Read} instance or an instance of one of its subclasses.
@param read2: A C{Read} instance or an instance of one of its subclasses.
@param matchAmbiguous: If C{True}, ambiguous nucleotides that are
possibly correct were counted as actually being correct. Otherwise,
the match was done strictly, insisting that only non-ambiguous
nucleotides could contribute to the matching nucleotide count.
@param indent: A C{str} to indent all returned lines with.
@param offsets: If not C{None}, a C{set} of offsets of interest that were
only considered when making C{match}.
@return: A C{str} describing the match. |
def configure(self, component, all_dependencies):
''' Ensure all config-time files have been generated. Return a
dictionary of generated items.
'''
r = {}
builddir = self.buildroot
# only dependencies which are actually valid can contribute to the
# config data (which includes the versions of all dependencies in its
# build info). If the dependencies aren't available we can't tell what
# version they are. Anything missing here should always be a test
# dependency that isn't going to be used, otherwise the yotta build
# command will fail before we get here
available_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)
self.set_toplevel_definitions = ''
if self.build_info_include_file is None:
self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)
self.set_toplevel_definitions += build_info_definitions
if self.config_include_file is None:
self.config_include_file, config_definitions, self.config_json_file = self._getConfigData(available_dependencies, component, builddir, self.build_info_include_file)
self.set_toplevel_definitions += config_definitions
self.configured = True
return {
'merged_config_include': self.config_include_file,
'merged_config_json': self.config_json_file,
'build_info_include': self.build_info_include_file
} | Ensure all config-time files have been generated. Return a
dictionary of generated items. |
def sort_by_list_order(sortlist, reflist, reverse=False, fltr=False,
slemap=None):
"""
Sort a list according to the order of entries in a reference list.
Parameters
----------
sortlist : list
List to be sorted
reflist : list
Reference list defining sorting order
reverse : bool, optional (default False)
Flag indicating whether to sort in reverse order
fltr : bool, optional (default False)
Flag indicating whether to filter `sortlist` to remove any entries
that are not in `reflist`
slemap : function or None, optional (default None)
Function mapping a sortlist entry to the form of an entry in
`reflist`
Returns
-------
sortedlist : list
Sorted (and possibly filtered) version of sortlist
"""
def keyfunc(entry):
if slemap is not None:
rle = slemap(entry)
if rle in reflist:
# Ordering index taken from reflist
return reflist.index(rle)
else:
# Ordering index taken from sortlist, offset
# by the length of reflist so that entries
# that are not in reflist retain their order
# in sortlist
return sortlist.index(entry) + len(reflist)
if fltr:
if slemap:
sortlist = filter(lambda x: slemap(x) in reflist, sortlist)
else:
sortlist = filter(lambda x: x in reflist, sortlist)
return sorted(sortlist, key=keyfunc, reverse=reverse) | Sort a list according to the order of entries in a reference list.
Parameters
----------
sortlist : list
List to be sorted
reflist : list
Reference list defining sorting order
reverse : bool, optional (default False)
Flag indicating whether to sort in reverse order
fltr : bool, optional (default False)
Flag indicating whether to filter `sortlist` to remove any entries
that are not in `reflist`
slemap : function or None, optional (default None)
Function mapping a sortlist entry to the form of an entry in
`reflist`
Returns
-------
sortedlist : list
Sorted (and possibly filtered) version of sortlist |
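A short self-contained example of the key-function trick used above: entries found in the reference list sort by their reference index, while unknown entries keep their original relative order after them (offset by len(reflist)).

reflist = ['b', 'a', 'c']
sortlist = ['a', 'x', 'c', 'b', 'y']

def keyfunc(entry):
    if entry in reflist:
        return reflist.index(entry)
    return sortlist.index(entry) + len(reflist)

print(sorted(sortlist, key=keyfunc))  # ['b', 'a', 'c', 'x', 'y']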
def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions) | Load up the modules for remote compilation via ssh |
def remove_instance(self):
"""
Remove the instance from the related fields (delete the field if it's
a simple one, or remove the instance from the field if it's a set/list/
sorted_set)
"""
with fields.FieldLock(self.related_field):
related_pks = self()
for pk in related_pks:
# get the real related field
related_instance = self.related_field._model(pk)
related_field = getattr(related_instance, self.related_field.name)
# check if we have a dedicated remove method
remover = getattr(related_field, '_related_remover', None)
# then remove the instance from the related field
if remover is not None:
# if we have a remover method, it wants the instance as argument
# (the related field may be a set/list/sorted_set)
getattr(related_field, remover)(self.instance._pk)
else:
# no remover method, simply delete the field
related_field.delete() | Remove the instance from the related fields (delete the field if it's
a simple one, or remove the instance from the field if it's a set/list/
sorted_set) |
def artifacts(self):
"""
Property for accessing :class:`ArtifactManager` instance, which is used to manage artifacts.
:rtype: yagocd.resources.artifact.ArtifactManager
"""
if self._artifact_manager is None:
self._artifact_manager = ArtifactManager(session=self._session)
return self._artifact_manager | Property for accessing :class:`ArtifactManager` instance, which is used to manage artifacts.
:rtype: yagocd.resources.artifact.ArtifactManager |
def update_points(self):
""" 椭圆的近似图形:72边形 """
n = max(8, min(72, int(2*sqrt(self.r_x+self.r_y))))
d = pi * 2 / n
x, y, r_x, r_y = self.x, self.y, self.r_x, self.r_y
ps = []
for i in range(n):
ps += [(x + r_x * sin(d * i)), (y + r_y * cos(d * i))]
self.points = tuple(ps) | Approximate shape of the ellipse: a 72-sided polygon |
def _raise_if_null(self, other):
"""
:raises ValueError: if either self or other is a null Interval
"""
if self.is_null():
raise ValueError("Cannot compare null Intervals!")
if hasattr(other, 'is_null') and other.is_null():
raise ValueError("Cannot compare null Intervals!") | :raises ValueError: if either self or other is a null Interval |
def populate_initial_services():
"""
Populate a freshly installed Hypermap instance with basic services.
"""
services_list = (
(
'Harvard WorldMap',
'Harvard WorldMap open source web geospatial platform',
'Hypermap:WorldMap',
'http://worldmap.harvard.edu'
),
(
'NYPL MapWarper',
'The New York Public Library (NYPL) MapWarper web site',
'Hypermap:WARPER',
'http://maps.nypl.org/warper/maps'
),
(
'Map Warper',
'The MapWarper web site developed, hosted and maintained by Tim Waters',
'Hypermap:WARPER',
'http://mapwarper.net/maps'
),
(
'WorldMap Warp',
'The MapWarper instance part of the Harvard WorldMap project',
'Hypermap:WARPER',
'http://warp.worldmap.harvard.edu/maps'
),
(
'WFP GeoNode',
'World Food Programme GeoNode',
'OGC:WMS',
'http://geonode.wfp.org/geoserver/ows?'
),
(
'NASA EARTHDATA',
'NASA EARTHDATA, powered by EOSDIS',
'OGC:WMTS',
'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml'
),
)
esri_endpoint = 'https://gis.ngdc.noaa.gov/arcgis/rest/services'
LOGGER.debug('*** Importing esri endpoint: %s' % esri_endpoint)
create_services_from_endpoint(esri_endpoint)
for service in services_list:
LOGGER.debug('*** Importing %s' % service[0])
service = Service(
title=service[0],
abstract=service[1],
type=service[2],
url=service[3]
)
service.save() | Populate a freshly installed Hypermap instance with basic services. |
def _request(self, req_type, url, **kwargs):
"""
Make a request via the `requests` module. If the result has an HTTP
error status, convert that to a Python exception.
"""
logger.debug('%s %s' % (req_type, url))
result = self.session.request(req_type, url, **kwargs)
try:
result.raise_for_status()
except requests.HTTPError:
error = result.text
try:
error = json.loads(error)
except ValueError:
pass
if result.status_code in (401, 403):
error_class = LuminosoAuthError
elif result.status_code in (400, 404, 405):
error_class = LuminosoClientError
elif result.status_code >= 500:
error_class = LuminosoServerError
else:
error_class = LuminosoError
raise error_class(error)
return result | Make a request via the `requests` module. If the result has an HTTP
error status, convert that to a Python exception. |
def auto_no_thousands(self):
"""Like self.auto but calculates the next unit if >999.99."""
if self._value >= 1000000000000:
return self.TiB, 'TiB'
if self._value >= 1000000000:
return self.GiB, 'GiB'
if self._value >= 1000000:
return self.MiB, 'MiB'
if self._value >= 1000:
return self.KiB, 'KiB'
else:
return self.B, 'B' | Like self.auto but calculates the next unit if >999.99. |
def activate(self, tourfile=None, minsize=10000, backuptour=True):
"""
Select contigs in the current partition. This is the setup phase of the
algorithm, and supports two modes:
- "de novo": This is useful at the start of a new run where no tours
are available. We select the strong contigs that have a significant number
of links to other contigs in the partition. We build a histogram of
link density (# links per bp) and remove the contigs that appear as
outliers. The orientations are derived from the matrix decomposition
of the pairwise strandedness matrix O.
- "hotstart": This is useful when there was a past run, with a given
tourfile. In this case, the active contig list and orientations are
derived from the last tour in the file.
"""
if tourfile and (not op.exists(tourfile)):
logging.debug("Tourfile `{}` not found".format(tourfile))
tourfile = None
if tourfile:
logging.debug("Importing tourfile `{}`".format(tourfile))
tour, tour_o = iter_last_tour(tourfile, self)
self.active = set(tour)
tig_to_idx = self.tig_to_idx
tour = [tig_to_idx[x] for x in tour]
signs = sorted([(x, FF[o]) for (x, o) in zip(tour, tour_o)])
_, signs = zip(*signs)
self.signs = np.array(signs, dtype=int)
if backuptour:
backup(tourfile)
tour = array.array('i', tour)
else:
self.report_active()
while True:
logdensities = self.calculate_densities()
lb, ub = outlier_cutoff(logdensities.values())
logging.debug("Log10(link_densities) ~ [{}, {}]"
.format(lb, ub))
remove = set(x for x, d in logdensities.items() if
(d < lb and self.tig_to_size[x] < minsize * 10))
if remove:
self.active -= remove
self.report_active()
else:
break
logging.debug("Remove contigs with size < {}".format(minsize))
self.active = set(x for x in self.active if
self.tig_to_size[x] >= minsize)
tour = range(self.N) # Use starting (random) order otherwise
tour = array.array('i', tour)
# Determine orientations
self.flip_all(tour)
self.report_active()
self.tour = tour
return tour | Select contigs in the current partition. This is the setup phase of the
algorithm, and supports two modes:
- "de novo": This is useful at the start of a new run where no tours
are available. We select the strong contigs that have a significant number
of links to other contigs in the partition. We build a histogram of
link density (# links per bp) and remove the contigs that appear as
outliers. The orientations are derived from the matrix decomposition
of the pairwise strandedness matrix O.
- "hotstart": This is useful when there was a past run, with a given
tourfile. In this case, the active contig list and orientations are
derived from the last tour in the file. |
def transformer_clean():
"""No dropout, label smoothing, max_length."""
hparams = transformer_base_v2()
hparams.label_smoothing = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.max_length = 0
return hparams | No dropout, label smoothing, max_length. |
def image_bytes(b, filename=None, inline=1, width='auto', height='auto',
preserve_aspect_ratio=None):
"""
Return a bytes string that displays image given by bytes b in the terminal
If filename=None, the filename defaults to "Unnamed file"
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension.
preserve_aspect_ratio sets whether the aspect ratio of the image is
preserved. The default (None) is True unless both width and height are
set.
See https://www.iterm2.com/documentation-images.html
"""
if preserve_aspect_ratio is None:
if width != 'auto' and height != 'auto':
preserve_aspect_ratio = False
else:
preserve_aspect_ratio = True
data = {
'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'),
'inline': inline,
'size': len(b),
'base64_img': base64.b64encode(b).decode('ascii'),
'width': width,
'height': height,
'preserve_aspect_ratio': int(preserve_aspect_ratio),
}
# IMAGE_CODE is a string because bytes doesn't support formatting
return IMAGE_CODE.format(**data).encode('ascii') | Return a bytes string that displays image given by bytes b in the terminal
If filename=None, the filename defaults to "Unnamed file"
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension.
preserve_aspect_ratio sets whether the aspect ratio of the image is
preserved. The default (None) is True unless both width and height are
set.
See https://www.iterm2.com/documentation-images.html |
def decrease_user_property(self, user_id, property_name, value=0, headers=None, endpoint_url=None):
"""
Decrease a user's property by a value.
:param str user_id: identified user's ID
:param str property_name: user property name to decrease
:param number value: amount by which to decrease the property
:param dict headers: custom request headers (if isn't set default values are used)
:param str endpoint_url: where to send the request (if isn't set default value is used)
:return: Response
"""
endpoint_url = endpoint_url or self._endpoint_url
url = endpoint_url + "/users/" + user_id + "/properties/" + property_name + "/decrease/" + value.__str__()
headers = headers or self._default_headers(content_type="")
response = requests.post(url, headers=headers)
return response | Decrease a user's property by a value.
:param str user_id: identified user's ID
:param str property_name: user property name to decrease
:param number value: amount by which to decrease the property
:param dict headers: custom request headers (if isn't set default values are used)
:param str endpoint_url: where to send the request (if isn't set default value is used)
:return: Response |
def to_dict(self, properties=None):
"""Return a dictionary containing Compound data. Optionally specify a list of the desired properties.
synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
because they each require an extra request.
"""
if not properties:
skip = {'aids', 'sids', 'synonyms'}
properties = [p for p in dir(Compound) if isinstance(getattr(Compound, p), property) and p not in skip]
return {p: [i.to_dict() for i in getattr(self, p)] if p in {'atoms', 'bonds'} else getattr(self, p) for p in properties} | Return a dictionary containing Compound data. Optionally specify a list of the desired properties.
synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
because they each require an extra request. |
def to_n_ref(self, fill=0, dtype='i1'):
"""Transform each genotype call into the number of
reference alleles.
Parameters
----------
fill : int, optional
Use this value to represent missing calls.
dtype : dtype, optional
Output dtype.
Returns
-------
out : ndarray, int8, shape (n_variants, n_samples)
Array of ref alleles per genotype call.
Notes
-----
By default this function returns 0 for missing genotype calls
**and** for homozygous non-reference genotype calls. Use the
`fill` argument to change how missing calls are represented.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.to_n_ref()
array([[2, 1],
[1, 0],
[0, 0]], dtype=int8)
>>> g.to_n_ref(fill=-1)
array([[ 2, 1],
[ 1, 0],
[ 0, -1]], dtype=int8)
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(3, 2) dtype=int64>
0/0 0/2 2/2
>>> v.to_n_ref()
array([2, 1, 0], dtype=int8)
"""
# count number of reference alleles
out = np.empty(self.shape[:-1], dtype=dtype)
np.sum(self.values == 0, axis=-1, out=out)
# fill missing calls
if fill != 0:
m = self.is_missing()
out[m] = fill
# handle mask
if self.mask is not None:
out[self.mask] = fill
return out | Transform each genotype call into the number of
reference alleles.
Parameters
----------
fill : int, optional
Use this value to represent missing calls.
dtype : dtype, optional
Output dtype.
Returns
-------
out : ndarray, int8, shape (n_variants, n_samples)
Array of ref alleles per genotype call.
Notes
-----
By default this function returns 0 for missing genotype calls
**and** for homozygous non-reference genotype calls. Use the
`fill` argument to change how missing calls are represented.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.to_n_ref()
array([[2, 1],
[1, 0],
[0, 0]], dtype=int8)
>>> g.to_n_ref(fill=-1)
array([[ 2, 1],
[ 1, 0],
[ 0, -1]], dtype=int8)
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(3, 2) dtype=int64>
0/0 0/2 2/2
>>> v.to_n_ref()
array([2, 1, 0], dtype=int8) |
def remove_targets(self, type, kept=None):
'''Remove targets of certain type'''
if kept is None:
kept = [
i for i, x in enumerate(self._targets)
if not isinstance(x, type)
]
if len(kept) == len(self._targets):
return self
self._targets = [self._targets[x] for x in kept]
self._labels = [self._labels[x] for x in kept]
if not self._groups:
return self
index_map = {
o_idx: n_idx
for n_idx, o_idx in zip(range(len(self._targets)), kept)
}
kept = set(kept)
for idx, grp in enumerate(self._groups):
self._groups[idx] = _sos_group(
[index_map[x] for x in grp._indexes if x in kept],
[y for x, y in zip(grp._indexes, grp._labels) if x in kept
]).set(**grp._dict)
return self | Remove targets of certain type |
def tile_to_path(self, tile):
'''return full path to a tile'''
return os.path.join(self.cache_path, self.service, tile.path()) | return full path to a tile |
def create_poll(title, options, multi=True, permissive=True, captcha=False, dupcheck='normal'):
""" Create a strawpoll.
Example:
new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No'])
:param title:
:param options:
:param multi:
:param permissive:
:param captcha:
:param dupcheck:
:return: strawpy.Strawpoll object
"""
query = {
'title': title,
'options': options,
'multi': multi,
'permissive': permissive,
'captcha': captcha,
'dupcheck': dupcheck
}
return StrawPoll(requests.post('http://strawpoll.me/api/v2/polls', data=json.dumps(query))) | Create a strawpoll.
Example:
new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No'])
:param title:
:param options:
:param multi:
:param permissive:
:param captcha:
:param dupcheck:
:return: strawpy.Strawpoll object |
def generate_namelist_file(self, rapid_namelist_file):
"""
Generate rapid_namelist file.
Parameters
----------
rapid_namelist_file: str
Path of namelist file to generate from
parameters added to the RAPID manager.
"""
log("Generating RAPID namelist file ...",
"INFO")
try:
os.remove(rapid_namelist_file)
except OSError:
pass
with open(rapid_namelist_file, 'w') as new_file:
new_file.write('&NL_namelist\n')
for attr, value in sorted(list(self.__dict__.items())):
if not attr.startswith('_'):
if attr.startswith('BS'):
new_file.write("{0} = .{1}.\n"
.format(attr, str(value).lower()))
elif isinstance(value, int):
new_file.write("%s = %s\n" % (attr, value))
else:
if value:
if os.name == "nt":
# if windows generate file with cygpath
value = self._get_cygwin_path(value)
new_file.write("%s = \'%s\'\n" % (attr, value))
new_file.write("/\n") | Generate rapid_namelist file.
Parameters
----------
rapid_namelist_file: str
Path of namelist file to generate from
parameters added to the RAPID manager. |
def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname) | Returns `ExtractMethod` to use on resource at path. Cannot be None. |
def instantiate(config):
"""
instantiate all registered vodka applications
Args:
config (dict or MungeConfig): configuration object
"""
for handle, cfg in list(config["apps"].items()):
if not cfg.get("enabled", True):
continue
app = get_application(handle)
instances[app.handle] = app(cfg) | instantiate all registered vodka applications
Args:
config (dict or MungeConfig): configuration object |
def _get_programs_dict():
"""
Builds and returns programs dictionary
This will have to import the packages in COLLABORATORS_S in order to get their absolute path.
Returns:
dictionary: {"packagename": [ExeInfo0, ...], ...}
"packagename" examples: "f311.explorer", "numpy"
"""
global __programs_dict
if __programs_dict is not None:
return __programs_dict
d = __programs_dict = OrderedDict()
for pkgname in COLLABORATORS_S:
try:
package = importlib.import_module(pkgname)
except ImportError:
# I think it is better to be silent when a collaborator package is not installed
continue
path_ = os.path.join(os.path.split(package.__file__)[0], "scripts")
bulk = a99.get_exe_info(path_, flag_protected=True)
d[pkgname] = {"description": a99.get_obj_doc0(package), "exeinfo": bulk}
return __programs_dict | Builds and returns programs dictionary
This will have to import the packages in COLLABORATORS_S in order to get their absolute path.
Returns:
dictionary: {"packagename": [ExeInfo0, ...], ...}
"packagename" examples: "f311.explorer", "numpy" |
def scalarVectorDecorator(func):
"""Decorator to return scalar outputs as a set"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[1]).shape == () \
and numpy.array(args[2]).shape == (): #only if both R and z are scalars
scalarOut= True
args= (args[0],numpy.array([args[1]]),numpy.array([args[2]]))
elif numpy.array(args[1]).shape == () \
and not numpy.array(args[2]).shape == (): #R scalar, z vector
scalarOut= False
args= (args[0],args[1]*numpy.ones_like(args[2]),args[2])
elif not numpy.array(args[1]).shape == () \
and numpy.array(args[2]).shape == (): #R vector, z scalar
scalarOut= False
args= (args[0],args[1],args[2]*numpy.ones_like(args[1]))
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper | Decorator to return scalar output for scalar input |
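A stripped-down, self-contained sketch of the same pattern (not the real galpy decorator, and without the self argument or mixed scalar/vector broadcasting): wrap a vectorised (R, z) function so that scalar inputs come back as scalars.

from functools import wraps
import numpy

def scalar_or_vector(func):
    @wraps(func)
    def wrapper(R, z, **kwargs):
        scalar_out = numpy.array(R).shape == () and numpy.array(z).shape == ()
        result = func(numpy.atleast_1d(R), numpy.atleast_1d(z), **kwargs)
        return result[0] if scalar_out else result
    return wrapper

@scalar_or_vector
def radius(R, z):
    return numpy.sqrt(R**2 + z**2)

print(radius(3.0, 4.0))                                # 5.0, scalar in -> scalar out
print(radius(numpy.array([3.0]), numpy.array([4.0])))  # [5.], vector preserved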
def get_en_words() -> Set[str]:
"""
Returns a set of English words which can be used to filter out
code-switched sentences.
"""
pull_en_words()
with open(config.EN_WORDS_PATH) as words_f:
raw_words = words_f.readlines()
en_words = set([word.strip().lower() for word in raw_words])
NA_WORDS_IN_EN_DICT = set(["kore", "nani", "karri", "imi", "o", "yaw", "i",
"bi", "aye", "imi", "ane", "kubba", "kab", "a-",
"ad", "a", "mak", "selim", "ngai", "en", "yo",
"wud", "mani", "yak", "manu", "ka-", "mong",
"manga", "ka-", "mane", "kala", "name", "kayo",
"kare", "laik", "bale", "ni", "rey", "bu",
"re", "iman", "bom", "wam",
"alu", "nan", "kure", "kuri", "wam", "ka", "ng",
"yi", "na", "m", "arri", "e", "kele", "arri", "nga",
"kakan", "ai", "ning", "mala", "ti", "wolk",
"bo", "andi", "ken", "ba", "aa", "kun", "bini",
"wo", "bim", "man", "bord", "al", "mah", "won",
"ku", "ay", "belen", "wen", "yah", "muni",
"bah", "di", "mm", "anu", "nane", "ma", "kum",
"birri", "ray", "h", "kane", "mumu", "bi", "ah",
"i-", "n", "mi", "bedman", "rud", "le", "babu",
"da", "kakkak", "yun", "ande", "naw", "kam", "bolk",
"woy", "u", "bi-",
])
EN_WORDS_NOT_IN_EN_DICT = set(["screenprinting"])
en_words = en_words.difference(NA_WORDS_IN_EN_DICT)
en_words = en_words | EN_WORDS_NOT_IN_EN_DICT
return en_words | Returns a set of English words which can be used to filter out
code-switched sentences. |
def load_stubs(self, log_mem=False):
"""Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
"""
# Initialize parameter related to diagnostic output of memory usage
if log_mem:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
LOG_MEMORY_INT = 1000
MEMORY_LIMIT = 1000.0
def _add_stub_manually(_fname):
"""Create and add a 'stub' by manually loading parameters from
JSON files.
Previously this was done by creating a full `Entry` instance, then
using the `Entry.get_stub()` method to trim it down. This was very
slow and memory intensive, hence this improved approach.
"""
# FIX: should this be ``fname.endswith('.gz')`` ?
fname = uncompress_gz(_fname) if '.gz' in _fname else _fname
stub = None
stub_name = None
with codecs.open(fname, 'r') as jfil:
# Load the full JSON file
data = json.load(jfil, object_pairs_hook=OrderedDict)
# Extract the top-level keys (should just be the name of the
# entry)
stub_name = list(data.keys())
# Make sure there is only a single top-level entry
if len(stub_name) != 1:
err = "json file '{}' has multiple keys: {}".format(
fname, list(stub_name))
self._log.error(err)
raise ValueError(err)
stub_name = stub_name[0]
# Make sure a non-stub entry doesn't already exist with this
# name
if stub_name in self.entries and not self.entries[
stub_name]._stub:
err_str = (
"ERROR: non-stub entry already exists with name '{}'"
.format(stub_name))
self.log.error(err_str)
raise RuntimeError(err_str)
# Remove the outermost dict level
data = data[stub_name]
# Create a new `Entry` (subclass) instance
proto = self.proto
stub = proto(catalog=self, name=stub_name, stub=True)
# Add stub parameters if they are available
if proto._KEYS.ALIAS in data:
stub[proto._KEYS.ALIAS] = data[proto._KEYS.ALIAS]
if proto._KEYS.DISTINCT_FROM in data:
stub[proto._KEYS.DISTINCT_FROM] = data[
proto._KEYS.DISTINCT_FROM]
if proto._KEYS.RA in data:
stub[proto._KEYS.RA] = data[proto._KEYS.RA]
if proto._KEYS.DEC in data:
stub[proto._KEYS.DEC] = data[proto._KEYS.DEC]
if proto._KEYS.DISCOVER_DATE in data:
stub[proto._KEYS.DISCOVER_DATE] = data[
proto._KEYS.DISCOVER_DATE]
if proto._KEYS.SOURCES in data:
stub[proto._KEYS.SOURCES] = data[
proto._KEYS.SOURCES]
# Store the stub
self.entries[stub_name] = stub
self.log.debug("Added stub for '{}'".format(stub_name))
currenttask = 'Loading entry stubs'
files = self.PATHS.get_repo_output_file_list()
for ii, _fname in enumerate(pbar(files, currenttask)):
# Run normally
# _add_stub(_fname)
# Run 'manually' (extract stub parameters directly from JSON)
_add_stub_manually(_fname)
if log_mem:
rss = process.memory_info().rss / 1024 / 1024
if ii % LOG_MEMORY_INT == 0 or rss > MEMORY_LIMIT:
log_memory(self.log, "\nLoaded stub {}".format(ii),
logging.INFO)
if rss > MEMORY_LIMIT:
err = (
"Memory usage {}, has exceeded {} on file {} '{}'".
format(rss, MEMORY_LIMIT, ii, _fname))
self.log.error(err)
raise RuntimeError(err)
return self.entries | Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode. |
def get_gammadot(F, mc, q, e):
"""
Compute gamma dot from Barack and Cutler (2004)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param e: Eccentricity of binary
:returns: dgamma/dt
"""
# chirp mass
mc *= SOLAR2S
#total mass
m = (((1+q)**2)/q)**(3/5) * mc
dgdt = 6*np.pi*F * (2*np.pi*F*m)**(2/3) / (1-e**2) * \
(1 + 0.25*(2*np.pi*F*m)**(2/3)/(1-e**2)*(26-15*e**2))
return dgdt | Compute gamma dot from Barack and Cutler (2004)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param e: Eccentricity of binary
:returns: dgamma/dt |
def is_std_string(type_):
"""
Returns True, if type represents C++ `std::string`, False otherwise.
"""
if utils.is_str(type_):
return type_ in string_equivalences
type_ = remove_alias(type_)
type_ = remove_reference(type_)
type_ = remove_cv(type_)
return type_.decl_string in string_equivalences | Returns True, if type represents C++ `std::string`, False otherwise. |
def Write(self, packet):
"""See base class."""
out = bytearray([0] + packet) # Prepend the zero-byte (report ID)
os.write(self.dev, out) | See base class. |
def add_batch(self, nlive=500, wt_function=None, wt_kwargs=None,
maxiter=None, maxcall=None, save_bounds=True,
print_progress=True, print_func=None, stop_val=None):
"""
Allocate an additional batch of (nested) samples based on
the combined set of previous samples using the specified
weight function.
Parameters
----------
nlive : int, optional
The number of live points used when adding additional samples
in the batch. Default is `500`.
wt_function : func, optional
A cost function that takes a `Results` instance
and returns a log-likelihood range over which a new batch of
samples should be generated. The default function simply
computes a weighted average of the posterior and evidence
information content as::
weight = pfrac * pweight + (1. - pfrac) * zweight
wt_kwargs : dict, optional
Extra arguments to be passed to the weight function.
maxiter : int, optional
Maximum number of iterations allowed. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations allowed.
Default is `sys.maxsize` (no limit).
save_bounds : bool, optional
Whether or not to save distributions used to bound
the live points internally during dynamic live point allocations.
Default is `True`.
print_progress : bool, optional
Whether to output a simple summary of the current run that
updates each iteration. Default is `True`.
print_func : function, optional
A function that prints out the current state of the sampler.
If not provided, the default :meth:`results.print_fn` is used.
stop_val : float, optional
The value of the stopping criteria to be passed to
:meth:`print_func`. Used internally within :meth:`run_nested` to
keep track of progress.
"""
# Initialize values.
if maxcall is None:
maxcall = sys.maxsize
if maxiter is None:
maxiter = sys.maxsize
if wt_function is None:
wt_function = weight_function
if wt_kwargs is None:
wt_kwargs = dict()
if print_func is None:
print_func = print_fn
# If we have either likelihood calls or iterations remaining,
# add our new batch of live points.
ncall, niter, n = self.ncall, self.it - 1, self.batch
if maxcall > 0 and maxiter > 0:
# Compute our sampling bounds using the provided
# weight function.
res = self.results
lnz, lnzerr = res.logz[-1], res.logzerr[-1]
logl_bounds = wt_function(res, wt_kwargs)
for results in self.sample_batch(nlive_new=nlive,
logl_bounds=logl_bounds,
maxiter=maxiter,
maxcall=maxcall,
save_bounds=save_bounds):
(worst, ustar, vstar, loglstar, nc,
worst_it, boundidx, bounditer, eff) = results
# When initializing a batch (i.e. when `worst < 0`),
# don't increment our call counter or our current
# number of iterations.
if worst >= 0:
ncall += nc
niter += 1
# Reorganize results.
results = (worst, ustar, vstar, loglstar, np.nan, np.nan,
lnz, lnzerr**2, np.nan, nc, worst_it, boundidx,
bounditer, eff, np.nan)
# Print progress.
if print_progress:
print_func(results, niter, ncall, nbatch=n+1,
stop_val=stop_val,
logl_min=logl_bounds[0],
logl_max=logl_bounds[1])
# Combine batch with previous runs.
self.combine_runs()
# Pass back info.
return ncall, niter, logl_bounds, results | Allocate an additional batch of (nested) samples based on
the combined set of previous samples using the specified
weight function.
Parameters
----------
nlive : int, optional
The number of live points used when adding additional samples
in the batch. Default is `500`.
wt_function : func, optional
A cost function that takes a `Results` instance
and returns a log-likelihood range over which a new batch of
samples should be generated. The default function simply
computes a weighted average of the posterior and evidence
information content as::
weight = pfrac * pweight + (1. - pfrac) * zweight
wt_kwargs : dict, optional
Extra arguments to be passed to the weight function.
maxiter : int, optional
Maximum number of iterations allowed. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations allowed.
Default is `sys.maxsize` (no limit).
save_bounds : bool, optional
Whether or not to save distributions used to bound
the live points internally during dynamic live point allocations.
Default is `True`.
print_progress : bool, optional
Whether to output a simple summary of the current run that
updates each iteration. Default is `True`.
print_func : function, optional
A function that prints out the current state of the sampler.
If not provided, the default :meth:`results.print_fn` is used.
stop_val : float, optional
The value of the stopping criteria to be passed to
:meth:`print_func`. Used internally within :meth:`run_nested` to
keep track of progress. |
def row(self):
"""
Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
}
"""
row = OrderedDict()
row['retro_game_id'] = self.retro_game_id
row['game_type'] = self.game_type
row['game_type_des'] = self.game_type_des
row['st_fl'] = self.st_fl
row['regseason_fl'] = self.regseason_fl
row['playoff_fl'] = self.playoff_fl
row['local_game_time'] = self.local_game_time
row['game_id'] = self.game_id
row['home_team_id'] = self.home_team_id
row['home_team_lg'] = self.home_team_lg
row['away_team_id'] = self.away_team_id
row['away_team_lg'] = self.away_team_lg
row['home_team_name'] = self.home_team_name
row['away_team_name'] = self.away_team_name
row['home_team_name_full'] = self.home_team_name_full
row['away_team_name_full'] = self.away_team_name_full
row['interleague_fl'] = self.interleague_fl
row['park_id'] = self.park_id
row['park_name'] = self.park_name
row['park_loc'] = self.park_loc
return row | Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
} |
def check_coin_a_phrase_from(text):
"""Check the text."""
err = "misc.illogic.coin"
msg = "You can't coin an existing phrase. Did you mean 'borrow'?"
regex = "to coin a phrase from"
return existence_check(text, [regex], err, msg, offset=1) | Check the text. |
def size(self):
'''
Return the size in bits
Return None if the size is not known
Returns:
int
'''
t = self._type
if t.startswith('uint'):
return int(t[len('uint'):])
if t.startswith('int'):
return int(t[len('int'):])
if t == 'bool':
return int(8)
if t == 'address':
return int(160)
if t.startswith('bytes'):
return int(t[len('bytes'):])
return None | Return the size in bits
Return None if the size is not known
Returns:
int |
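A stand-alone sketch of the same prefix parsing, handy for checking the mapping from Solidity elementary type names to the widths returned above (note that, exactly as in the method, the bytesN branch yields N):

def bit_size(t):
    if t.startswith('uint'):
        return int(t[len('uint'):])
    if t.startswith('int'):
        return int(t[len('int'):])
    if t == 'bool':
        return 8
    if t == 'address':
        return 160
    if t.startswith('bytes'):
        return int(t[len('bytes'):])
    return None

for name in ('uint256', 'int8', 'address', 'bytes32'):
    print(name, bit_size(name))  # 256, 8, 160, 32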
def _add_blockhash_to_state_changes(storage: SQLiteStorage, cache: BlockHashCache) -> None:
"""Adds blockhash to ContractReceiveXXX and ActionInitChain state changes"""
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceive%'),
('_type', 'raiden.transfer.state_change.ActionInitChain'),
],
logical_and=False,
)
for state_changes_batch in batch_query:
# Gather query records to pass to gevent pool imap to have concurrent RPC calls
query_records = []
for state_change in state_changes_batch:
data = json.loads(state_change.data)
assert 'block_hash' not in data, 'v18 state changes cant contain blockhash'
record = BlockQueryAndUpdateRecord(
block_number=int(data['block_number']),
data=data,
state_change_identifier=state_change.state_change_identifier,
cache=cache,
)
query_records.append(record)
# Now perform the queries in parallel with gevent.Pool.imap and gather the
# updated tuple entries that will update the DB
updated_state_changes = []
pool_generator = Pool(batch_size).imap(
_query_blocknumber_and_update_statechange_data,
query_records,
)
for entry in pool_generator:
updated_state_changes.append(entry)
# Finally update the DB with a batched executemany()
storage.update_state_changes(updated_state_changes) | Adds blockhash to ContractReceiveXXX and ActionInitChain state changes |
def ray_shooting(self, x, y, kwargs, k=None):
"""
maps image to source position (inverse deflection)
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: source plane positions corresponding to (x, y) in the image plane
"""
return self.lens_model.ray_shooting(x, y, kwargs, k=k) | maps image to source position (inverse deflection)
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: source plane positions corresponding to (x, y) in the image plane |
def write_matrix_to_tsv(net, filename=None, df=None):
'''
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
'''
import pandas as pd
if df is None:
df = net.dat_to_df()
return df['mat'].to_csv(filename, sep='\t') | This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object. |
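For context, the export above is simply pandas' to_csv with a tab separator; a minimal round-trip sketch (file name and data made up for the example):

import pandas as pd

df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
                  index=['gene-A', 'gene-B'],
                  columns=['sample-1', 'sample-2'])
df.to_csv('matrix.tsv', sep='\t')

# Read the matrix back, using the first column as the row index
df2 = pd.read_csv('matrix.tsv', sep='\t', index_col=0)
print(df2)  # same matrix, row labels restored from the first column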
def generate_variables(name, n_vars=1, hermitian=None, commutative=True):
"""Generates a number of commutative or noncommutative variables
:param name: The prefix in the symbolic representation of the noncommuting
variables. This will be suffixed by a number from 0 to
n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables .
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
Commutative variables are Hermitian by default.
:type commutative: bool.
:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
:class:`sympy.physics.quantum.operator.HermitianOperator`
variables or `sympy.Symbol`
:Example:
>>> generate_variables('y', 2, commutative=True)
[y0, y1]
"""
variables = []
for i in range(n_vars):
if n_vars > 1:
var_name = '%s%s' % (name, i)
else:
var_name = '%s' % name
if commutative:
if hermitian is None or hermitian:
variables.append(Symbol(var_name, real=True))
else:
variables.append(Symbol(var_name, complex=True))
elif hermitian is not None and hermitian:
variables.append(HermitianOperator(var_name))
else:
variables.append(Operator(var_name))
return variables | Generates a number of commutative or noncommutative variables
:param name: The prefix in the symbolic representation of the noncommuting
variables. This will be suffixed by a number from 0 to
n_vars-1 if n_vars > 1.
:type name: str.
:param n_vars: The number of variables.
:type n_vars: int.
:param hermitian: Optional parameter to request Hermitian variables .
:type hermitian: bool.
:param commutative: Optional parameter to request commutative variables.
Commutative variables are Hermitian by default.
:type commutative: bool.
:returns: list of :class:`sympy.physics.quantum.operator.Operator` or
:class:`sympy.physics.quantum.operator.HermitianOperator`
variables or `sympy.Symbol`
:Example:
>>> generate_variables('y', 2, commutative=True)
[y0, y1] |
def analog_write(self, pin, value):
"""
Set the specified pin to the specified value.
:param pin: Pin number
:param value: Pin value
:return: No return value
"""
if self._command_handler.ANALOG_MESSAGE + pin < 0xf0:
command = [self._command_handler.ANALOG_MESSAGE + pin, value & 0x7f, (value >> 7) & 0x7f]
self._command_handler.send_command(command)
else:
self.extended_analog(pin, value) | Set the specified pin to the specified value.
:param pin: Pin number
:param value: Pin value
:return: No return value |
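The payload above packs the analog value into two 7-bit bytes, since Firmata data bytes cannot have the high bit set. A tiny sketch of that split and its inverse:

value = 1023  # e.g. a 10-bit ADC/PWM value

lsb = value & 0x7F          # low 7 bits
msb = (value >> 7) & 0x7F   # next 7 bits
print(lsb, msb)             # 127 7

# Reassemble on the receiving side
restored = (msb << 7) | lsb
assert restored == value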
def shutdown(self):
"""Close all file handles and stop all motors."""
self.stop_balance.set() # Stop balance thread
self.motor_left.stop()
self.motor_right.stop()
self.gyro_file.close()
self.touch_file.close()
self.encoder_left_file.close()
self.encoder_right_file.close()
self.dc_left_file.close()
self.dc_right_file.close() | Close all file handles and stop all motors. |
async def _unwatch(self, conn):
"Unwatches all previously specified keys"
await conn.send_command('UNWATCH')
res = await conn.read_response()
return self.watching and res or True | Unwatches all previously specified keys |
def get_mean_threshold_from_calibration(gdac, mean_threshold_calibration):
'''Calculates the mean threshold from the threshold calibration at the given gdac settings. If the given gdac value was not used during calibration
the value is determined by interpolation.
Parameters
----------
gdacs : array like
The GDAC settings where the threshold should be determined from the calibration
mean_threshold_calibration : pytable
The table created during the calibration scan.
Returns
-------
numpy.array, shape=(len(gdac), )
The mean threshold values at each value in gdacs.
'''
interpolation = interp1d(mean_threshold_calibration['parameter_value'], mean_threshold_calibration['mean_threshold'], kind='slinear', bounds_error=True)
return interpolation(gdac) | Calculates the mean threshold from the threshold calibration at the given gdac settings. If the given gdac value was not used during calibration
the value is determined by interpolation.
Parameters
----------
gdacs : array like
The GDAC settings where the threshold should be determined from the calibration
mean_threshold_calibration : pytable
The table created during the calibration scan.
Returns
-------
numpy.array, shape=(len(gdac), )
The mean threshold values at each value in gdacs. |
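For readers unfamiliar with scipy's interp1d, the call above builds a piecewise-linear interpolator over the calibration points and evaluates it at the requested GDAC values; out-of-range requests raise because bounds_error=True. A minimal sketch with made-up calibration numbers:

import numpy as np
from scipy.interpolate import interp1d

# Hypothetical calibration points: GDAC setting -> mean threshold
gdac_cal = np.array([50, 100, 150, 200])
threshold_cal = np.array([1200., 1900., 2500., 3000.])

interpolation = interp1d(gdac_cal, threshold_cal, kind='slinear', bounds_error=True)
print(interpolation([75, 160]))  # linearly interpolated thresholds
# interpolation(250) would raise ValueError: 250 is outside the calibrated range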
def plotAccuracyDuringSequenceInference(dirName, title="", yaxis=""):
"""
Plot accuracy vs number of locations
"""
# Read in results file
with open(os.path.join(dirName,
"sequence_batch_high_dec_normal_features.pkl"), "rb") as f:
results = cPickle.load(f)
locationRange = []
featureRange = []
for r in results:
if r["numLocations"] not in locationRange: locationRange.append(r["numLocations"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
locationRange.sort()
featureRange.sort()
if 10 in featureRange: featureRange.remove(10)
print "locationRange=",locationRange
print "featureRange=",featureRange
########################################################################
#
# Accumulate the L2 accuracies for each condition in a list and compute mean
# and stdeviations
# For TM we average across all feature ranges
L2Accuracies = defaultdict(list)
TMAccuracies = defaultdict(list)
for r in results:
if r["numFeatures"] in featureRange:
L2Accuracies[(r["numLocations"], r["numFeatures"])].append(r["sequenceAccuracyL2"])
TMAccuracies[r["numLocations"]].append(r["sequenceCorrectSparsityTM"])
# meanL2Accuracy[o, f] = accuracy of L2 with o locations and f unique features.
meanL2Accuracy = numpy.zeros((max(locationRange)+1, max(featureRange) + 1))
stdevL2 = numpy.zeros((max(locationRange)+1, max(featureRange) + 1))
meanTMAccuracy = numpy.zeros(max(locationRange)+1)
stdevTM = numpy.zeros(max(locationRange)+1)
for o in locationRange:
for f in featureRange:
a = numpy.array(L2Accuracies[(o, f)])
meanL2Accuracy[o, f] = 100.0*a.mean()
stdevL2[o, f] = 100.0*a.std()
# Accuracies for TM
a = numpy.array(TMAccuracies[o])
meanTMAccuracy[o] = 100.0*a.mean()
stdevTM[o] = 100.0*a.std()
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_during_sequence_inference.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sensorimotor layer, feature pool size: {}'.format(f))
plt.errorbar(locationRange, meanL2Accuracy[locationRange, f],
yerr=stdevL2[locationRange, f],
color=colorList[i])
plt.errorbar(locationRange, meanTMAccuracy[locationRange],
yerr=stdevTM[locationRange],
color=colorList[len(featureRange)])
legendList.append('Temporal sequence layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.65, 1., .102), loc="right", prop={'size':10})
plt.xlabel("Size of location pool")
# plt.xticks(range(0,max(locationRange)+1,10))
# plt.yticks(range(0,int(accuracy.max())+2,10))
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close() | Plot accuracy vs number of locations |
def get_sample_size(self, key=None):
""" Returns the number of samples in the input data
@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be
returned. If not specified then the size of the entire data
set will be returned.
@ Out, an integer specifying the number of samples.
"""
if key is None:
return len(self.Y)
else:
return len(self.get_partitions(self.persistence)[key]) | Returns the number of samples in the input data
@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be
returned. If not specified then the size of the entire data
set will be returned.
@ Out, an integer specifying the number of samples. |
def _request_auth(self, registry):
"""
self, username, password=None, email=None, registry=None,
reauth=False, insecure_registry=False, dockercfg_path=None):
"""
if registry:
if registry.auth:
registry.auth.load_dockercfg()
try:
self._client_session.login(username=registry.auth.user,
password=registry.auth.passwd,
dockercfg_path=registry.auth.config_path,
reauth=True if registry.auth.auth_type == 'registry_rubber' else False,
registry=registry.auth.registry)
except Exception:
raise
else:
            raise Exception("a registry is required when requesting auth.") | Authenticate the client session against the given registry using the
        credentials stored on registry.auth; raises if no registry is given.
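For context, a hedged sketch of the registry object shape that _request_auth relies on. The class names here are assumptions; only the attribute and method names (auth.user, auth.passwd, auth.config_path, auth.auth_type, auth.registry, auth.load_dockercfg) come from the method above.

class StubAuth(object):
    """Stand-in credential holder (hypothetical name)."""
    def __init__(self, user, passwd, registry,
                 config_path=None, auth_type='dockercfg'):
        self.user = user
        self.passwd = passwd
        self.registry = registry
        self.config_path = config_path
        self.auth_type = auth_type

    def load_dockercfg(self):
        # The real implementation presumably refreshes credentials from a
        # .dockercfg file; this sketch keeps whatever was passed in.
        pass

class StubRegistry(object):
    """Stand-in registry (hypothetical name) exposing only the .auth used above."""
    def __init__(self, auth):
        self.auth = auth

registry = StubRegistry(StubAuth(user='builder', passwd='secret',
                                 registry='registry.example.com'))
# _request_auth(registry) would then log the client session in with these
# credentials, forcing reauth only when auth_type == 'registry_rubber'.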
def main(args):
"""first we need sorted genepreds"""
cmd = ['sort',args.reference,'--gpd','--tempdir',args.tempdir,'--threads',
str(args.threads),'-o',args.tempdir+'/ref.sorted.gpd']
  sys.stderr.write(" ".join(cmd)+"\n")  # cmd is a list; join it before writing
gpd_sort(cmd)
cmd = ['sort',args.gpd,'--gpd','--tempdir',args.tempdir,'--threads',
str(args.threads),'-o',args.tempdir+'/my.sorted.gpd']
  sys.stderr.write(" ".join(cmd)+"\n")
gpd_sort(cmd)
rstream = GPDStream(open(args.tempdir+'/ref.sorted.gpd'))
mstream = GPDStream(open(args.tempdir+'/my.sorted.gpd'))
stream = MultiLocusStream([rstream,mstream])
of = sys.stdout
if args.output != '-':
if args.output[-3:] == '.gz': of = gzip.open(args.output,'w')
else: of = open(args.output,'w')
for locus_rng in stream:
(rgpds, mgpds) = locus_rng.get_payload()
if len(mgpds) == 0: continue
sys.stderr.write(locus_rng.get_range_string()+" "+str(len(rgpds))+" "+str(len(mgpds))+" \r")
ref_juncs = {}
for ref in rgpds: ref_juncs[ref.get_junction_string()] = ref
annotated = []
unannotated = []
annotated = [ref_juncs[x.get_junction_string()] for x in mgpds if x.get_exon_count() > 1 and x.get_junction_string() in ref_juncs]
unannotated = [x for x in mgpds if x.get_exon_count() > 1 and x.get_junction_string() not in ref_juncs]
# now unannotated needs an annotation.
my_unannotated = [x for x in mgpds if x.get_exon_count() == 1]
single_reference = [x for x in rgpds if x.get_exon_count() == 1]
single_annotated = []
single_unannotated = []
#print len(single_reference)
#print len(single_unannotated)
for gpd in my_unannotated:
overs = sorted([x for x in single_reference if x.overlap_size(gpd) > 0],\
key=lambda y: y.avg_mutual_coverage(gpd), reverse=True)
if len(overs) > 0:
single_annotated.append(overs[0])
else: single_unannotated.append(gpd)
# now annotated and single_annotated are done
unannotated += single_unannotated
    # single- or multi-exon, the remaining unannotated entries still need a gene assignment
gene_annotated = []
no_annotation = []
for m in unannotated:
overs = sorted([x for x in rgpds if x.overlap_size(m) > 0],\
key=lambda y: y.avg_mutual_coverage(m), reverse=True)
if len(overs) > 0:
gname = overs[0].value('gene_name')
f = overs[0].get_gpd_line().rstrip().split("\t")
f[0] = gname
f[1] = str(uuid.uuid4())
g = GPD("\t".join(f))
gene_annotated.append(g)
else: no_annotation.append(m)
finished = []
# now we need to annotate no_annotation
while len(no_annotation) > 0:
m = no_annotation.pop(0)
matched = False
for i in range(0,len(finished)):
if len([x for x in finished[i] if x.overlap_size(m) > 0]) > 0:
finished[i].append(m)
matched = True
break
if not matched: finished.append([m])
# now finished has gene groups
original = []
for group in finished:
gname = str(uuid.uuid4())
for member in group:
tname = str(uuid.uuid4())
f = member.get_gpd_line().rstrip().split("\t")
f[0] = gname
f[1] = tname
g = GPD("\t".join(f))
original.append(g)
for gpd in original + annotated + single_annotated + gene_annotated:
of.write(gpd.get_gpd_line()+"\n")
of.close()
sys.stderr.write("\n")
# Temporary working directory step 3 of 3 - Cleanup
if not args.specific_tempdir:
rmtree(args.tempdir) | first we need sorted genepreds |
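The matching rule at the heart of main() is worth making concrete: a multi-exon transcript counts as annotated only when its ordered junction coordinates match a reference transcript exactly, implemented as a dict keyed by get_junction_string(). Below is a minimal sketch of that idea; the key format is an assumption (the real GPD class defines its own), and only the dict-lookup pattern mirrors the code above.

def junction_string(chrom, exon_starts, exon_ends):
  """Build a key from a transcript's internal junction coordinates."""
  juncs = zip(exon_ends[:-1], exon_starts[1:])  # (donor, acceptor) pairs
  return chrom + ":" + ",".join("%d-%d" % j for j in juncs)

ref_juncs = {
  junction_string("chr1", [100, 500, 900], [200, 600, 1000]): "GENE_A.tx1",
}
query = junction_string("chr1", [150, 500, 900], [200, 600, 950])
print(ref_juncs.get(query, "unannotated"))
# Prints GENE_A.tx1: differing outer exon ends do not change the junctions.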