code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
---|---|
def get_mark_css(aes_name, css_value):
    """Generate the CSS class used by a <mark> tag.

    Parameters
    ----------
    aes_name: str
        The name of the class.
    css_value: str or list
        The value (or list of per-rule values) for the CSS property
        mapped to aes_name.

    Returns
    -------
    list of str
        The CSS codeblocks.
    """
    css_prop = AES_CSS_MAP[aes_name]
    # A list means one CSS value per rule; anything else is a single value.
    if isinstance(css_value, list):
        return get_mark_css_for_rules(aes_name, css_prop, css_value)
    return get_mark_simple_css(aes_name, css_prop, css_value)
def pack(self, value=None):
    """Pack the struct in a binary representation.

    Merges the PCP/CFI/VID fields into the TCI before delegating to the
    parent pack(). If no arguments are set for a particular instance, it
    is interpreted as absence of VLAN information, and an empty binary
    string is returned.

    Returns:
        bytes: Binary representation of this instance.
    """
    if isinstance(value, type(self)):
        return value.pack()
    # No field set at all -> no VLAN information.
    if self.pcp is None and self.cfi is None and self.vid is None:
        return b''
    # Default any unset field to zero before merging.
    if self.pcp is None:
        self.pcp = 0
    if self.cfi is None:
        self.cfi = 0
    if self.vid is None:
        self.vid = 0
    # TCI layout: PCP (3 bits) | CFI (1 bit) | VID (12 bits).
    self._tci = (self.pcp << 13) | (self.cfi << 12) | self.vid
    return super().pack()
def qos_map_cos_traffic_class_cos3(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
map = ET.SubElement(qos, "map")
cos_traffic_class = ET.SubElement(map, "cos-traffic-class")
name_key = ET.SubElement(cos_traffic_class, "name")
name_key.text = kwargs.pop('name')
cos3 = ET.SubElement(cos_traffic_class, "cos3")
cos3.text = kwargs.pop('cos3')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value | Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool |
def embed(self, name, data=None):
    """Attach an image file and prepare for HTML embedding.

    This method should only be used to embed images.

    :param name: Path to the image to embed if data is None, or the name
        of the file if the ``data`` argument is given
    :param data: Contents of the image to embed, or None if the data is to
        be read from the file pointed to by the ``name`` argument
    """
    if data is None:
        # Read the image from disk; keep only the basename as its name.
        with open(name, 'rb') as fp:
            data = fp.read()
        name = os.path.basename(name)
    elif hasattr(data, 'read'):
        # File-like object: drain it.  (bytes has no .read, so bytes input
        # falls through untouched.)
        data = data.read()
    elif not isinstance(data, bytes):
        raise TypeError("Unable to read image contents")
    # NOTE(review): imghdr is deprecated/removed in newer Pythons — confirm
    # the project's supported versions before relying on it.
    subtype = imghdr.what(None, data)
    self.attach(name, data, 'image', subtype, True)
def deleteByPk(self, pk):
    '''
    deleteByPk - Delete object associated with given primary key
    '''
    # Fetch only the indexed fields; that is all deleteOne needs.
    obj = self.mdl.objects.getOnlyIndexedFields(pk)
    if obj:
        return self.deleteOne(obj)
    # Nothing matched the primary key: zero deletions.
    return 0
def backward(self, speed=1):
    """
    Drive the motor backwards.

    :param float speed:
        The speed at which the motor should turn. Can be any value between
        0 (stopped) and the default 1 (maximum speed).
    """
    # Non-PWM (digital) motors only support full-on or full-off.
    if isinstance(self.enable_device, DigitalOutputDevice) and speed not in (0, 1):
        raise ValueError(
            'backward speed must be 0 or 1 with non-PWM Motors')
    self.enable_device.off()
    self.phase_device.on()
    self.enable_device.value = speed
def design_list(self):
    """
    List all design documents for the current bucket.

    :return: A :class:`~couchbase.result.HttpResult` containing
        a dict, with keys being the ID of the design document.

    .. note::
        This information is derived using the
        ``pools/default/buckets/<bucket>ddocs`` endpoint, but the return
        value has been modified to match that of :meth:`design_get`.

    .. note::
        This function returns both 'production' and 'development' mode
        views. These two can be distinguished by the name of the design
        document being prefixed with the ``dev_`` identifier, e.g.
        ``_design/foo`` vs ``_design/dev_foo``.

    Example::

        for name, ddoc in mgr.design_list().value.items():
            print 'Design name {0}. Contents {1}'.format(name, ddoc)

    .. seealso:: :meth:`design_get`
    """
    ret = self._http_request(
        type=_LCB.LCB_HTTP_TYPE_MANAGEMENT,
        path="/pools/default/buckets/{0}/ddocs".format(self._cb.bucket),
        method=_LCB.LCB_HTTP_METHOD_GET)
    # Re-key the raw rows as {ddoc_id: ddoc_body} to mirror design_get().
    remapped = {
        row['doc']['meta']['id']: row['doc']['json']
        for row in ret.value['rows']
    }
    # Can't use normal assignment because 'value' is read-only;
    # mutate the dict in place instead.
    ret.value.clear()
    ret.value.update(remapped)
    return ret
def BoolEncoder(field_number, is_repeated, is_packed):
    """Returns an encoder for a boolean field."""
    false_byte = b'\x00'
    true_byte = b'\x01'

    def write_bool(write, element):
        # Map truthiness onto the single-byte wire values.
        return write(true_byte if element else false_byte)

    if is_packed:
        tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
        local_EncodeVarint = _EncodeVarint

        def EncodePackedField(write, value):
            # Packed: one tag, one length, then the raw bytes back to back.
            write(tag_bytes)
            local_EncodeVarint(write, len(value))
            for element in value:
                write_bool(write, element)
        return EncodePackedField
    if is_repeated:
        tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)

        def EncodeRepeatedField(write, value):
            # Repeated: a tag precedes every element.
            for element in value:
                write(tag_bytes)
                write_bool(write, element)
        return EncodeRepeatedField
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)

    def EncodeField(write, value):
        write(tag_bytes)
        return write_bool(write, value)
    return EncodeField
def _read_quoted(ctx: ReaderContext) -> llist.List:
    """Read a quoted form from the input stream."""
    # Consume the leading quote character.
    leading = ctx.reader.advance()
    assert leading == "'"
    quoted = _read_next_consuming_comment(ctx)
    # Produce the list form (quote <form>).
    return llist.l(_QUOTE, quoted)
def mobile_sign(self, id_code, country, phone_nr, language=None, signing_profile='LT_TM'):
    """ This can be used to add a signature to existing data files

    WARNING: Must have at least one datafile in the session
    """
    # Unless we are working with a previously created container, the
    # session must already hold at least one data file.
    if not (self.container and isinstance(self.container, PreviouslyCreatedContainer)):
        assert self.data_files, 'To use MobileSign endpoint the application must ' \
                                'add at least one data file to users session'
    params = {
        'SignerIDCode': id_code,
        'SignersCountry': country,
        'SignerPhoneNo': phone_nr,
        'Language': self.parse_language(language),
        'Role': SkipValue,
        'City': SkipValue,
        'StateOrProvince': SkipValue,
        'PostalCode': SkipValue,
        'CountryName': SkipValue,
        'ServiceName': self.service_name,
        'AdditionalDataToBeDisplayed': self.mobile_message,
        # Either LT or LT_TM, see: http://sk-eid.github.io/dds-documentation/api/api_docs/#mobilesign
        'SigningProfile': signing_profile,
        'MessagingMode': 'asynchClientServer',
        'AsyncConfiguration': SkipValue,
        'ReturnDocInfo': SkipValue,
        'ReturnDocData': SkipValue,
    }
    return self.__invoke('MobileSign', params)
def process_input_graph(func):
    """Decorator, ensuring first argument is a networkx graph object.

    If the first arg is a dict {node: succs}, a networkx graph equivalent
    to the dict will be sent in place of it."""
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        first = args[0]
        if not isinstance(first, nx.DiGraph):
            # Convert the {node: successors} dict to an oriented nx graph.
            converted = dict_to_nx(first, oriented=True)
            args = (converted,) + tuple(args[1:])
        return func(*args, **kwargs)
    return wrapped_func
def xcorr_plot(template, image, shift=None, cc=None, cc_vec=None, **kwargs):
    """
    Plot a template overlying an image aligned by correlation.

    :type template: numpy.ndarray
    :param template: Short template image
    :type image: numpy.ndarray
    :param image: Long master image
    :type shift: int
    :param shift: Shift to apply to template relative to image, in samples
    :type cc: float
    :param cc: Cross-correlation at shift
    :type cc_vec: numpy.ndarray
    :param cc_vec: Cross-correlation vector.
    :type save: bool
    :param save: Whether to save the plot or not.
    :type savefile: str
    :param savefile: File name to save to

    :returns: :class:`matplotlib.figure.Figure`

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.plotting import xcorr_plot
    >>> from eqcorrscan.utils.stacking import align_traces
    >>> st = read().detrend('simple').filter('bandpass', freqmin=2, freqmax=15)
    >>> shifts, ccs = align_traces([st[0], st[1]], 40)
    >>> shift = shifts[1] * st[1].stats.sampling_rate
    >>> cc = ccs[1]
    >>> xcorr_plot(template=st[1].data, image=st[0].data, shift=shift,
    ...            cc=cc)  # doctest: +SKIP

    .. image:: ../../plots/xcorr_plot.png
    """
    import matplotlib.pyplot as plt
    if shift is None or cc is None:
        # Derive best shift and cc from the full correlation vector.
        if not isinstance(cc_vec, np.ndarray):
            print('Given cc: %s and shift: %s' % (cc, shift))
            raise IOError('Must provide either cc_vec, or cc and shift')
        shift = np.abs(cc_vec).argmax()
        cc = cc_vec[shift]
    # Normalise both traces to unit peak amplitude before plotting.
    image_x = np.arange(len(image))
    plt.plot(image_x, image / abs(image).max(), 'k', lw=1.3, label='Image')
    template_x = np.arange(len(template)) + shift
    plt.plot(template_x, template / abs(template).max(), 'r', lw=1.1,
             label='Template')
    plt.title('Shift=%s, Correlation=%s' % (shift, cc))
    fig = plt.gcf()
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
def _call_and_format(self, req, props=None):
    """
    Invokes a single request against a handler using _call() and traps any errors,
    formatting them using _err(). If the request is successful it is wrapped in a
    JSON-RPC 2.0 compliant dict with keys: 'jsonrpc', 'id', 'result'.

    :Parameters:
      req
        A single dict representing a single JSON-RPC request
      props
        Application defined properties to set on RequestContext for use with filters.
        For example: authentication headers. Must be a dict.
    """
    if not isinstance(req, dict):
        return err_response(None, ERR_INVALID_REQ,
                            "Invalid Request. %s is not an object." % str(req))
    # FIX: dict.has_key() was removed in Python 3 -- use .get() instead.
    # 'id' is optional (notifications); echo it back when present.
    reqid = req.get("id")
    # FIX: identity comparison with None instead of '== None'.
    if props is None:
        props = {}
    context = RequestContext(props, req)
    if self.filters:
        # Pre-filters may short-circuit the request by setting an error.
        for f in self.filters:
            f.pre(context)
        if context.error:
            return context.error
    resp = None
    try:
        result = self._call(context)
        resp = {"jsonrpc": "2.0", "id": reqid, "result": result}
    # FIX: Python 3 'except ... as e' syntax (old comma form is a SyntaxError).
    except RpcException as e:
        resp = err_response(reqid, e.code, e.msg, e.data)
    # FIX: narrowed bare 'except:' so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        self.log.exception("Error processing request: %s" % str(req))
        resp = err_response(reqid, ERR_UNKNOWN, "Server error. Check logs for details.")
    if self.filters:
        # Post-filters see (and may act on) the final response.
        context.response = resp
        for f in self.filters:
            f.post(context)
    return resp
def create_cmdclass(prerelease_cmd=None, package_data_spec=None,
                    data_files_spec=None):
    """Create a command class with the given optional prerelease class.

    Parameters
    ----------
    prerelease_cmd: (name, Command) tuple, optional
        The command to run before releasing.
    package_data_spec: dict, optional
        A dictionary whose keys are the dotted package names and
        whose values are a list of glob patterns.
    data_files_spec: list, optional
        A list of (path, dname, pattern) tuples where the path is the
        `data_files` install path, dname is the source directory, and the
        pattern is a glob pattern.

    Notes
    -----
    We use specs so that we can find the files *after* the build
    command has run.

    The package data glob patterns should be relative paths from the package
    folder containing the __init__.py file, which is given as the package
    name, e.g. `dict(foo=['./bar/*', './baz/**'])`.

    The data files directories should be absolute paths or relative paths
    from the root directory of the repository. Data files are specified
    differently from `package_data` because we need a separate path entry
    for each nested folder in `data_files`, and this makes it easier to
    parse, e.g. `('share/foo/bar', 'pkgname/bizz, '*')`.
    """
    wrapped = [prerelease_cmd] if prerelease_cmd else []
    if package_data_spec or data_files_spec:
        # The file handler must run as part of every wrapped command.
        wrapped.append('handle_files')
    wrapper = functools.partial(_wrap_command, wrapped)
    handle_files = _get_file_handler(package_data_spec, data_files_spec)
    # bdist_egg is only permitted when explicitly requested on the CLI.
    if 'bdist_egg' in sys.argv:
        egg = wrapper(bdist_egg, strict=True)
    else:
        egg = bdist_egg_disabled
    cmdclass = dict(
        build_py=wrapper(build_py, strict=is_repo),
        bdist_egg=egg,
        sdist=wrapper(sdist, strict=True),
        handle_files=handle_files,
    )
    if bdist_wheel:
        cmdclass['bdist_wheel'] = wrapper(bdist_wheel, strict=True)
    cmdclass['develop'] = wrapper(develop, strict=True)
    return cmdclass
def parse_unit(item, group, slash):
    """Parse surface and power from unit text."""
    surface = item.group(group).replace('.', '')
    # A trailing slash (division) negates the resulting power.
    sign = -1 if slash else 1
    matches = re.findall(r'\-?[0-9%s]+' % r.SUPERSCRIPTS, surface)
    if matches:
        # Map unicode superscript tokens back to ASCII digits.
        power_txt = ''.join(r.UNI_SUPER.get(tok, tok) for tok in matches)
        new_power = sign * int(power_txt)
        surface = re.sub(r'\^?\-?[0-9%s]+' % r.SUPERSCRIPTS, '', surface)
    elif re.findall(r'\bcubed\b', surface):
        new_power = sign * 3
        surface = re.sub(r'\bcubed\b', '', surface).strip()
    elif re.findall(r'\bsquared\b', surface):
        new_power = sign * 2
        surface = re.sub(r'\bsquared\b', '', surface).strip()
    else:
        new_power = sign
    return surface, new_power
def _build_kernel(self):
"""Private method to build kernel matrix
Runs public method to build kernel matrix and runs
additional checks to ensure that the result is okay
Returns
-------
Kernel matrix, shape=[n_samples, n_samples]
Raises
------
RuntimeWarning : if K is not symmetric
"""
kernel = self.build_kernel()
kernel = self.symmetrize_kernel(kernel)
kernel = self.apply_anisotropy(kernel)
if (kernel - kernel.T).max() > 1e-5:
warnings.warn("K should be symmetric", RuntimeWarning)
if np.any(kernel.diagonal == 0):
warnings.warn("K should have a non-zero diagonal", RuntimeWarning)
return kernel | Private method to build kernel matrix
Runs public method to build kernel matrix and runs
additional checks to ensure that the result is okay
Returns
-------
Kernel matrix, shape=[n_samples, n_samples]
Raises
------
RuntimeWarning : if K is not symmetric |
def start(self):
    '''Serving loop.'''
    print('Waiting for a client to connect to url http://%s:%d/' % (self.host, self.port))
    self.state = RpcServer._STATE_RUN
    # Serve one request at a time until another thread flips the state.
    while self.state == RpcServer._STATE_RUN:
        self.server.handle_request()
    # Loop exited: release the socket and mark the server idle.
    self.server.server_close()
    self.state = RpcServer._STATE_IDLE
def _compute_rtfilter_map(self):
    """Returns neighbor's RT filter (permit/allow filter based on RT).

    Walks RT filter tree and computes current RT filters for each peer that
    have advertised RT NLRIs.

    Returns:
        dict of peer, and `set` of rts that a particular neighbor is
        interested in.
    """
    rtfilter_map = {}

    def filter_for(neigh):
        # Lazily create the per-neighbor RT set.
        return rtfilter_map.setdefault(neigh, set())

    # Check if we have to use all paths or just the best path.
    if self._common_config.max_path_ext_rtfilter_all:
        # Learn RT filters from *all* known paths of every RtDest.
        for rtcdest in self._table_manager.get_rtc_table().values():
            for path in rtcdest.known_path_list:
                neigh = path.source
                # Paths with no source (network controller) are ignored.
                if neigh is None:
                    continue
                filter_for(neigh).add(path.nlri.route_target)
    else:
        # eBGP peers: best path's RT only; iBGP peers: all known paths' RTs.
        for rtcdest in self._table_manager.get_rtc_table().values():
            best = rtcdest.best_path
            # If this destination does not have any path, we continue.
            if not best:
                continue
            neigh = best.source
            if neigh and neigh.is_ebgp_peer():
                filter_for(neigh).add(best.nlri.route_target)
            else:
                for path in rtcdest.known_path_list:
                    neigh = path.source
                    # Ignore NC and eBGP peers here.
                    if neigh and not neigh.is_ebgp_peer():
                        filter_for(neigh).add(path.nlri.route_target)
    return rtfilter_map
def start(self, test_connection=True):
    """Checks for forking and starts/restarts if desired."""
    # Reset internal state if the process has forked since the last start.
    self._detect_fork()
    super(ForkAwareLockerClient, self).start(test_connection)
def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
    """Set reverb parameters; a negative value leaves that parameter unchanged.

    roomsize Reverb room size value (0.0-1.2)
    damping Reverb damping value (0.0-1.0)
    width Reverb width value (0.0-100.0)
    level Reverb level value (0.0-1.0)
    """
    # FIX: local renamed from `set` (shadowed the builtin) and the bitmask
    # is built with |= as befits flag bits.
    flags = 0
    if roomsize >= 0:
        flags |= 0b0001
    if damping >= 0:
        flags |= 0b0010
    if width >= 0:
        flags |= 0b0100
    if level >= 0:
        flags |= 0b1000
    return fluid_synth_set_reverb_full(self.synth, flags, roomsize, damping, width, level)
def handle_notification(self, data):
    """Handle Callback from a Bluetooth (GATT) request.

    Dispatches on the first byte of the payload: device status updates
    are parsed into mode/valve/temperature state; schedule replies are
    stored per-day; anything else is logged and ignored.
    """
    _LOGGER.debug("Received notification from the device..")
    # First byte identifies the notification type.
    if data[0] == PROP_INFO_RETURN and data[1] == 1:
        _LOGGER.debug("Got status: %s" % codecs.encode(data, 'hex'))
        status = Status.parse(data)
        _LOGGER.debug("Parsed status: %s", status)
        self._raw_mode = status.mode
        self._valve_state = status.valve
        self._target_temperature = status.target_temp
        # Derive the high-level mode from the status flag bits.
        if status.mode.BOOST:
            self._mode = Mode.Boost
        elif status.mode.AWAY:
            self._mode = Mode.Away
            self._away_end = status.away
        elif status.mode.MANUAL:
            # Manual mode at the OFF/ON sentinel temperatures represents a
            # fully closed/open valve rather than a plain manual setpoint.
            if status.target_temp == EQ3BT_OFF_TEMP:
                self._mode = Mode.Closed
            elif status.target_temp == EQ3BT_ON_TEMP:
                self._mode = Mode.Open
            else:
                self._mode = Mode.Manual
        else:
            self._mode = Mode.Auto
        _LOGGER.debug("Valve state: %s", self._valve_state)
        _LOGGER.debug("Mode: %s", self.mode_readable)
        _LOGGER.debug("Target temp: %s", self._target_temperature)
        _LOGGER.debug("Away end: %s", self._away_end)
    elif data[0] == PROP_SCHEDULE_RETURN:
        # Schedule notifications are parsed and stored per weekday.
        parsed = self.parse_schedule(data)
        self._schedule[parsed.day] = parsed
    else:
        _LOGGER.debug("Unknown notification %s (%s)", data[0], codecs.encode(data, 'hex'))
def environment_schedule_unset(self, name):
    """Schedules unsetting (removing) an environment variable when creating
    the next guest process. This affects the
    :py:func:`IGuestSession.environment_changes` attribute.

    in name of type str
        Name of the environment variable to unset. This cannot be empty
        nor can it contain any equal signs.
    """
    # NOTE(review): `basestring` exists only on Python 2 — on Python 3 this
    # raises NameError. Confirm the binding's supported Python versions.
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    # Delegate the actual unset to the underlying COM/XPCOM call.
    self._call("environmentScheduleUnset",
               in_p=[name])
def find(cls, name):
    '''Find the exception class by name'''
    # Lazily build the name -> class mapping on first use by scanning the
    # exceptions module for NSQException subclasses that declare a name.
    if not cls.mapping:  # pragma: no branch
        for _, obj in inspect.getmembers(exceptions):
            if inspect.isclass(obj):
                if issubclass(obj, exceptions.NSQException):  # pragma: no branch
                    if hasattr(obj, 'name'):
                        cls.mapping[obj.name] = obj
    klass = cls.mapping.get(name)
    # FIX: identity comparison with None instead of '== None'.
    if klass is None:
        raise TypeError('No matching exception for %s' % name)
    return klass
def p_expr_GT_expr(p):
    """ expr : expr GT expr
    """
    # NOTE: the docstring above is the PLY grammar rule — do not edit it.
    # Builds a 'GT' binary node; the lambda lets make_binary fold constants.
    p[0] = make_binary(p.lineno(2), 'GT', p[1], p[3], lambda x, y: x > y)
def Increment(self, delta, fields=None):
    """Increments counter value by a given delta."""
    # Counters are monotonic: reject negative deltas outright.
    if delta < 0:
        raise ValueError(
            "Counter increment should not be < 0 (received: %d)" % delta)
    key = _FieldsToKey(fields)
    self._metric_values[key] = self.Get(fields=fields) + delta
def path_in_cache(self, filename, metahash):
    """Generates the path to a file in the mh cache.

    The generated path does not imply the file's existence!

    Args:
      filename: Filename relative to buildroot
      rule: A targets.SomeBuildRule object
      metahash: hash object
    Raises:
      CacheMiss: if the file does not exist in the cache.
    """
    candidate = self._genpath(filename, metahash)
    if not os.path.exists(candidate):
        raise CacheMiss
    return candidate
def check_attr(node, n):
    """ Check if ATTR has to be normalized
    after this instruction has been translated
    to intermediate code.

    Returns node.children[n] when present, otherwise None.
    """
    # Implicitly yields None when the n-th child does not exist.
    return node.children[n] if len(node.children) > n else None
def escape(msg):
    """Takes a raw IRC message and returns a girc-escaped message."""
    # Protect literal escape characters with a temporary placeholder.
    msg = msg.replace(escape_character, 'girc-escaped-character')
    for escape_key, irc_char in format_dict.items():
        msg = msg.replace(irc_char, escape_character + escape_key)
    # Convert colour codes, which carry trailing colour arguments.
    out = ''
    while msg:
        if msg.startswith(escape_character + 'c'):
            out += msg[:2]
            msg = msg[2:]
            if not msg:
                # Colour escape at end of message: emit an empty spec.
                out += '[]'
                continue
            colours, msg = extract_irc_colours(msg)
            out += colours
        else:
            out += msg[0]
            msg = msg[1:]
    # Restore literal escape characters as doubled escapes.
    return out.replace('girc-escaped-character', escape_character + escape_character)
def get_encoding_name(self, encoding):
    """Given an encoding provided by the user, will return a
    canonical encoding name; and also validate that the encoding
    is supported.

    TODO: Support encoding aliases: pc437 instead of cp437.
    """
    canonical = CodePages.get_encoding_name(encoding)
    # The canonical name must be one this profile actually supports.
    if canonical not in self.codepages:
        supported = ','.join(self.codepages.keys())
        raise ValueError((
            'Encoding "{}" cannot be used for the current profile. '
            'Valid encodings are: {}'
        ).format(canonical, supported))
    return canonical
def _retrieve_config_xml(config_xml, saltenv):
    '''
    Helper to cache the config XML and raise a CommandExecutionError if we fail
    to do so. If we successfully cache the file, return the cached path.
    '''
    cached_path = __salt__['cp.cache_file'](config_xml, saltenv)
    if cached_path:
        return cached_path
    raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
def _fill_get_item_cache(self, catalog, key):
    """
    get from redis, cache locally then return
    :param catalog: catalog name
    :param key:
    :return:
    """
    lang = self._get_lang()
    # Refresh the whole catalog locally in one round trip.
    entries = self.get_all(catalog)
    self.ITEM_CACHE[lang][catalog] = {item['value']: item['name'] for item in entries}
    return self.ITEM_CACHE[lang][catalog].get(key)
def merge_dicts(d1, d2, _path=None):
    """
    Merge dictionary d2 into d1, overriding entries in d1 with values from d2.

    d1 is mutated.

    _path is for internal, recursive use: it tracks the dotted key path
    from the root so MissingValue placeholders can be named.
    """
    if _path is None:
        _path = ()
    if isinstance(d1, dict) and isinstance(d2, dict):
        for k, v in d2.items():
            # Give unnamed MissingValue placeholders their dotted-path name.
            if isinstance(v, MissingValue) and v.name is None:
                v.name = '.'.join(_path + (k,))
            if isinstance(v, DeletedValue):
                # DeletedValue removes the key from d1 entirely.
                d1.pop(k, None)
            elif k not in d1:
                if isinstance(v, dict):
                    # Recurse into {} so nested dicts are deep-copied,
                    # never shared with d2.
                    d1[k] = merge_dicts({}, v, _path + (k,))
                else:
                    d1[k] = v
            else:
                if isinstance(d1[k], dict) and isinstance(v, dict):
                    d1[k] = merge_dicts(d1[k], v, _path + (k,))
                elif isinstance(d1[k], list) and isinstance(v, list):
                    # Lists are only supported as leaves
                    d1[k] += v
                elif isinstance(d1[k], MissingValue):
                    d1[k] = v
                elif d1[k] is None:
                    d1[k] = v
                elif type(d1[k]) == type(v):
                    d1[k] = v
                else:
                    # Mismatched concrete types are a configuration error.
                    raise TypeError('Refusing to replace a %s with a %s'
                                    % (type(d1[k]), type(v)))
    else:
        raise TypeError('Cannot merge a %s with a %s' % (type(d1), type(d2)))
    return d1
def startswith(haystack, prefix):
    """
    py3 comp startswith
    :param haystack:
    :param prefix:
    :return:
    """
    if haystack is None:
        return None
    # Python 3 normalises both operands to bytes first; Python 2 can
    # compare directly.
    if sys.version_info[0] >= 3:
        return to_bytes(haystack).startswith(to_bytes(prefix))
    return haystack.startswith(prefix)
def remove_nans_1D(*args) -> tuple:
    """Remove nans in a set of 1D arrays.

    Removes indicies in all arrays if any array is nan at that index.
    All input arrays must have the same size.

    Parameters
    ----------
    args : 1D arrays

    Returns
    -------
    tuple
        Tuple of 1D arrays in same order as given, with nan indicies removed.
    """
    # Union of nan positions across every input array.
    bad = np.zeros(np.asarray(args[0]).shape, dtype=bool)
    for arr in args:
        bad |= np.isnan(arr)
    keep = ~bad
    return tuple(np.array(arr)[keep] for arr in args)
def query_edges_by_pubmed_identifiers(self, pubmed_identifiers: List[str]) -> List[Edge]:
    """Get all edges annotated to the documents identified by the given PubMed identifiers."""
    # Match citations that are PubMed-typed AND whose reference is one of
    # the given identifiers; join Edge -> Evidence -> Citation to reach them.
    fi = and_(Citation.type == CITATION_TYPE_PUBMED, Citation.reference.in_(pubmed_identifiers))
    return self.session.query(Edge).join(Evidence).join(Citation).filter(fi).all()
def ks_synth(freq):
    """
    Synthesize the given frequency into a Stream by using a model based on
    Karplus-Strong.
    """
    # Initial memory: three sinusoid partials plus white noise and a
    # [-1, 1] alternating seed, averaged down to keep amplitude bounded.
    partials = sum(lz.sinusoid(harmonic * freq) for harmonic in [1, 3, 9])
    ks_mem = (partials + lz.white_noise() + lz.Stream(-1, 1)) / 5
    return lz.karplus_strong(freq, memory=ks_mem)
def get_author_tags(index_page):
    """
    Parse `authors` from HTML ``<meta>`` and dublin core.

    Args:
        index_page (str): HTML content of the page you wisht to analyze.

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    dom = dhtmlparser.parseString(index_page)
    # Concatenate results from both extraction strategies.
    return get_html_authors(dom) + get_dc_authors(dom)
def transform(self, attrs):
    """Perform all actions on a given attribute dict."""
    # NOTE(review): the helpers are defined elsewhere; the call order looks
    # significant (collect, then add defaults, then write back) -- confirm
    # before reordering.
    self.collect(attrs)
    self.add_missing_implementations()
    self.fill_attrs(attrs) | Perform all actions on a given attribute dict.
def diff_commonPrefix(self, text1, text2):
    """Determine the common prefix of two strings.
    Args:
        text1: First string.
        text2: Second string.
    Returns:
        The number of characters common to the start of each string.
    """
    # Quick check for common null cases.
    if not text1 or not text2 or text1[0] != text2[0]:
        return 0
    # Binary search.
    # Performance analysis: https://neil.fraser.name/news/2007/10/09/
    pointermin = 0
    pointermax = min(len(text1), len(text2))
    pointermid = pointermax
    pointerstart = 0
    # Invariant: text1[:pointermin] == text2[:pointermin], and a mismatch (if
    # any) lies at or after pointermax.
    while pointermin < pointermid:
        if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
            pointermin = pointermid
            # Skip re-comparing the prefix already known to match.
            pointerstart = pointermin
        else:
            pointermax = pointermid
        pointermid = (pointermax - pointermin) // 2 + pointermin
    return pointermid | Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string. |
def getArguments(parser):
    "Provides additional validation of the arguments collected by argparse."
    args = parser.parse_args()
    # `order` must lie in [0, 5]; parser.error() prints the message and exits.
    if args.order < 0 or args.order > 5:
        parser.error('The order has to be a number between 0 and 5.')
    return args | Provides additional validation of the arguments collected by argparse.
def alignment_changed(self, settings, key, user_data):
    """If the gconf var window_halignment be changed, this method will
    be called and will call the move function in guake.
    """
    # settings/key/user_data are the settings-changed signal arguments; only
    # the side effect of repositioning the window matters here.
    RectCalculator.set_final_window_rect(self.settings, self.guake.window)
    self.guake.set_tab_position()
    self.guake.force_move_if_shown() | If the gconf var window_halignment be changed, this method will
be called and will call the move function in guake. |
def makeAudibleSong(self):
    """Use mass to render wav soundtrack.
    """
    # Five phrases (sound0..sound4); each note is 220 Hz (A3) transposed by
    # 2**(k/12) semitones, with d giving the note duration in seconds.
    sound0=n.hstack((sy.render(220,d=1.5),
                     sy.render(220*(2**(7/12)),d=2.5),
                     sy.render(220*(2**(-5/12)),d=.5),
                     sy.render(220*(2**(0/12)),d=1.5),
                     ))
    sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
                     sy.render(220*(2**(7/12)),d=.25),
                     sy.render(220*(2**(0/12)),d=.25),
                     sy.render(220*(2**(7/12)),d=.25),
                     sy.render(220*(2**(0/12)),d=.25),
                     sy.render(220*(2**(7/12)),d=.25),
                     sy.render(220*(2**(0/12)),d=.25),
                     sy.render(220*(2**(7/12)),d=.25),
                     ))
    sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
                     sy.render(220*(2**(0/12)),d=.25),
                     sy.render(220*(2**(7/12)),d=.75),
                     sy.render(220*(2**(0/12)),d=.25),
                     sy.render(220*(2**(-1/12)),d=2.0),
                     ))
    sound3=n.hstack((n.zeros(44100),
                     sy.render(220*(2**(-1/12)),d=.5),
                     sy.render(220*(2**(8/12)),d=2.),
                     sy.render(220*(2**(8/12)),d=.25),
                     sy.render(220*(2**(8/12)),d=.25),
                     sy.render(220*(2**(-1/12)),d=1.75),
                     sy.render(220*(2**(-1/12)),d=.25),
                     ))
    sound4=n.hstack((
        sy.render(220*(2**(0/12)),d=1.),
        sy.render(220*(2**(7/12)),d=.5),
        sy.render(220*(2**(11/12)),d=.5),
        sy.render(220*(2**(12/12)),d=.75),
        sy.render(220*(2**(11/12)),d=.25),
        sy.render(220*(2**(12/12)),d=1.),
        sy.render(220*(2**(8/12)),d=2.),
        sy.render(220*(2**(7/12)),d=2.),
        sy.render(220*(2**(-1/12)),d=2.),
        n.zeros(2*44100)
        ))
    # Concatenate all phrases and write the result to disk.
    sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
    UT.write(sound,"sound.wav") | Use mass to render wav soundtrack.
def _AddClearFieldMethod(message_descriptor, cls):
    """Helper for _AddMessageMethods()."""
    def ClearField(self, field_name):
        """Clears the field named `field_name`; also accepts a oneof name."""
        try:
            field = message_descriptor.fields_by_name[field_name]
        except KeyError:
            try:
                # Fall back to oneof names: clear whichever member is set.
                field = message_descriptor.oneofs_by_name[field_name]
                if field in self._oneofs:
                    field = self._oneofs[field]
                else:
                    return
            except KeyError:
                raise ValueError('Protocol message %s() has no "%s" field.' %
                                 (message_descriptor.name, field_name))
        if field in self._fields:
            # To match the C++ implementation, we need to invalidate iterators
            # for map fields when ClearField() happens.
            if hasattr(self._fields[field], 'InvalidateIterators'):
                self._fields[field].InvalidateIterators()
            # Note: If the field is a sub-message, its listener will still point
            # at us. That's fine, because the worst that can happen is that it
            # will call _Modified() and invalidate our byte size. Big deal.
            del self._fields[field]
            if self._oneofs.get(field.containing_oneof, None) is field:
                del self._oneofs[field.containing_oneof]
        # Always call _Modified() -- even if nothing was changed, this is
        # a mutating method, and thus calling it should cause the field to become
        # present in the parent message.
        self._Modified()
    cls.ClearField = ClearField | Helper for _AddMessageMethods().
def login_to_portal(username, password, client, retries=2, delay=0):
    """Log `username` into the MemberSuite Portal.
    Returns a PortalUser object if successful, raises
    LoginToPortalError if not.
    Will retry logging in if a GeneralException occurs, up to `retries`.
    Will pause `delay` seconds between retries.
    """
    if not client.session_id:
        client.request_session()
    concierge_request_header = client.construct_concierge_header(
        url=("http://membersuite.com/contracts/IConciergeAPIService/"
             "LoginToPortal"))
    attempts = 0
    # NOTE(review): if the final attempt falls into one of the `continue`
    # branches below, the loop exits and the function implicitly returns None
    # instead of raising LoginToPortalError -- confirm this is intended.
    while attempts < retries:
        if attempts:
            time.sleep(delay)
        result = client.client.service.LoginToPortal(
            _soapheaders=[concierge_request_header],
            portalUserName=username,
            portalPassword=password)
        login_to_portal_result = result["body"]["LoginToPortalResult"]
        if login_to_portal_result["Success"]:
            portal_user = login_to_portal_result["ResultValue"]["PortalUser"]
            session_id = get_session_id(result=result)
            return PortalUser(membersuite_object_data=portal_user,
                              session_id=session_id)
        else:
            attempts += 1
            try:
                error_code = login_to_portal_result[
                    "Errors"]["ConciergeError"][0]["Code"]
            except IndexError:  # Not a ConciergeError
                continue
            else:
                # Only GeneralException is retried; anything else raises now.
                if attempts < retries and error_code == "GeneralException":
                    continue
                raise LoginToPortalError(result=result) | Log `username` into the MemberSuite Portal.
Returns a PortalUser object if successful, raises
LoginToPortalError if not.
Will retry logging in if a GeneralException occurs, up to `retries`.
Will pause `delay` seconds between retries. |
def ws_disconnect(message):
    """
    Channels connection close.
    Deregister the client
    """
    # The language code was stashed in the channel session at connect time.
    language = message.channel_session['knocker']
    gr = Group('knocker-{0}'.format(language))
    gr.discard(message.reply_channel) | Channels connection close.
Deregister the client |
def wheel(self, package, options=None):
    """Creates a wheel of the given package from this virtual environment,
    as specified in pip's package syntax or a tuple of ('name', 'ver'),
    only if it is not already installed. Some valid examples:
    'Django'
    'Django==1.5'
    ('Django', '1.5')
    The `options` is a list of strings that can be used to pass to
    pip."""
    # NOTE(review): the docstring says "only if it is not already installed",
    # but no installed-check on `package` is performed below -- confirm.
    if self.readonly:
        raise VirtualenvReadonlyException()
    if options is None:
        options = []
    # Normalize ('name', 'ver') tuples to pip's 'name==ver' syntax.
    if isinstance(package, tuple):
        package = '=='.join(package)
    # Building wheels requires the 'wheel' package inside the virtualenv.
    if not self.is_installed('wheel'):
        raise PackageWheelException((0, "Wheel package must be installed in the virtual environment", package))
    if not isinstance(options, list):
        raise ValueError("Options must be a list of strings.")
    try:
        self._execute_pip(['wheel', package] + options)
    except subprocess.CalledProcessError as e:
        raise PackageWheelException((e.returncode, e.output, package)) | Creates a wheel of the given package from this virtual environment,
as specified in pip's package syntax or a tuple of ('name', 'ver'),
only if it is not already installed. Some valid examples:
'Django'
'Django==1.5'
('Django', '1.5')
The `options` is a list of strings that can be used to pass to
pip. |
def get_help(self, is_category, item):
    """Sends documentation on <item> to <callback>.
    This can be used for programmatically accessing documentation.
    Keyword arguments:
    is_category -- <bool>; Set this to <True> if <item> is for
                   getting documentation on a permission level and
                   <False> if <item> is for getting documentation on
                   a command.
    item -- <str>; If <is_category> is <True>, this should be one of
            <"core">, <"mod"> or <"admin"> to get
            documentation on the commands specific to that
            permission level. If <is_category> is <False>,
            this should be the name of the command to get
            documentation on.
    """
    # Build the help-request packet; exactly one of "category"/"command" is set.
    data = {"cmd": "help"}
    if is_category:
        data["category"] = item
    else:
        data["command"] = item
    self._send_packet(data) | Sends documentation on <item> to <callback>.
This can be used for programmatically accessing documentation.
Keyword arguments:
is_category -- <bool>; Set this to <True> if <item> is for
getting documentation on a permission level and
<False> if <item> is for getting documentation on
a command.
item -- <str>; If <is_category> is <True>, this should be one of
<"core">, <"mod"> or <"admin"> to get
documentation on the commands specific to that
permission level. If <is_category> is <False>,
this should be the name of the command to get
documentation on. |
def ang2pix(nside, theta, phi, nest=False, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ang2pix`."""
    # Convert healpy's (theta, phi) convention to (lon, lat), then delegate.
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    return lonlat_to_healpix(lon, lat, nside, order='nested' if nest else 'ring') | Drop-in replacement for healpy `~healpy.pixelfunc.ang2pix`.
def registerItem(self, regItem):
    """ Adds a ClassRegItem object to the registry.
    """
    super(RtiRegistry, self).registerItem(regItem)
    # Also index the item under every file extension it declares.
    for ext in regItem.extensions:
        self._registerExtension(ext, regItem) | Adds a ClassRegItem object to the registry.
def add_media_description(self, media_description):
    """Adds a media_description.
    arg: media_description (displayText): the new media_description
    raise: InvalidArgument - ``media_description`` is invalid
    raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise: NullArgument - ``media_description`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Refuse mutation when the underlying metadata is read-only.
    if self.get_media_descriptions_metadata().is_read_only():
        raise NoAccess()
    self.add_or_replace_value('mediaDescriptions', media_description) | Adds a media_description.
arg: media_description (displayText): the new media_description
raise: InvalidArgument - ``media_description`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``media_description`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def Main():
    """The main program function.
    Returns:
        bool: True if successful or False if not.
    """
    args_parser = argparse.ArgumentParser(
        description='Validates an artifact definitions file.')
    args_parser.add_argument(
        'filename',
        nargs='?',
        action='store',
        metavar='artifacts.yaml',
        default=None,
        help=('path of the file that contains the artifact '
              'definitions.'))
    options = args_parser.parse_args()
    # No filename given: show usage and fail.
    if not options.filename:
        print('Source value is missing.')
        print('')
        args_parser.print_help()
        print('')
        return False
    if not os.path.isfile(options.filename):
        print('No such file: {0:s}'.format(options.filename))
        print('')
        return False
    print('Validating: {0:s}'.format(options.filename))
    validator = ArtifactDefinitionsValidator()
    if not validator.CheckFile(options.filename):
        print('FAILURE')
        return False
    print('SUCCESS')
    return True | The main program function.
Returns:
bool: True if successful or False if not. |
def _FormatDescription(self, event):
    """Formats the description.
    Args:
        event (EventObject): event.
    Returns:
        str: formatted description field.
    Raises:
        NoFormatterFound: if no message formatter exists for the event's
            data type.
    """
    date_time_string = timelib.Timestamp.CopyToIsoFormat(
        event.timestamp, timezone=self._output_mediator.timezone)
    timestamp_description = event.timestamp_desc or 'UNKNOWN'
    message, _ = self._output_mediator.GetFormattedMessages(event)
    if message is None:
        data_type = getattr(event, 'data_type', 'UNKNOWN')
        raise errors.NoFormatterFound(
            'Unable to find event formatter for: {0:s}.'.format(data_type))
    # Strip the field delimiter from the message so the output stays parseable.
    description = '{0:s}; {1:s}; {2:s}'.format(
        date_time_string, timestamp_description,
        message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))
    return self._SanitizeField(description) | Formats the description.
Args:
event (EventObject): event.
Returns:
str: formatted description field. |
def maximum_cut(G, sampler=None, **sampler_args):
    """Returns an approximate maximum cut.
    Defines an Ising problem with ground states corresponding to
    a maximum cut and uses the sampler to sample from it.
    A maximum cut is a subset S of the vertices of G such that
    the number of edges between S and the complementary subset
    is as large as possible.
    Parameters
    ----------
    G : NetworkX graph
        The graph on which to find a maximum cut.
    sampler
        A binary quadratic model sampler. A sampler is a process that
        samples from low energy states in models defined by an Ising
        equation or a Quadratic Unconstrained Binary Optimization
        Problem (QUBO). A sampler is expected to have a 'sample_qubo'
        and 'sample_ising' method. A sampler is expected to return an
        iterable of samples, in order of increasing energy. If no
        sampler is provided, one must be provided using the
        `set_default_sampler` function.
    sampler_args
        Additional keyword parameters are passed to the sampler.
    Returns
    -------
    S : set
        A maximum cut of G.
    Example
    -------
    This example uses a sampler from
    `dimod <https://github.com/dwavesystems/dimod>`_ to find a maximum cut
    for a graph of a Chimera unit cell created using the `chimera_graph()`
    function.
    >>> import dimod
    >>> import dwave_networkx as dnx
    >>> samplerSA = dimod.SimulatedAnnealingSampler()
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> cut = dnx.maximum_cut(G, samplerSA)
    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.
    """
    # In order to form the Ising problem, we want to increase the
    # energy by 1 for each edge between two nodes of the same color.
    # The linear biases can all be 0.
    h = {v: 0. for v in G}
    J = {(u, v): 1 for u, v in G.edges}
    # NOTE(review): `sampler` defaults to None yet is used unconditionally
    # here; the documented `set_default_sampler` fallback is not applied in
    # this body -- presumably a decorator supplies it. Confirm before relying
    # on the default.
    # draw the lowest energy sample from the sampler
    response = sampler.sample_ising(h, J, **sampler_args)
    sample = next(iter(response))
    # Vertices with spin >= 0 form one side of the cut.
    return set(v for v in G if sample[v] >= 0) | Returns an approximate maximum cut.
Defines an Ising problem with ground states corresponding to
a maximum cut and uses the sampler to sample from it.
A maximum cut is a subset S of the vertices of G such that
the number of edges between S and the complementary subset
is as large as possible.
Parameters
----------
G : NetworkX graph
The graph on which to find a maximum cut.
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
S : set
A maximum cut of G.
Example
-------
This example uses a sampler from
`dimod <https://github.com/dwavesystems/dimod>`_ to find a maximum cut
for a graph of a Chimera unit cell created using the `chimera_graph()`
function.
>>> import dimod
>>> import dwave_networkx as dnx
>>> samplerSA = dimod.SimulatedAnnealingSampler()
>>> G = dnx.chimera_graph(1, 1, 4)
>>> cut = dnx.maximum_cut(G, samplerSA)
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample. |
def get_comments_data(self, slug):
"""
Returns a flat list of all comments in XML dump. Formatted as the JSON
output from Wordpress API.
Keys:
('content', 'slug', 'date', 'status', 'author', 'ID', 'parent')
date format: '%Y-%m-%dT%H:%M:%S'
author: {'username': 'Name', 'URL': ''}
"""
all_the_data = []
for item in self.chan.findall("item"):
if not item.find('{wp}post_name').text == slug:
continue
item_dict = self.item_dict(item)
if not item_dict or not item_dict.get('title'):
continue
slug = item_dict.get('{wp}post_name') or re.sub(item_dict['title'],' ','-')
for comment in item.findall("{wp}comment"):
comment = self.translate_wp_comment(comment)
comment['slug'] = slug
all_the_data.append(comment)
return all_the_data | Returns a flat list of all comments in XML dump. Formatted as the JSON
output from Wordpress API.
Keys:
('content', 'slug', 'date', 'status', 'author', 'ID', 'parent')
date format: '%Y-%m-%dT%H:%M:%S'
author: {'username': 'Name', 'URL': ''} |
def get_provider(self, name):
    """Allows for lazy instantiation of providers (Jinja2 templating is heavy, so only instantiate it if
    necessary)."""
    if name not in self.providers:
        cls = self.provider_classes[name]
        # instantiate the provider
        self.providers[name] = cls(self)
    # Cached instance is reused on subsequent calls.
    return self.providers[name] | Allows for lazy instantiation of providers (Jinja2 templating is heavy, so only instantiate it if
necessary). |
def mdwarf_subtype_from_sdsscolor(ri_color, iz_color):
    '''This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.
    Parameters
    ----------
    ri_color : float
        The SDSS `r-i` color of the object.
    iz_color : float
        The SDSS `i-z` color of the object.
    Returns
    -------
    (subtype, index1, index2) : tuple
        `subtype`: if the star appears to be an M dwarf, will return an int
        between 0 and 9 indicating its subtype, e.g. will return 4 for an M4
        dwarf. If the object isn't an M dwarf, will return None
        `index1`, `index2`: the M-dwarf color locus value and spread of this
        object calculated from the `r-i` and `i-z` colors.
    '''
    # calculate the spectral type index and the spectral type spread of the
    # object. sti is calculated by fitting a line to the locus in r-i and i-z
    # space for M dwarfs in West+ 2007
    if np.isfinite(ri_color) and np.isfinite(iz_color):
        obj_sti = 0.875274*ri_color + 0.483628*(iz_color + 0.00438)
        obj_sts = -0.483628*ri_color + 0.875274*(iz_color + 0.00438)
    else:
        obj_sti = np.nan
        obj_sts = np.nan
    # possible M star if sti is >= 0.666 but <= 3.4559
    if (np.isfinite(obj_sti) and np.isfinite(obj_sts) and
        (obj_sti > 0.666) and (obj_sti < 3.4559)):
        # decide which M subclass object this is
        # NOTE(review): the interval tests below are strict and use independent
        # `if`s; an obj_sti exactly equal to a class boundary matches none of
        # them and leaves m_class unassigned (UnboundLocalError on return) --
        # confirm boundary values cannot occur in practice.
        if ((obj_sti > 0.6660) and (obj_sti < 0.8592)):
            m_class = 'M0'
        if ((obj_sti > 0.8592) and (obj_sti < 1.0822)):
            m_class = 'M1'
        if ((obj_sti > 1.0822) and (obj_sti < 1.2998)):
            m_class = 'M2'
        if ((obj_sti > 1.2998) and (obj_sti < 1.6378)):
            m_class = 'M3'
        if ((obj_sti > 1.6378) and (obj_sti < 2.0363)):
            m_class = 'M4'
        if ((obj_sti > 2.0363) and (obj_sti < 2.2411)):
            m_class = 'M5'
        if ((obj_sti > 2.2411) and (obj_sti < 2.4126)):
            m_class = 'M6'
        if ((obj_sti > 2.4126) and (obj_sti < 2.9213)):
            m_class = 'M7'
        if ((obj_sti > 2.9213) and (obj_sti < 3.2418)):
            m_class = 'M8'
        if ((obj_sti > 3.2418) and (obj_sti < 3.4559)):
            m_class = 'M9'
    else:
        m_class = None
    return m_class, obj_sti, obj_sts | This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.
Parameters
----------
ri_color : float
The SDSS `r-i` color of the object.
iz_color : float
The SDSS `i-z` color of the object.
Returns
-------
(subtype, index1, index2) : tuple
`subtype`: if the star appears to be an M dwarf, will return an int
between 0 and 9 indicating its subtype, e.g. will return 4 for an M4
dwarf. If the object isn't an M dwarf, will return None
`index1`, `index2`: the M-dwarf color locus value and spread of this
object calculated from the `r-i` and `i-z` colors. |
def _has_branch(branch):
    """Return True if the target branch exists."""
    # `git rev-parse --verify` exits 0 only when the ref resolves; output is
    # discarded, and check=False lets a non-zero exit come back as returncode.
    ret = temple.utils.shell('git rev-parse --verify {}'.format(branch),
                             stderr=subprocess.DEVNULL,
                             stdout=subprocess.DEVNULL,
                             check=False)
    return ret.returncode == 0 | Return True if the target branch exists.
def start_log_child(self):
    '''Start the logging child process.'''
    # Replace any existing log child before forking a new one.
    self.stop_log_child()
    gconfig = yakonfig.get_global_config()
    read_end, write_end = os.pipe()
    pid = os.fork()
    if pid == 0:
        # We are the child
        self.clear_signal_handlers()
        # The child only reads; close the unused write end of the pipe.
        os.close(write_end)
        yakonfig.clear_global_config()
        self.log_spewer(gconfig, read_end)
        sys.exit(0)
    else:
        # We are the parent
        self.debug('children', 'new log child with pid {0}'.format(pid))
        self.log_child = pid
        # The parent only writes; close the unused read end of the pipe.
        os.close(read_end)
        self.log_fd = write_end | Start the logging child process.
def get(self):
    '''This handles GET requests to the index page.
    TODO: provide the correct baseurl from the checkplotserver options dict,
    so the frontend JS can just read that off immediately.
    '''
    # generate the project's list of checkplots
    project_checkplots = self.currentproject['checkplots']
    project_checkplotbasenames = [os.path.basename(x)
                                  for x in project_checkplots]
    project_checkplotindices = range(len(project_checkplots))
    # get the sortkey and order
    project_cpsortkey = self.currentproject['sortkey']
    if self.currentproject['sortorder'] == 'asc':
        project_cpsortorder = 'ascending'
    elif self.currentproject['sortorder'] == 'desc':
        project_cpsortorder = 'descending'
    # NOTE(review): any other sortorder value leaves project_cpsortorder
    # unassigned and raises UnboundLocalError at render time -- confirm the
    # value is validated upstream.
    # get the filterkey and condition
    project_cpfilterstatements = self.currentproject['filterstatements']
    self.render('cpindex.html',
                project_checkplots=project_checkplots,
                project_cpsortorder=project_cpsortorder,
                project_cpsortkey=project_cpsortkey,
                project_cpfilterstatements=project_cpfilterstatements,
                project_checkplotbasenames=project_checkplotbasenames,
                project_checkplotindices=project_checkplotindices,
                project_checkplotfile=self.cplistfile,
                readonly=self.readonly,
                baseurl=self.baseurl) | This handles GET requests to the index page.
TODO: provide the correct baseurl from the checkplotserver options dict,
so the frontend JS can just read that off immediately. |
def _parse_spectral_data(
        self,
        content,
        TNSId):
    """*parse spectra data from a row in the tns results content*
    **Key Arguments:**
        - ``content`` -- a table row from the TNS results page
        - ``TNSId`` -- the tns id of the transient
    **Return:**
        - ``specData`` -- a list of dictionaries of the spectral data
        - ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
    """
    self.log.info('starting the ``_parse_spectral_data`` method')
    specData = []
    relatedFilesTable = []
    # CLASSIFICATION BLOCK
    # Isolate the "Classification reportings" section of the HTML row.
    classBlock = re.search(
        r"""<tr class=[^\n]*?Classification reportings.*$""",
        content,
        flags=re.S  # re.S
    )
    if classBlock:
        classBlock = classBlock.group()
        # One match per classification report table inside the block.
        reports = re.finditer(
            r"""<tr class="row-[^"]*"><td class="cell-id">.*?</tbody>\s*</table>\s*</div></td> </tr>\s*</tbody>\s*</table>\s*</div></td> </tr>""",
            classBlock,
            flags=re.S  #
        )
        relatedFiles = self._parse_related_files(classBlock)
        for r in reports:
            # Report-level metadata (who reported, type, redshift, remarks ...).
            header = re.search(
                r"""<tr class="row.*?time_received">(?P<reportAddedDate>[^<]*).*?user_name">(?P<TNSuser>[^<]*).*?classifier_name">(?P<reporters>[^<]*).*?source_group_name">(?P<survey>[^<]*).*?-type">(?P<specType>[^<]*).*?-redshift">(?P<transRedshift>[^<]*).*?-related_files">(?P<relatedFiles>[^<]*).*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<sourceComment>[^<]*)</td>""",
                r.group(),
                flags=re.S  # re.S
            )
            if not header:
                continue
            header = header.groupdict()
            header["TNSId"] = TNSId
            del header["reporters"]
            del header["surveyGroup"]
            del header["survey"]
            if not self.comments:
                del header['sourceComment']
            else:
                # Collapse multi-line comments into a single trimmed string.
                theseComments = header[
                    "sourceComment"].split("\n")
                header["sourceComment"] = ""
                for c in theseComments:
                    header["sourceComment"] += " " + c.strip()
                header["sourceComment"] = header[
                    "sourceComment"].strip().replace('"', "'")[0:750]
            # Per-spectrum rows within this report.
            spec = re.finditer(
                r"""<tr class="class-results-.*?-obsdate">(?P<obsdate>[^<]*).*?-tel_inst">(?P<telescope>[^<]*).*?-exptime">(?P<exptime>[^<]*).*?-observer">(?P<sender>[^<]*).*?-reducer">(?P<reducer>[^<]*).*?-source_group_name">(?P<survey>[^<]*).*?-asciifile">(.*?<a href="(?P<filepath>[^"]*)".*?</a>)?.*?-fitsfile">(.*?<a href="(?P<fitsFilepath>[^"]*)".*?</a>)?.*?-groups">(?P<surveyGroup>[^<]*).*?-remarks">(?P<remarks>[^<]*)""",
                r.group(),
                flags=0  # re.S
            )
            filesAppended = False
            for s in spec:
                s = s.groupdict()
                del s["sender"]
                del s["surveyGroup"]
                del s["reducer"]
                if not self.comments:
                    del s["remarks"]
                else:
                    s["remarks"] = s["remarks"].replace('"', "'")[0:750]
                s.update(header)
                # Report-level related files are attached once, to the first
                # spectrum row that carries a relatedFiles flag.
                if s["relatedFiles"] and filesAppended == False:
                    filesAppended = True
                    for f in relatedFiles:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = f[
                            "filepath"].split("/")[-1]
                        thisFile["url"] = f["filepath"]
                        if self.comments:
                            thisFile["comment"] = f[
                                "fileComment"].replace("\n", " ").strip()
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                for ffile in [s["filepath"], s["fitsFilepath"]]:
                    if ffile:
                        # ORDER THE DICTIONARY FOR THIS ROW OF
                        # RESULTS
                        thisFile = collections.OrderedDict()
                        thisFile["TNSId"] = TNSId
                        thisFile["filename"] = ffile.split(
                            "/")[-1]
                        thisFile["url"] = ffile
                        if self.comments:
                            thisFile["comment"] = ""
                        thisFile["dateObs"] = s["obsdate"]
                        thisFile["spec1phot2"] = 1
                        relatedFilesTable.append(thisFile)
                del s["filepath"]
                del s["fitsFilepath"]
                del s["relatedFiles"]
                # ORDER THE DICTIONARY FOR THIS ROW OF RESULTS
                orow = collections.OrderedDict()
                keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
                            "telescope", "exptime", "reportAddedDate", "TNSuser"]
                # NOTE(review): `iteritems()` is Python 2 only -- this module
                # predates Python 3.
                for k, v in s.iteritems():
                    if k not in keyOrder:
                        keyOrder.append(k)
                for k in keyOrder:
                    try:
                        orow[k] = s[k]
                    except:
                        self.log.info(
                            "`%(k)s` not found in the source data for %(TNSId)s" % locals())
                        pass
                specData.append(orow)
    self.log.info('completed the ``_parse_spectral_data`` method')
    return specData, relatedFilesTable | *parse spectra data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``specData`` -- a list of dictionaries of the spectral data
- ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files |
def _replace(variables, match):
    """
    Return the appropriate replacement for `match` using the passed variables
    """
    expression = match.group(1)
    # Look-up chars and functions for the specified operator
    # (the expression's first character selects the URI-template operator).
    (prefix_char, separator_char, split_fn, escape_fn,
     format_fn) = operator_map.get(expression[0], defaults)
    replacements = []
    for key, modify_fn, explode in split_fn(expression):
        # Variables absent from the mapping are silently skipped.
        if key in variables:
            variable = modify_fn(variables[key])
            replacement = format_fn(
                explode, separator_char, escape_fn, key, variable)
            replacements.append(replacement)
    if not replacements:
        return ''
    return prefix_char + separator_char.join(replacements) | Return the appropriate replacement for `match` using the passed variables
def download(path, source_url):
    """
    Download a file to a given path from a given URL, if it does not exist.
    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_url: str
        The URL from which to download the file
    Returns
    -------
    str
        The path of the file
    """
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    if not os.path.exists(path):
        print('Downloading {} to {}'.format(source_url, path))
        filename = source_url.split('/')[-1]
        def _progress(count, block_size, total_size):
            # urlretrieve reporthook: print an in-place progress percentage.
            sys.stdout.write('\rDownloading {} {:.2%}'.format(
                filename, float(count * block_size) / float(total_size)))
            sys.stdout.flush()
        # Bare except is deliberate: clean up the partial file, then re-raise.
        try:
            urlretrieve(source_url, path, reporthook=_progress)
        except:
            sys.stdout.write('\r')
            # Exception; remove any partially downloaded file and re-raise
            if os.path.exists(path):
                os.remove(path)
            raise
        sys.stdout.write('\r')
    return path | Download a file to a given path from a given URL, if it does not exist.
Parameters
----------
path: str
The (destination) path of the file on the local filesystem
source_url: str
The URL from which to download the file
Returns
-------
str
The path of the file |
def set_zone_order(self, zone_ids):
    """ reorder zones per the passed in list
    :param zone_ids:
    :return:
    """
    reordered_zones = []
    current_zone_ids = [z['id'] for z in self.my_osid_object_form._my_map['zones']]
    # The new ordering must be a permutation of the existing zone ids.
    if set(zone_ids) != set(current_zone_ids):
        raise IllegalState('zone_ids do not match existing zones')
    # Rebuild the zone list in the requested order.
    for zone_id in zone_ids:
        for current_zone in self.my_osid_object_form._my_map['zones']:
            if zone_id == current_zone['id']:
                reordered_zones.append(current_zone)
                break
    self.my_osid_object_form._my_map['zones'] = reordered_zones | reorder zones per the passed in list
:param zone_ids:
:return: |
def checkArgs(args):
    """Checks the arguments and options.
    :param args: an object containing the options of the program.
    :type args: argparse.Namespace
    :returns: ``True`` if everything was OK.
    If there is a problem with an option, an exception is raised using the
    :py:class:`ProgramError` class, a message is printed to the
    :class:`sys.stderr` and the program exits with code 1.
    """
    # Check if we have the tped and the tfam files
    for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
        if not os.path.isfile(fileName):
            msg = "%(fileName)s: no such file" % locals()
            raise ProgramError(msg)
    # Check the indep-pairwise option
    # The two first must be int, the last one float
    try:
        # xrange: this module targets Python 2.
        for i in xrange(2):
            tmp = int(args.indep_pairwise[i])
        tmp = float(args.indep_pairwise[2])
    except ValueError:
        msg = "indep-pairwise: need INT INT FLOAT"
        raise ProgramError(msg)
    # Check the maf value
    tmpMAF = None
    try:
        tmpMAF = float(args.maf)
    except ValueError:
        msg = "maf: must be a float, not %s" % args.maf
        raise ProgramError(msg)
    if (tmpMAF > 0.5) or (tmpMAF < 0.0):
        msg = "maf: must be between 0.0 and 0.5, not %s" % args.maf
        raise ProgramError(msg)
    # Check the number of line per file
    if args.line_per_file_for_sge < 1:
        msg = "line-per-file-for-sge: must be above 0, not " \
              "%d" % args.line_per_file_for_sge
        raise ProgramError(msg)
    # Check the minimum number of SNPs
    if args.min_nb_snp < 1:
        msg = "min-nb-snp: must be above 1"
        raise ProgramError(msg)
    return True | Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1. |
def _set_trunk_vlans(self, v, load=False):
    """
    Setter method for trunk_vlans, mapped from YANG variable /interfaces/interface/aggregation/switched_vlan/state/trunk_vlans (union)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_trunk_vlans is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_trunk_vlans() directly.
    YANG Description: Specify VLANs, or ranges thereof, that the interface may
    carry when in trunk mode. If not specified, all VLANs are
    allowed on the interface. Ranges are specified in the form
    x..y, where x<y - ranges are assumed to be inclusive (such
    that the VLAN range is x <= range <= y.
    """
    # Auto-generated pyangbind setter; do not hand-edit the validation below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=int,
                            restriction_dict={"range": ["0..65535"]},
                            int_size=16,
                        ),
                        restriction_dict={"range": ["1..4094"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])"
                        },
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.((409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])|\\*)"
                        },
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.((409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])|\\*)"
                        },
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(\\*|(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9]))\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])"
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="trunk-vlans",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/vlan",
            defining_module="openconfig-vlan",
            yang_type="union",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """trunk_vlans must be of a type compatible with union""",
                "defined-type": "openconfig-vlan:union",
                "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.((409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])|\\*)'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.((409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])|\\*)'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(\\*|(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9]))\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)""",
            }
        )
    self.__trunk_vlans = t
    if hasattr(self, "_set"):
        self._set() | Setter method for trunk_vlans, mapped from YANG variable /interfaces/interface/aggregation/switched_vlan/state/trunk_vlans (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_vlans() directly.
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y. |
def unset_env(self, key):
    """Remove the environment variable registered for ``key``.

    The actual variable name is derived by prepending the app name via
    ``make_env_key``; a missing variable is silently ignored.
    """
    env_name = make_env_key(self.appname, key)
    os.environ.pop(env_name, None)
    self._registered_env_keys.discard(key)
    self._clear_memoization()
def backward(A, pobs, T=None, beta_out=None, dtype=np.float32):
"""Compute all backward coefficients. With scaling!
Parameters
----------
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
pobs : ndarray((T,N), dtype = float)
pobs[t,i] is the observation probability for observation at time t given hidden state i
beta_out : ndarray((T,N), dtype = float), optional, default = None
containter for the beta result variables. If None, a new container will be created.
dtype : type, optional, default = np.float32
data type of the result.
Returns
-------
beta : ndarray((T,N), dtype = float), optional, default = None
beta[t,i] is the ith backward coefficient of time t. These can be
used in many different algorithms related to HMMs.
"""
# set T
if T is None:
T = pobs.shape[0] # if not set, use the length of pobs as trajectory length
elif T > pobs.shape[0]:
raise ValueError('T must be at most the length of pobs.')
# set N
N = A.shape[0]
# initialize output if necessary
if beta_out is None:
beta_out = np.zeros((T, N), dtype=dtype)
elif T > beta_out.shape[0]:
raise ValueError('beta_out must at least have length T in order to fit trajectory.')
# initialization
beta_out[T-1, :] = 1.0
# scaling factor
scale = np.sum(beta_out[T-1, :])
# scale
beta_out[T-1, :] /= scale
# induction
for t in range(T-2, -1, -1):
# beta_i(t) = sum_j A_i,j * beta_j(t+1) * B_j,ob(t+1)
np.dot(A, beta_out[t+1, :] * pobs[t+1, :], out=beta_out[t, :])
# scaling factor
scale = np.sum(beta_out[t, :])
# scale
beta_out[t, :] /= scale
return beta_out | Compute all backward coefficients. With scaling!
Parameters
----------
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
pobs : ndarray((T,N), dtype = float)
pobs[t,i] is the observation probability for observation at time t given hidden state i
beta_out : ndarray((T,N), dtype = float), optional, default = None
containter for the beta result variables. If None, a new container will be created.
dtype : type, optional, default = np.float32
data type of the result.
Returns
-------
beta : ndarray((T,N), dtype = float), optional, default = None
beta[t,i] is the ith backward coefficient of time t. These can be
used in many different algorithms related to HMMs. |
def finalize(self) -> None:
    '''Split the collected statements and the task by the last directive.

    Rewrites ``parameter:`` directives into executable statements, moves
    everything after a ``task:`` directive into ``self.task``, merges
    consecutive trailing statements, and records an auto-provides option
    when exactly one ``output:`` directive is present (issue #859).
    '''
    self.wrap_script()
    if not self.statements:
        # Nothing collected for this step, so there is no task either.
        self.task = ''
        return
    # handle tasks
    # NOTE(review): each statement appears to be a list whose first item is
    # ':' (directive) or '!' (ordinary statement) and whose second item is
    # the directive name or statement text -- confirm against the parser.
    input_directive = [
        idx for idx, statement in enumerate(self.statements)
        if statement[0] == ':' and statement[1] == 'input'
    ]
    task_directive = [
        idx for idx, statement in enumerate(self.statements)
        if statement[0] == ':' and statement[1] == 'task'
    ]
    if len(task_directive) > 1:
        raise ValueError('Only one task statement is allowed in a step')
    # handle parameter
    for idx, statement in enumerate(self.statements):
        if statement[0] == ':' and statement[1] == 'parameter':
            if task_directive and task_directive[0] < idx:
                raise ValueError(
                    'Parameter statement is not allowed in tasks.')
            if '=' not in statement[2]:
                if ':' in statement[2]:
                    # "name: type" form -- the type trait must be valid.
                    if not is_type_hint(statement[2]):
                        raise ValueError(
                            f'Invalid type trait in parameter specification {statement[2]}'
                        )
                    name, value = statement[2].split(':')
                else:
                    # Bare "name" defaults to type str.
                    name = statement[2]
                    value = 'str'
            else:
                name, value = statement[2].split('=', 1)
                # ignore type trait if a default value is specified
                name = name.split(':')[0]
            name = name.strip()
            if name.startswith('_'):
                raise ValueError(
                    f'Invalid parameter name {name}: names with leading underscore is not allowed.'
                )
            if not value.strip():
                raise ValueError(
                    f'{self.step_name()}: Invalid parameter definition: {statement[2]}'
                )
            # there is a possibility that value contains # so sos_handle_parameter(name, val # aaa) will fail
            self.statements[idx] = [
                '!',
                f'#begin_parameter {name}\n{name} = sos_handle_parameter_({name.strip()!r}, {value}\n) #end_parameter {name}\n',
                statement[2].strip()
            ]
            self.parameters[name] = (value, statement[3])
            if input_directive and input_directive[0] < idx:
                # Parameter defined after input: -> treated as a substep parameter.
                self.substep_parameters.add(name)
    # handle tasks
    if not task_directive:
        self.task = ''
    else:
        start_task = task_directive[0] + 1
        # convert statement to task
        self.task = ''
        for statement in self.statements[start_task:]:
            if statement[0] == ':':
                if statement[1] in ('input', 'output', 'depends'):
                    raise ValueError(
                        f'{self.step_name()}: Step task should be defined as the last item in a SoS step'
                    )
                elif statement[1] == 'task':
                    raise ValueError(
                        f'{self.step_name()}: Only one task is allowed for a step'
                    )
                elif statement[1] == 'parameter':
                    raise ValueError(
                        f'{self.step_name()}: Parameters should be defined before step task'
                    )
                # ignore ...
                self.task += '\n'
            else:
                self.task += statement[1]
        self.task_params = self.statements[task_directive[0]][2]
        self.statements = self.statements[:task_directive[0]]
    # merge multiple statments at the end
    if len(self.statements) > 1 and self.statements[-1][0] == '!':
        starting = len(self.statements) - 1
        for idx in range(starting - 1, -1, -1):
            if self.statements[idx][0] == '!':
                starting = idx
            else:
                break
        # merge
        for idx in range(starting + 1, len(self.statements)):
            self.statements[starting][1] += self.statements[idx][1]
        # remove the rest of the statements
        self.statements = self.statements[:starting + 1]
    #
    # auto provides #859
    if not any(opt in self.options for opt in ('provides', 'shared')) and \
        len([x for x in self.statements if x[0] == ':' and x[1] == 'output']) == 1:
        output_stmt = [
            x for x in self.statements if x[0] == ':' and x[1] == 'output'
        ][0][2]
        output_names = get_names_of_kwargs(output_stmt)
        self.options['namedprovides'] = repr(output_names) | split statement and task by last directive
def _parseStylesheet(self, src):
    """stylesheet
      : [ CHARSET_SYM S* STRING S* ';' ]?
        [S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
        [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
      ;
    """
    # FIXME: BYTES to STR
    if type(src) == six.binary_type:
        src = six.text_type(src)
    # Strip every comment before any grammar processing.
    src = self.re_comment.sub('', src)
    # Leading @charset declaration, then whitespace/CDO/CDC noise.
    src = self._parseAtCharset(src)
    src = self._parseSCDOCDC(src)
    # @import rules followed by @namespace rules.
    src, stylesheetImports = self._parseAtImports(src)
    src = self._parseAtNamespace(src)
    stylesheetElements = []
    # Remaining content alternates rulesets and at-keyword blocks.
    while src:
        if src.startswith('@'):
            # @media, @page, @font-face, ...
            src, atResults = self._parseAtKeyword(src)
            if atResults is not None and atResults != NotImplemented:
                stylesheetElements.extend(atResults)
        else:
            src, ruleset = self._parseRuleset(src)
            stylesheetElements.append(ruleset)
        # Trailing [S|CDO|CDC]* after each element.
        src = self._parseSCDOCDC(src)
    return src, self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
def scheduled_times(self, earliest_time='now', latest_time='+1h'):
    """Return the times when this search is scheduled to run.

    By default the times within the next hour are returned.  Select a
    different range with *earliest_time*/*latest_time*, e.g. use
    ``earliest_time="-1d"`` and ``latest_time="now"`` for the last day.

    :param earliest_time: The earliest time.
    :type earliest_time: ``string``
    :param latest_time: The latest time.
    :type latest_time: ``string``
    :return: The list of search times.
    """
    response = self.get("scheduled_times",
                        earliest_time=earliest_time,
                        latest_time=latest_time)
    entry = _parse_atom_entry(self._load_atom_entry(response))
    return [datetime.fromtimestamp(int(stamp))
            for stamp in entry.content.scheduled_times]
def isbinary(*args):
    """Check whether every value can take part in binary/bitwise operations."""
    return all(isnumber(value) or isbool(value) for value in args)
def from_legacy_urlsafe(cls, urlsafe):
    """Convert urlsafe string to :class:`~google.cloud.datastore.key.Key`.

    Intended for the "legacy" representation of a datastore "Key" used
    within Google App Engine (a so-called "Reference").  Assumes that
    ``urlsafe`` was created within an App Engine app via something like
    ``ndb.Key(...).urlsafe()``.

    :type urlsafe: bytes or unicode
    :param urlsafe: The base64 encoded (ASCII) string corresponding to a
                    datastore "Key" / "Reference".
    :rtype: :class:`~google.cloud.datastore.key.Key`.
    :returns: The key corresponding to ``urlsafe``.
    """
    encoded = _to_bytes(urlsafe, encoding="ascii")
    # Restore the '=' padding stripped by urlsafe().
    encoded += b"=" * (-len(encoded) % 4)
    reference = _app_engine_key_pb2.Reference()
    reference.ParseFromString(base64.urlsafe_b64decode(encoded))
    project = _clean_app(reference.app)
    namespace = _get_empty(reference.name_space, u"")
    _check_database_id(reference.database_id)
    return cls(*_get_flat_path(reference.path), project=project, namespace=namespace)
def schedule(cron, name, params):
    '''
    Schedule the job <name> to run periodically given the <cron> expression.
    Jobs args and kwargs are given as parameters without dashes.
    Ex:
        udata job schedule "* * 0 * *" my-job arg1 arg2 key1=value key2=value
    '''
    if name not in celery.tasks:
        exit_with_error('Job %s not found', name)
    # Parameters without '=' are positional, the rest are keyword arguments.
    positional = [p for p in params if '=' not in p]
    keywords = dict(p.split('=') for p in params if '=' in p)
    label = 'Job {0}'.format(job_label(name, positional, keywords))
    try:
        # Reuse an existing periodic task for the same invocation if any.
        task = PeriodicTask.objects.get(task=name, args=positional, kwargs=keywords)
        task.modify(crontab=PeriodicTask.Crontab.parse(cron))
    except PeriodicTask.DoesNotExist:
        task = PeriodicTask.objects.create(
            task=name,
            name=label,
            description='Periodic {0} job'.format(name),
            enabled=True,
            args=positional,
            kwargs=keywords,
            crontab=PeriodicTask.Crontab.parse(cron),
        )
    msg = 'Scheduled {label} with the following crontab: {cron}'
    log.info(msg.format(label=label, cron=task.schedule_display))
def subdomain_check_pending(self, subrec, atlasdb_path, cur=None):
    """
    Determine whether or not a subdomain record's domain is missing zone files
    (besides the ones we expect) that could invalidate its history.

    :param subrec: subdomain record whose domain will be checked
    :param atlasdb_path: path to the atlas database
    :param cur: optional database cursor to reuse; a new one is created otherwise
    :return: True if the domain is missing zone files, False otherwise
    """
    _, _, domain = is_address_subdomain(subrec.get_fqn())

    # The most recent record for this domain carries the known-missing
    # zone file indexes as a comma-separated string.
    sql = 'SELECT missing FROM {} WHERE domain = ? ORDER BY parent_zonefile_index DESC LIMIT 1;'.format(self.subdomain_table)
    args = (domain,)

    cursor = cur if cur is not None else self.conn.cursor()

    rows = db_query_execute(cursor, sql, args)
    missing_str = ""
    try:
        rowdata = rows.fetchone()
        assert rowdata
        missing_str = rowdata['missing']
    except Exception:
        # Best-effort: no row for this domain (or no 'missing' value) means
        # no known-missing indexes.  Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit -- narrowed to Exception.
        pass

    known_missing = [int(i) for i in missing_str.split(',')] if missing_str is not None and len(missing_str) > 0 else []

    num_missing = atlasdb_get_zonefiles_missing_count_by_name(domain, indexes_exclude=known_missing, path=atlasdb_path)
    if num_missing > 0:
        log.debug("Subdomain is missing {} zone files: {}".format(num_missing, subrec))

    return num_missing > 0
def staticfy(html_file, args=None):
    """
    Staticfy method.

    Loop through each line of the file and replace the old links.

    :param html_file: path to the HTML file to transform
    :param args: optional options namespace (attributes ``static_endpoint``,
        ``framework``, ``add_tags``, ``exc_tags``, ``namespace``); missing
        attributes fall back to sensible defaults
    :return: result of ``replace_lines`` on the transformed matches
    """
    # Unpack arguments, tolerating namespaces that omit some options.
    # (The old default of a bare ``argparse.ArgumentParser()`` was a mutable
    # default argument and raised AttributeError as soon as any option was
    # read; ``getattr`` with a None fallback fixes both problems.)
    static_endpoint = getattr(args, 'static_endpoint', None) or 'static'
    framework = getattr(args, 'framework', None) or os.getenv('STATICFY_FRAMEWORK', 'flask')
    add_tags = getattr(args, 'add_tags', None) or {}
    exc_tags = getattr(args, 'exc_tags', None) or {}
    namespace = getattr(args, 'namespace', None) or {}

    # default tags
    tags = {('img', 'src'), ('link', 'href'), ('script', 'src')}

    # add user-supplied tags, then drop the excluded ones
    tags.update((tag, attr) for tag, attr in add_tags.items())
    tags -= {(tag, attr) for tag, attr in exc_tags.items()}

    # get elements we're interested in
    matches = get_elements(html_file, tags)

    # transform old links to new links
    transformed = transform(matches, framework, namespace, static_endpoint)
    return replace_lines(html_file, transformed)
def strel_disk(radius):
    """Create a disk structuring element for morphological operations.

    radius - radius of the disk
    """
    half = int(radius)
    # Symmetric integer grid covering [-half, half] in both axes.
    rows, cols = np.mgrid[-half:half + 1, -half:half + 1]
    inside = (rows * rows + cols * cols) <= radius * radius
    return inside.astype(float)
def normalize_name(self, header_name):
    """ Return header name as it is recommended (required) by the corresponding
    http protocol. For protocol switching use :meth:`.WHTTPHeaders.switch_name_style`.

    All current protocols (0.9-2) compare header names case-insensitively,
    but 0.9-1.1 recommend camel-case names like Foo or Foo-Bar, while
    HTTP/2 (RFC 7540) strictly requires lowercase-only header names.

    :param header_name: name to convert
    :return: str
    """
    mode = self.__normalization_mode
    if mode == '2':
        # HTTP/2: lowercase only.
        return header_name.lower()
    if mode in ('0.9', '1.0', '1.1'):
        # Older protocols: capitalize each dash-separated part.
        return '-'.join(part.capitalize() for part in header_name.split('-'))
    raise RuntimeError('Internal error: unknown http protocol: %s' % mode)
def delete_user(self, username):
    """Deletes a JIRA User.

    :param username: Username to delete
    :type username: str
    :return: Success of user deletion
    :rtype: bool
    """
    url = self._options['server'] + '/rest/api/latest/user/?username=%s' % username
    response = self._session.delete(url)
    succeeded = 200 <= response.status_code <= 299
    if not succeeded:
        logging.error(response.status_code)
    return succeeded
def sub_retab(match):
    r"""Remove all tabs and convert them into spaces.

    PARAMETERS:
    match -- regex match; uses re_retab pattern: \1 is text before tab,
             \2 is a consecutive string of tabs.

    A naive substitution of a fixed number of spaces would shift text:
        to\tlive     # original
        to    live   # simple substitution
    Instead, tabs are expanded to the next tab stop of width TAB_SIZE:
        to\tlive     # original
        to  live     # the tab *looks* like two spaces, so it becomes two
    """
    prefix = match.group(1)
    tab_run = len(match.group(2))
    pad = TAB_SIZE * tab_run - len(prefix) % TAB_SIZE
    return prefix + ' ' * pad
def merge_up(self, target_branch=None, feature_branch=None, delete=True, create=True):
    """
    Merge a change into one or more release branches and the default branch.

    :param target_branch: The name of the release branch where merging of
                          the feature branch starts (a string or
                          :data:`None`, defaults to
                          :attr:`current_branch`).
    :param feature_branch: The feature branch to merge in (any value
                           accepted by :func:`coerce_feature_branch()`).
    :param delete: :data:`True` (the default) to delete or close the
                   feature branch after it is merged, :data:`False`
                   otherwise.
    :param create: :data:`True` to automatically create the target branch
                   when it doesn't exist yet, :data:`False` otherwise.
    :returns: If `feature_branch` is given the global revision id of the
              feature branch is returned, otherwise the global revision id
              of the target branch (before any merges performed by
              :func:`merge_up()`) is returned. If the target branch is
              created by :func:`merge_up()` and `feature_branch` isn't
              given then :data:`None` is returned.
    :raises: The following exceptions can be raised:

             - :exc:`~exceptions.TypeError` when `target_branch` and
               :attr:`current_branch` are both :data:`None`.
             - :exc:`~exceptions.ValueError` when the given target branch
               doesn't exist (based on :attr:`branches`) and `create` is
               :data:`False`.
             - :exc:`~executor.ExternalCommandFailed` if a command fails.
    """
    # Timer is only used for the elapsed-time log message at the end.
    timer = Timer()
    repository_was_created = self.create()
    revision_to_merge = None
    # Default the target branch to the current branch.
    if not target_branch:
        target_branch = self.current_branch
    if not target_branch:
        raise TypeError("You need to specify the target branch! (where merging starts)")
    # Parse the feature branch specification.
    feature_branch = coerce_feature_branch(feature_branch) if feature_branch else None
    # Make sure we start with a clean working tree.
    self.ensure_clean()
    # Make sure we're up to date with our upstream repository (if any).
    if not repository_was_created:
        self.pull()
    # Checkout or create the target branch.
    logger.debug("Checking if target branch exists (%s) ..", target_branch)
    if target_branch in self.branches:
        self.checkout(revision=target_branch)
        # Get the global revision id of the release branch we're about to merge.
        revision_to_merge = self.find_revision_id(target_branch)
    elif not create:
        raise ValueError("The target branch %r doesn't exist!" % target_branch)
    elif self.compiled_filter.match(target_branch):
        # Branch names matching the release filter become release branches.
        self.create_release_branch(target_branch)
    else:
        self.create_branch(target_branch)
    # Check if we need to merge in a feature branch.
    if feature_branch:
        if feature_branch.location:
            # Pull in the feature branch.
            self.pull(remote=feature_branch.location,
                      revision=feature_branch.revision)
        # Get the global revision id of the feature branch we're about to merge.
        revision_to_merge = self.find_revision_id(feature_branch.revision)
        # Merge in the feature branch.
        self.merge(revision=feature_branch.revision)
        # Commit the merge.
        self.commit(message="Merged %s" % feature_branch.expression)
    # We skip merging up through release branches when the target branch is
    # the default branch (in other words, there's nothing to merge up).
    if target_branch != self.default_revision:
        # Find the release branches in the repository.
        release_branches = [release.revision.branch for release in self.ordered_releases]
        logger.debug("Found %s: %s",
                     pluralize(len(release_branches), "release branch", "release branches"),
                     concatenate(release_branches))
        # Find the release branches after the target branch.
        later_branches = release_branches[release_branches.index(target_branch) + 1:]
        logger.info("Found %s after target branch (%s): %s",
                    pluralize(len(later_branches), "release branch", "release branches"),
                    target_branch,
                    concatenate(later_branches))
        # Determine the branches that need to be merged.
        branches_to_upmerge = later_branches + [self.default_revision]
        logger.info("Merging up from '%s' to %s: %s",
                    target_branch,
                    pluralize(len(branches_to_upmerge), "branch", "branches"),
                    concatenate(branches_to_upmerge))
        # Merge the feature branch up through the selected branches.
        merge_queue = [target_branch] + branches_to_upmerge
        while len(merge_queue) >= 2:
            from_branch = merge_queue[0]
            to_branch = merge_queue[1]
            logger.info("Merging '%s' into '%s' ..", from_branch, to_branch)
            self.checkout(revision=to_branch)
            self.merge(revision=from_branch)
            self.commit(message="Merged %s" % from_branch)
            # Slide the two-element (from, to) window one branch forward.
            merge_queue.pop(0)
    # Check if we need to delete or close the feature branch.
    if delete and feature_branch and self.is_feature_branch(feature_branch.revision):
        # Delete or close the feature branch.
        self.delete_branch(
            branch_name=feature_branch.revision,
            message="Closing feature branch %s" % feature_branch.revision,
        )
    # Update the working tree to the default branch.
    self.checkout()
    logger.info("Done! Finished merging up in %s.", timer)
    return revision_to_merge | Merge a change into one or more release branches and the default branch.
:param target_branch: The name of the release branch where merging of
the feature branch starts (a string or
:data:`None`, defaults to
:attr:`current_branch`).
:param feature_branch: The feature branch to merge in (any value
accepted by :func:`coerce_feature_branch()`).
:param delete: :data:`True` (the default) to delete or close the
feature branch after it is merged, :data:`False`
otherwise.
:param create: :data:`True` to automatically create the target branch
when it doesn't exist yet, :data:`False` otherwise.
:returns: If `feature_branch` is given the global revision id of the
feature branch is returned, otherwise the global revision id
of the target branch (before any merges performed by
:func:`merge_up()`) is returned. If the target branch is
created by :func:`merge_up()` and `feature_branch` isn't
given then :data:`None` is returned.
:raises: The following exceptions can be raised:
- :exc:`~exceptions.TypeError` when `target_branch` and
:attr:`current_branch` are both :data:`None`.
- :exc:`~exceptions.ValueError` when the given target branch
doesn't exist (based on :attr:`branches`) and `create` is
:data:`False`.
- :exc:`~executor.ExternalCommandFailed` if a command fails. |
def get_genres(self):
    """
    Grab genre URLs from iTunes Podcast preview
    """
    page = r.get(ITUNES_GENRES_URL)
    anchors = html.fromstring(page.content).xpath("//a[@class='top-level-genre']")
    return [anchor.attrib['href'] for anchor in anchors]
def _filter_list_to_conjunction_expression(filter_list):
    """Convert a list of filters to an Expression that is the conjunction of all of them."""
    if not isinstance(filter_list, list):
        raise AssertionError(u'Expected `list`, Received: {}.'.format(filter_list))
    if not all(isinstance(filter_block, Filter) for filter_block in filter_list):
        raise AssertionError(u'Expected list of Filter objects. Received: {}'.format(filter_list))
    predicates = [filter_block.predicate for filter_block in filter_list]
    return expression_list_to_conjunction(predicates)
def title(self):
    """
    The title of the course.  If the configuration's namemap has no entry
    for this course, a new one is created with
    name=$STUD.IP_NAME + $SEMESTER_NAME.
    """
    name = c.namemap_lookup(self.id)
    if name is None:
        # Cache the derived name so subsequent lookups hit the namemap.
        name = self._title + " " + client.get_semester_title(self)
        c.namemap_set(self.id, name)
    return secure_filename(name)
def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs,
                               tmplt_banks, output_dir=None,
                               injection_file=None, tags=None):
    '''
    This function aims to be the gateway for setting up a set of matched-filter
    jobs in a workflow. This function is intended to support multiple
    different ways/codes that could be used for doing this. For now the only
    supported sub-module is one that runs the matched-filtering by setting up
    a series of matched-filtering jobs, from one executable, to create
    matched-filter triggers covering the full range of science times for which
    there is data and a template bank file.

    Parameters
    -----------
    Workflow : pycbc.workflow.core.Workflow
        The workflow instance that the coincidence jobs will be added to.
    science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
        The list of times that are being analysed in this workflow.
    datafind_outs : pycbc.workflow.core.FileList
        An FileList of the datafind files that are needed to obtain the
        data used in the analysis.
    tmplt_banks : pycbc.workflow.core.FileList
        An FileList of the template bank files that will serve as input
        in this stage.
    output_dir : path
        The directory in which output will be stored.
    injection_file : pycbc.workflow.core.File, optional (default=None)
        If given the file containing the simulation file to be sent to these
        jobs on the command line. If not given no file will be sent.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. An example might be ['BNSINJECTIONS'] or
        ['NOINJECTIONANALYSIS']. This will be used in output names.

    Returns
    -------
    inspiral_outs : pycbc.workflow.core.FileList
        A list of output files written by this stage. This *will not* contain
        any intermediate products produced within this stage of the workflow.
        If you require access to any intermediate products produced at this
        stage you can call the various sub-functions directly.
    '''
    if tags is None:
        tags = []
    logging.info("Entering matched-filtering setup module.")
    make_analysis_dir(output_dir)
    cp = workflow.cp
    # Parse for options in .ini file
    mfltrMethod = cp.get_opt_tags("workflow-matchedfilter", "matchedfilter-method",
                                  tags)
    # Could have a number of choices here
    if mfltrMethod == "WORKFLOW_INDEPENDENT_IFOS":
        logging.info("Adding matched-filter jobs to workflow.")
        # Linking matched-filter jobs to template bank jobs must be declared
        # symmetrically in both config sections; warn if only one side is set.
        if cp.has_option_tags("workflow-matchedfilter",
                              "matchedfilter-link-to-tmpltbank", tags):
            if not cp.has_option_tags("workflow-tmpltbank",
                                      "tmpltbank-link-to-matchedfilter", tags):
                errMsg = "If using matchedfilter-link-to-tmpltbank, you should "
                errMsg += "also use tmpltbank-link-to-matchedfilter."
                logging.warn(errMsg)
            linkToTmpltbank = True
        else:
            linkToTmpltbank = False
        # Compatibility mode additionally requires linking and must be set in
        # both the template bank and matched-filter stages.
        if cp.has_option_tags("workflow-matchedfilter",
                              "matchedfilter-compatibility-mode", tags):
            if not linkToTmpltbank:
                errMsg = "Compatibility mode requires that the "
                errMsg += "matchedfilter-link-to-tmpltbank option is also set."
                raise ValueError(errMsg)
            if not cp.has_option_tags("workflow-tmpltbank",
                                      "tmpltbank-compatibility-mode", tags):
                errMsg = "If using compatibility mode it must be set both in "
                errMsg += "the template bank and matched-filtering stages."
                raise ValueError(errMsg)
            compatibility_mode = True
        else:
            compatibility_mode = False
        inspiral_outs = setup_matchedfltr_dax_generated(workflow, science_segs,
                                      datafind_outs, tmplt_banks, output_dir,
                                      injection_file=injection_file,
                                      tags=tags,
                                      link_to_tmpltbank=linkToTmpltbank,
                                      compatibility_mode=compatibility_mode)
    elif mfltrMethod == "WORKFLOW_MULTIPLE_IFOS":
        logging.info("Adding matched-filter jobs to workflow.")
        inspiral_outs = setup_matchedfltr_dax_generated_multi(workflow,
                                      science_segs, datafind_outs, tmplt_banks,
                                      output_dir, injection_file=injection_file,
                                      tags=tags)
    else:
        errMsg = "Matched filter method not recognized. Must be one of "
        errMsg += "WORKFLOW_INDEPENDENT_IFOS (currently only one option)."
        raise ValueError(errMsg)
    logging.info("Leaving matched-filtering setup module.")
    return inspiral_outs | This function aims to be the gateway for setting up a set of matched-filter
jobs in a workflow. This function is intended to support multiple
different ways/codes that could be used for doing this. For now the only
supported sub-module is one that runs the matched-filtering by setting up
a serious of matched-filtering jobs, from one executable, to create
matched-filter triggers covering the full range of science times for which
there is data and a template bank file.
Parameters
-----------
Workflow : pycbc.workflow.core.Workflow
The workflow instance that the coincidence jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
An FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
An FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given the file containing the simulation file to be sent to these
jobs on the command line. If not given no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly. |
def runSearchReferenceSets(self, request):
    """
    Runs the specified SearchReferenceSetsRequest.
    """
    request_cls = protocol.SearchReferenceSetsRequest
    response_cls = protocol.SearchReferenceSetsResponse
    return self.runSearchRequest(
        request, request_cls, response_cls, self.referenceSetsGenerator)
def start_time(self):
    """Start timestamp of the dataset"""
    t = self.nc['time'].dt
    return datetime(t.year, t.month, t.day,
                    t.hour, t.minute, t.second, t.microsecond)
def get_lang_dict(self):
    """Return the supported languages as a dictionary."""
    response = self.yandex_translate_request("getLangs")
    self.handle_errors(response)
    return response.json()["langs"]
def add_or_update(self, app_id):
    '''
    Add or update the category.
    '''
    logger.info('Collect info: user-{0}, uid-{1}'.format(self.userinfo.uid, app_id))
    MCollect.add_or_update(self.userinfo.uid, app_id)
    # The handler itself acts as the file-like sink for the JSON response.
    return json.dump({'success': True}, self)
def wait_until_not_moving(self, timeout=None):
    """
    Blocks until ``running`` is not in ``self.state`` or ``stalled`` is in
    ``self.state``. The condition is checked when there is an I/O event
    related to the ``state`` attribute. Exits early when ``timeout``
    (in milliseconds) is reached.

    Returns ``True`` if the condition is met, and ``False`` if the timeout
    is reached.

    Example::

        m.wait_until_not_moving()
    """
    def stopped_or_stalled(state):
        return self.STATE_RUNNING not in state or self.STATE_STALLED in state

    return self.wait(stopped_or_stalled, timeout)
def execute_cmdline_scenarios(scenario_name, args, command_args):
"""
Execute scenario sequences based on parsed command-line arguments.
This is useful for subcommands that run scenario sequences, which
excludes subcommands such as ``list``, ``login``, and ``matrix``.
``args`` and ``command_args`` are combined using :func:`get_configs`
to generate the scenario(s) configuration.
:param scenario_name: Name of scenario to run, or ``None`` to run all.
:param args: ``args`` dict from ``click`` command context
:param command_args: dict of command argumentss, including the target
subcommand to execute
:returns: None
"""
scenarios = molecule.scenarios.Scenarios(
get_configs(args, command_args), scenario_name)
scenarios.print_matrix()
for scenario in scenarios:
try:
execute_scenario(scenario)
except SystemExit:
# if the command has a 'destroy' arg, like test does,
# handle that behavior here.
if command_args.get('destroy') == 'always':
msg = ('An error occurred during the {} sequence action: '
"'{}'. Cleaning up.").format(scenario.config.subcommand,
scenario.config.action)
LOG.warn(msg)
execute_subcommand(scenario.config, 'cleanup')
execute_subcommand(scenario.config, 'destroy')
# always prune ephemeral dir if destroying on failure
scenario.prune()
util.sysexit()
else:
raise | Execute scenario sequences based on parsed command-line arguments.
This is useful for subcommands that run scenario sequences, which
excludes subcommands such as ``list``, ``login``, and ``matrix``.
``args`` and ``command_args`` are combined using :func:`get_configs`
to generate the scenario(s) configuration.
:param scenario_name: Name of scenario to run, or ``None`` to run all.
:param args: ``args`` dict from ``click`` command context
:param command_args: dict of command argumentss, including the target
subcommand to execute
:returns: None |
def setup(self, phase=None, quantity='', conductance='', r_tolerance=None,
max_iter=None, relaxation_source=None,
relaxation_quantity=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run. If no value is
given, the existing value is kept.
quantity : string
The name of the physical quantity to be calcualted such as
``'pore.xxx'``.
conductance : string
The name of the pore-scale transport conductance values. These
are typically calculated by a model attached to a *Physics* object
associated with the given *Phase*. Example; ``'throat.yyy'``.
r_tolerance : scalar
Tolerance to achieve. The solver returns a solution when 'residual'
falls below 'r_tolerance'. The default value is 0.001.
max_iter : scalar
The maximum number of iterations the solver can perform to find
a solution. The default value is 5000.
relaxation_source : scalar, between 0 and 1
A relaxation factor to control under-relaxation of the source term.
Factor approaching 0 : improved stability but slow simulation.
Factor approaching 1 : fast simulation but may be unstable.
Default value is 1 (no under-relaxation).
relaxation_quantity : scalar, between 0 and 1
A relaxation factor to control under-relaxation for the quantity
solving for.
Factor approaching 0 : improved stability but slow simulation.
Factor approaching 1 : fast simulation but may be unstable.
Default value is 1 (no under-relaxation).
Notes
-----
Under-relaxation is a technique used for improving stability of a
computation, particularly in the presence of highly non-linear terms.
Under-relaxation used here limits the change in a variable from one
iteration to the next. An optimum choice of the relaxation factor is
one that is small enough to ensure stable simulation and large enough
to speed up the computation.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if r_tolerance:
self.settings['r_tolerance'] = r_tolerance
if max_iter:
self.settings['max_iter'] = max_iter
if relaxation_source:
self.settings['relaxation_source'] = relaxation_source
if relaxation_quantity:
self.settings['relaxation_quantity'] = relaxation_quantity
super().setup(**kwargs) | r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run. If no value is
given, the existing value is kept.
quantity : string
The name of the physical quantity to be calcualted such as
``'pore.xxx'``.
conductance : string
The name of the pore-scale transport conductance values. These
are typically calculated by a model attached to a *Physics* object
associated with the given *Phase*. Example; ``'throat.yyy'``.
r_tolerance : scalar
Tolerance to achieve. The solver returns a solution when 'residual'
falls below 'r_tolerance'. The default value is 0.001.
max_iter : scalar
The maximum number of iterations the solver can perform to find
a solution. The default value is 5000.
relaxation_source : scalar, between 0 and 1
A relaxation factor to control under-relaxation of the source term.
Factor approaching 0 : improved stability but slow simulation.
Factor approaching 1 : fast simulation but may be unstable.
Default value is 1 (no under-relaxation).
relaxation_quantity : scalar, between 0 and 1
A relaxation factor to control under-relaxation for the quantity
solving for.
Factor approaching 0 : improved stability but slow simulation.
Factor approaching 1 : fast simulation but may be unstable.
Default value is 1 (no under-relaxation).
Notes
-----
Under-relaxation is a technique used for improving stability of a
computation, particularly in the presence of highly non-linear terms.
Under-relaxation used here limits the change in a variable from one
iteration to the next. An optimum choice of the relaxation factor is
one that is small enough to ensure stable simulation and large enough
to speed up the computation. |
def learn(self, state_arr, limit=1000):
'''
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
'''
while self.t <= limit:
# Draw samples of next possible actions from any distribution.
next_action_arr = self.extract_possible_actions(state_arr)
# Inference Q-Values.
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
# Set `np.ndarray` of rewards and next Q-Values.
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
# Observe reward values.
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
# Inference the Max-Q-Value in next action time.
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
# Select action.
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
# Update real Q-Values.
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
# Maximum of predicted and real Q-Values.
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
# Learn Q-Values.
self.learn_q(predicted_q_arr, real_q_arr)
# Update State.
state_arr = self.update_state(state_arr, action_arr)
# Epsode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break | Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. |
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces) | Appends the objects ``args`` to the array under the ``path` in key
``name`` |
def _LeaseMessageHandlerRequests(self, lease_time, limit, cursor=None):
"""Leases a number of message handler requests up to the indicated limit."""
now = rdfvalue.RDFDatetime.Now()
now_str = mysql_utils.RDFDatetimeToTimestamp(now)
expiry = now + lease_time
expiry_str = mysql_utils.RDFDatetimeToTimestamp(expiry)
query = ("UPDATE message_handler_requests "
"SET leased_until=FROM_UNIXTIME(%s), leased_by=%s "
"WHERE leased_until IS NULL OR leased_until < FROM_UNIXTIME(%s) "
"LIMIT %s")
id_str = utils.ProcessIdString()
args = (expiry_str, id_str, now_str, limit)
updated = cursor.execute(query, args)
if updated == 0:
return []
cursor.execute(
"SELECT UNIX_TIMESTAMP(timestamp), request "
"FROM message_handler_requests "
"WHERE leased_by=%s AND leased_until=FROM_UNIXTIME(%s) LIMIT %s",
(id_str, expiry_str, updated))
res = []
for timestamp, request in cursor.fetchall():
req = rdf_objects.MessageHandlerRequest.FromSerializedString(request)
req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
req.leased_until = expiry
req.leased_by = id_str
res.append(req)
return res | Leases a number of message handler requests up to the indicated limit. |
def setDetailedText( self, text ):
"""
Sets the details text for this message box to the inputed text. \
Overloading the default method to support HTML details.
:param text | <str>
"""
super(XMessageBox, self).setDetailedText(text)
if ( text ):
# update the text edit
widgets = self.findChildren(QTextEdit)
widgets[0].setLineWrapMode(QTextEdit.NoWrap)
widgets[0].setHtml(text)
widgets[0].setMaximumHeight(1000)
widgets[0].setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
# update push button
buttons = self.findChildren(QPushButton)
for button in buttons:
if ( button.text() == 'Show Details...' ):
button.clicked.connect( self.updateSizeMode )
break | Sets the details text for this message box to the inputed text. \
Overloading the default method to support HTML details.
:param text | <str> |
def to_pretty_midi(self, constant_tempo=None, constant_velocity=100):
"""
Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
- Only constant tempo is supported by now.
- The velocities of the converted pianorolls are clipped to [0, 127],
i.e. values below 0 and values beyond 127 are replaced by 127 and 0,
respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
The constant tempo value of the output object. Defaults to use the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance.
"""
self.check_validity()
pm = pretty_midi.PrettyMIDI(initial_tempo=self.tempo[0])
# TODO: Add downbeat support -> time signature change events
# TODO: Add tempo support -> tempo change events
if constant_tempo is None:
constant_tempo = self.tempo[0]
time_step_size = 60. / constant_tempo / self.beat_resolution
for track in self.tracks:
instrument = pretty_midi.Instrument(
program=track.program, is_drum=track.is_drum, name=track.name)
copied = track.copy()
if copied.is_binarized():
copied.assign_constant(constant_velocity)
copied.clip()
clipped = copied.pianoroll.astype(np.uint8)
binarized = (clipped > 0)
padded = np.pad(binarized, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded.astype(np.int8), axis=0)
positives = np.nonzero((diff > 0).T)
pitches = positives[0]
note_ons = positives[1]
note_on_times = time_step_size * note_ons
note_offs = np.nonzero((diff < 0).T)[1]
note_off_times = time_step_size * note_offs
for idx, pitch in enumerate(pitches):
velocity = np.mean(clipped[note_ons[idx]:note_offs[idx], pitch])
note = pretty_midi.Note(
velocity=int(velocity), pitch=pitch,
start=note_on_times[idx], end=note_off_times[idx])
instrument.notes.append(note)
instrument.notes.sort(key=lambda x: x.start)
pm.instruments.append(instrument)
return pm | Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
- Only constant tempo is supported by now.
- The velocities of the converted pianorolls are clipped to [0, 127],
i.e. values below 0 and values beyond 127 are replaced by 127 and 0,
respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
The constant tempo value of the output object. Defaults to use the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance. |
def _processor(self):
"""Application processor to setup session for every request"""
self.store.cleanup(self._config.timeout)
self._load() | Application processor to setup session for every request |
def handler(self, environ, start_response):
"""XMLRPC service for windmill browser core to communicate with"""
if environ['REQUEST_METHOD'] == 'POST':
return self.handle_POST(environ, start_response)
else:
start_response("400 Bad request", [('Content-Type', 'text/plain')])
return [''] | XMLRPC service for windmill browser core to communicate with |
async def verify_worker_impls(chain):
"""Verify the task type (e.g. decision, build) of each link in the chain.
Args:
chain (ChainOfTrust): the chain we're operating on
Raises:
CoTError: on failure
"""
valid_worker_impls = get_valid_worker_impls()
for obj in chain.get_all_links_in_chain():
worker_impl = obj.worker_impl
log.info("Verifying {} {} as a {} task...".format(obj.name, obj.task_id, worker_impl))
# Run tests synchronously for now. We can parallelize if efficiency
# is more important than a single simple logfile.
await valid_worker_impls[worker_impl](chain, obj) | Verify the task type (e.g. decision, build) of each link in the chain.
Args:
chain (ChainOfTrust): the chain we're operating on
Raises:
CoTError: on failure |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.