code | docstring |
---|---|
def load_kwargs(*args, **kwargs):
"""
Load geometry from a properly formatted dict or kwargs
"""
def handle_scene():
"""
Load a scene from our kwargs:
class: Scene
geometry: dict, name: Trimesh kwargs
graph: list of dict, kwargs for scene.graph.update
base_frame: str, base frame of graph
"""
scene = Scene()
scene.geometry.update({k: load_kwargs(v) for
k, v in kwargs['geometry'].items()})
for k in kwargs['graph']:
if isinstance(k, dict):
scene.graph.update(**k)
elif util.is_sequence(k) and len(k) == 3:
scene.graph.update(k[1], k[0], **k[2])
if 'base_frame' in kwargs:
scene.graph.base_frame = kwargs['base_frame']
if 'metadata' in kwargs:
scene.metadata.update(kwargs['metadata'])
return scene
def handle_trimesh_kwargs():
"""
Load information with vertices and faces into a mesh
or PointCloud object.
"""
if (isinstance(kwargs['vertices'], dict) or
isinstance(kwargs['faces'], dict)):
return Trimesh(**misc.load_dict(kwargs))
elif kwargs['faces'] is None:
# vertices without faces returns a PointCloud
return PointCloud(**kwargs)
else:
return Trimesh(**kwargs)
def handle_trimesh_export():
data, file_type = kwargs['data'], kwargs['file_type']
if not isinstance(data, dict):
data = util.wrap_as_stream(data)
k = mesh_loaders[file_type](data,
file_type=file_type)
return Trimesh(**k)
# if we've been passed a single dict instead of kwargs
# substitute the dict for kwargs
if (len(kwargs) == 0 and
len(args) == 1 and
isinstance(args[0], dict)):
kwargs = args[0]
# function : list of expected keys
handlers = {handle_scene: ('graph', 'geometry'),
handle_trimesh_kwargs: ('vertices', 'faces'),
handle_trimesh_export: ('file_type', 'data')}
# loop through handler functions and expected key
handler = None
for func, expected in handlers.items():
if all(i in kwargs for i in expected):
# all expected kwargs exist
handler = func
# exit the loop as we found one
break
if handler is None:
raise ValueError('unable to determine type!')
return handler() | Load geometry from a properly formatted dict or kwargs |
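The handler-selection logic above is a small dispatch-by-expected-keys pattern: each handler is paired with the kwargs keys it needs, and the first handler whose keys are all present wins. A minimal standalone sketch of the same idea (the handler names here are illustrative, not part of trimesh):
```python
def handle_scene():
    return "scene"

def handle_mesh():
    return "mesh"

def dispatch(kwargs):
    # map handler function -> keys that must all be present in kwargs
    handlers = {handle_scene: ('graph', 'geometry'),
                handle_mesh: ('vertices', 'faces')}
    for func, expected in handlers.items():
        if all(key in kwargs for key in expected):
            return func()          # first handler whose keys all match wins
    raise ValueError('unable to determine type!')

print(dispatch({'vertices': [], 'faces': []}))   # mesh
```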
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items} | Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
``` |
def emit(self, event, *args, **kwargs):
"""Send out an event and call it's associated functions
:param event: Name of the event to trigger
"""
for func in self._registered_events[event].values():
func(*args, **kwargs) | Send out an event and call its associated functions
:param event: Name of the event to trigger |
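`emit` assumes a `self._registered_events` mapping of event name to a dict of callbacks. A minimal, hypothetical registry illustrating how such an emitter is typically wired up:
```python
class Emitter:
    """Hypothetical container for the registry that ``emit`` iterates over."""
    def __init__(self):
        self._registered_events = {}            # event name -> {key: callback}

    def register(self, event, key, func):
        self._registered_events.setdefault(event, {})[key] = func

    def emit(self, event, *args, **kwargs):
        for func in self._registered_events[event].values():
            func(*args, **kwargs)

bus = Emitter()
bus.register("saved", "logger", lambda path: print("saved:", path))
bus.emit("saved", "/tmp/out.txt")               # prints "saved: /tmp/out.txt"
```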
def _generate_normals(polygons):
"""
Takes a list of polygons and returns an array of their normals.
Normals point towards the viewer for a face with its vertices in
counterclockwise order, following the right hand rule.
Uses three points equally spaced around the polygon.
This normal of course might not make sense for polygons with more than
three points not lying in a plane, but it's a plausible and fast
approximation.
Args:
polygons (list): list of (M_i, 3) array_like, or (..., M, 3) array_like
A sequence of polygons to compute normals for, which can have
varying numbers of vertices. If the polygons all have the same
number of vertices and array is passed, then the operation will
be vectorized.
Returns:
normals: (..., 3) array_like
A normal vector estimated for the polygon.
"""
if isinstance(polygons, np.ndarray):
# optimization: polygons all have the same number of points, so can
# vectorize
n = polygons.shape[-2]
i1, i2, i3 = 0, n//3, 2*n//3
v1 = polygons[..., i1, :] - polygons[..., i2, :]
v2 = polygons[..., i2, :] - polygons[..., i3, :]
else:
# The subtraction doesn't vectorize because polygons is jagged.
v1 = np.empty((len(polygons), 3))
v2 = np.empty((len(polygons), 3))
for poly_i, ps in enumerate(polygons):
n = len(ps)
i1, i2, i3 = 0, n//3, 2*n//3
v1[poly_i, :] = ps[i1, :] - ps[i2, :]
v2[poly_i, :] = ps[i2, :] - ps[i3, :]
return np.cross(v1, v2) | Takes a list of polygons and returns an array of their normals.
Normals point towards the viewer for a face with its vertices in
counterclockwise order, following the right hand rule.
Uses three points equally spaced around the polygon.
This normal of course might not make sense for polygons with more than
three points not lying in a plane, but it's a plausible and fast
approximation.
Args:
polygons (list): list of (M_i, 3) array_like, or (..., M, 3) array_like
A sequence of polygons to compute normals for, which can have
varying numbers of vertices. If the polygons all have the same
number of vertices and array is passed, then the operation will
be vectorized.
Returns:
normals: (..., 3) array_like
A normal vector estimated for the polygon. |
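For the vectorized branch, a quick check with a single counterclockwise triangle (assuming only NumPy) shows the normal pointing toward the viewer along +z:
```python
import numpy as np

polygons = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])  # (1, 3, 3), CCW in the xy-plane
n = polygons.shape[-2]
i1, i2, i3 = 0, n // 3, 2 * n // 3           # three points spread around the polygon
v1 = polygons[..., i1, :] - polygons[..., i2, :]
v2 = polygons[..., i2, :] - polygons[..., i3, :]
print(np.cross(v1, v2))                      # [[0. 0. 1.]] -> +z, toward the viewer
```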
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width, height, pixels, meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale(rows):
for row in rows:
yield array('BH'[targetbitdepth > 8],
[int(round(x * factor)) for x in row])
if maxval == targetmaxval:
return width, height, pixels, meta
else:
if 'transparent' in meta:
transparent = meta['transparent']
if isinstance(transparent, tuple):
transparent = tuple(list(
iterscale((transparent,))
)[0])
else:
transparent = tuple(list(
iterscale(((transparent,),))
)[0])[0]
meta['transparent'] = transparent
return width, height, iterscale(pixels), meta | Helper used by :meth:`asRGB8` and :meth:`asRGBA8`. |
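The core of the rescale is a single multiplicative factor between the source and target maxima; the `'B'`/`'H'` typecode picks 8- or 16-bit storage for the scaled row. A small sketch of that step on one row of 16-bit samples:
```python
from array import array

bitdepth, targetbitdepth = 16, 8
maxval = 2 ** bitdepth - 1
targetmaxval = 2 ** targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)

row = [0, 32768, 65535]
scaled = array('BH'[targetbitdepth > 8],
               [int(round(x * factor)) for x in row])
print(list(scaled))                          # [0, 128, 255]
```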
def _file_model_from_db(self, record, content, format):
"""
Build a file model from database record.
"""
# TODO: Most of this is shared with _notebook_model_from_db.
path = to_api_path(record['parent_name'] + record['name'])
model = base_model(path)
model['type'] = 'file'
model['last_modified'] = model['created'] = record['created_at']
if content:
bcontent = record['content']
model['content'], model['format'], model['mimetype'] = from_b64(
path,
bcontent,
format,
)
return model | Build a file model from database record. |
def fetch_routing_info(self, address):
""" Fetch raw routing info from a given router address.
:param address: router address
:return: list of routing records or
None if no connection could be established
:raise ServiceUnavailable: if the server does not support routing or
if routing support is broken
"""
metadata = {}
records = []
def fail(md):
if md.get("code") == "Neo.ClientError.Procedure.ProcedureNotFound":
raise RoutingProtocolError("Server {!r} does not support routing".format(address))
else:
raise RoutingProtocolError("Routing support broken on server {!r}".format(address))
try:
with self.acquire_direct(address) as cx:
_, _, server_version = (cx.server.agent or "").partition("/")
# TODO 2.0: remove old routing procedure
if server_version and Version.parse(server_version) >= Version((3, 2)):
log_debug("[#%04X] C: <ROUTING> query=%r", cx.local_port, self.routing_context or {})
cx.run("CALL dbms.cluster.routing.getRoutingTable({context})",
{"context": self.routing_context}, on_success=metadata.update, on_failure=fail)
else:
log_debug("[#%04X] C: <ROUTING> query={}", cx.local_port)
cx.run("CALL dbms.cluster.routing.getServers", {}, on_success=metadata.update, on_failure=fail)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
routing_info = [dict(zip(metadata.get("fields", ()), values)) for values in records]
log_debug("[#%04X] S: <ROUTING> info=%r", cx.local_port, routing_info)
return routing_info
except RoutingProtocolError as error:
raise ServiceUnavailable(*error.args)
except ServiceUnavailable:
self.deactivate(address)
return None | Fetch raw routing info from a given router address.
:param address: router address
:return: list of routing records or
None if no connection could be established
:raise ServiceUnavailable: if the server does not support routing or
if routing support is broken |
def show_pattern(syncpr_output_dynamic, image_height, image_width):
"""!
@brief Displays the evolution of the phase oscillators as a set of patterns, where the last one represents the final result of recognition.
@param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
@param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).
@param[in] image_width (uint): Width of the pattern.
"""
number_pictures = len(syncpr_output_dynamic);
iteration_math_step = 1.0;
if (number_pictures > 50):
iteration_math_step = number_pictures / 50.0;
number_pictures = 50;
number_cols = int(numpy.ceil(number_pictures ** 0.5));
number_rows = int(numpy.ceil(number_pictures / number_cols));
real_index = 0, 0;
double_indexer = True;
if ( (number_cols == 1) or (number_rows == 1) ):
real_index = 0;
double_indexer = False;
(_, axarr) = plt.subplots(number_rows, number_cols);
if (number_pictures > 1):
plt.setp([ax for ax in axarr], visible = False);
iteration_display = 0.0;
for iteration in range(len(syncpr_output_dynamic)):
if (iteration >= iteration_display):
iteration_display += iteration_math_step;
ax_handle = axarr;
if (number_pictures > 1):
ax_handle = axarr[real_index];
syncpr_visualizer.__show_pattern(ax_handle, syncpr_output_dynamic, image_height, image_width, iteration);
if (double_indexer is True):
real_index = real_index[0], real_index[1] + 1;
if (real_index[1] >= number_cols):
real_index = real_index[0] + 1, 0;
else:
real_index += 1;
plt.show(); | !
@brief Displays the evolution of the phase oscillators as a set of patterns, where the last one represents the final result of recognition.
@param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
@param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).
@param[in] image_width (uint): Width of the pattern. |
def status(request): # pylint: disable=unused-argument
"""Status"""
token = request.GET.get("token", "")
if not token or token != settings.STATUS_TOKEN:
raise Http404()
info = {}
check_mapping = {
'REDIS': (get_redis_info, 'redis'),
'ELASTIC_SEARCH': (get_elasticsearch_info, 'elasticsearch'),
'POSTGRES': (get_pg_info, 'postgresql'),
'CELERY': (get_celery_info, 'celery'),
'CERTIFICATE': (get_certificate_info, 'certificate'),
}
for setting, (check_fn, key) in check_mapping.items():
if setting in settings.HEALTH_CHECK:
log.debug('getting: %s', key)
info[key] = check_fn()
log.debug('%s done', key)
code = HTTP_OK
status_all = UP
for key in info:
if info[key]["status"] == DOWN:
code = SERVICE_UNAVAILABLE
status_all = DOWN
break
info["status_all"] = status_all
resp = JsonResponse(info)
resp.status_code = code
return resp | Status |
def shapeless_placeholder(x, axis, name):
"""
Make the static shape of a tensor less specific.
If you want to feed to a tensor, the shape of the feed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared.
"""
shp = x.get_shape().as_list()
if not isinstance(axis, list):
axis = [axis]
for a in axis:
if shp[a] is None:
raise ValueError("Axis {} of shape {} is already unknown!".format(a, shp))
shp[a] = None
x = tf.placeholder_with_default(x, shape=shp, name=name)
return x | Make the static shape of a tensor less specific.
If you want to feed to a tensor, the shape of the feed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared. |
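A hedged usage sketch, assuming TensorFlow 1.x graph mode (where `tf.placeholder_with_default` is available): clearing the batch dimension lets the same graph later be fed inputs of any batch size.
```python
import tensorflow as tf   # TensorFlow 1.x assumed

x = tf.zeros([32, 28, 28, 3])                         # fully known static shape
y = shapeless_placeholder(x, axis=0, name='images')   # batch axis becomes None
print(y.get_shape().as_list())                        # [None, 28, 28, 3]
```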
def p_select_from_where_statement_1(self, p):
'''
statement : SELECT ANY variable_name FROM INSTANCES OF identifier WHERE expression
| SELECT MANY variable_name FROM INSTANCES OF identifier WHERE expression
'''
p[0] = SelectFromWhereNode(cardinality=p[2],
variable_name=p[3],
key_letter=p[7],
where_clause=p[9]) | statement : SELECT ANY variable_name FROM INSTANCES OF identifier WHERE expression
| SELECT MANY variable_name FROM INSTANCES OF identifier WHERE expression |
def to_ds9(self, coordsys='fk5', fmt='.6f', radunit='deg'):
"""
Converts a list of ``regions.Shape`` objects to ds9 region strings.
Parameters
----------
coordsys : str
This overrides the coordinate system frame for all regions.
fmt : str
A python string format defining the output precision.
Default is .6f, which is accurate to 0.0036 arcseconds.
radunit : str
This denotes the unit of the radius.
Returns
-------
region_string : str
ds9 region string
Examples
--------
TODO
"""
valid_symbols_reverse = {y: x for x, y in valid_symbols_ds9.items()}
ds9_strings = {
'circle': '{0}circle({1:FMT},{2:FMT},{3:FMT}RAD)',
'circleannulus': '{0}annulus({1:FMT},{2:FMT},{3:FMT}RAD,{4:FMT}RAD)',
'ellipse': '{0}ellipse({1:FMT},{2:FMT},{3:FMT}RAD,{4:FMT}RAD,{5:FMT})',
'rectangle': '{0}box({1:FMT},{2:FMT},{3:FMT}RAD,{4:FMT}RAD,{5:FMT})',
'polygon': '{0}polygon({1})',
'point': '{0}point({1:FMT},{2:FMT})',
'line': '{0}line({1:FMT},{2:FMT},{3:FMT},{4:FMT})',
'text': '{0}text({1:FMT},{2:FMT})'
}
output = '# Region file format: DS9 astropy/regions\n'
if radunit == 'arcsec':
# what's this for?
if coordsys in coordsys_mapping['DS9'].values():
radunitstr = '"'
else:
raise ValueError('Radius unit arcsec not valid for coordsys {}'.format(coordsys))
else:
radunitstr = ''
for key, val in ds9_strings.items():
ds9_strings[key] = val.replace("FMT", fmt).replace("RAD", radunitstr)
output += '{}\n'.format(coordsys)
for shape in self:
shape.check_ds9()
shape.meta = to_ds9_meta(shape.meta)
# if unspecified, include is True.
include = "-" if shape.include in (False, '-') else ""
if 'point' in shape.meta:
shape.meta['point'] = valid_symbols_reverse[shape.meta['point']]
if 'symsize' in shape.meta:
shape.meta['point'] += " {}".format(shape.meta.pop('symsize'))
meta_str = " ".join("{0}={1}".format(key, val) for key, val in
shape.meta.items() if key not in ('include', 'tag', 'comment', 'font', 'text'))
if 'tag' in shape.meta:
meta_str += " " + " ".join(["tag={0}".format(tag) for tag in shape.meta['tag']])
if 'font' in shape.meta:
meta_str += " " + 'font="{0}"'.format(shape.meta['font'])
if shape.meta.get('text', '') != '':
meta_str += " " + 'text={' + shape.meta['text'] + '}'
if 'comment' in shape.meta:
meta_str += " " + shape.meta['comment']
coord = []
if coordsys not in ['image', 'physical']:
for val in shape.coord:
if isinstance(val, Angle):
coord.append(float(val.value))
else:
if radunit == '' or radunit is None:
coord.append(float(val.value))
else:
coord.append(float(val.to(radunit).value))
if shape.region_type in ['ellipse', 'rectangle'] and len(shape.coord) % 2 == 1:
coord[-1] = float(shape.coord[-1].to('deg').value)
else:
for val in shape.coord:
if isinstance(val, u.Quantity):
coord.append(float(val.value))
else:
coord.append(float(val))
if shape.region_type in ['polygon', 'line']:
coord = [x+1 for x in coord]
else:
coord[0] += 1
coord[1] += 1
if shape.region_type == 'polygon':
val = "{0:" + fmt + "}"
temp = [val.format(x) for x in coord]
coord = ",".join(temp)
line = ds9_strings['polygon'].format(include, coord)
elif shape.region_type == 'ellipse':
coord[2:] = [x / 2 for x in coord[2:]]
if len(coord) % 2 == 1:
coord[-1] *= 2
line = ds9_strings['ellipse'].format(include, *coord)
else:
line = ds9_strings[shape.region_type].format(include, *coord)
if meta_str.strip():
output += "{0} # {1}\n".format(line, meta_str)
else:
output += "{0}\n".format(line)
return output | Converts a list of ``regions.Shape`` objects to ds9 region strings.
Parameters
----------
coordsys : str
This overrides the coordinate system frame for all regions.
fmt : str
A python string format defining the output precision.
Default is .6f, which is accurate to 0.0036 arcseconds.
radunit : str
This denotes the unit of the radius.
Returns
-------
region_string : str
ds9 region string
Examples
--------
TODO |
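The FMT/RAD placeholders in the template strings are replaced once up front, and the templates are then filled per shape with `str.format`. The substitution step on its own, with hypothetical values:
```python
fmt, radunitstr = '.6f', '"'                 # '"' marks arcseconds in ds9 syntax
template = '{0}circle({1:FMT},{2:FMT},{3:FMT}RAD)'
template = template.replace("FMT", fmt).replace("RAD", radunitstr)
print(template.format('', 10.0, 20.0, 0.5))
# circle(10.000000,20.000000,0.500000")
```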
def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
"""Dump an HTML document. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format) | Dump an HTML document. |
def registry_hostname(registry):
"""
Strip a reference to a registry to just the hostname:port
"""
if registry.startswith('http:') or registry.startswith('https:'):
return urlparse(registry).netloc
else:
return registry | Strip a reference to a registry to just the hostname:port |
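`urlparse` only yields a useful `netloc` when the string carries a scheme, which is why the function checks for the `http:`/`https:` prefix first. For example:
```python
from urllib.parse import urlparse   # Python 3 location; the original may import it differently

print(urlparse('https://registry.example.com:5000/v2/').netloc)   # registry.example.com:5000
print(urlparse('registry.example.com:5000').netloc)               # '' (no scheme, so netloc is empty)
```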
def insert_successor(self, successor):
"""!
@brief Insert successor to the node.
@param[in] successor (cfnode): Successor for adding.
"""
self.feature += successor.feature;
self.successors.append(successor);
successor.parent = self; | !
@brief Insert successor to the node.
@param[in] successor (cfnode): Successor for adding. |
def start(self, version=None, **kwargs):#game_version=None, data_version=None, **kwargs):
"""Launch the game process."""
if not version:
version = self.mostRecentVersion
pysc2Version = lib.Version( # convert to pysc2 Version
version.version,
version.baseVersion,
version.dataHash,
version.fixedHash)
return sc_process.StarcraftProcess(
self,
exec_path=self.exec_path(version.baseVersion),
version=pysc2Version,
**kwargs) | Launch the game process. |
def get_message(self, dummy0, sock_info, use_cmd=False):
"""Get a getmore message."""
ns = self.namespace()
ctx = sock_info.compression_context
if use_cmd:
spec = self.as_command(sock_info)[0]
if sock_info.op_msg_enabled:
request_id, msg, size, _ = _op_msg(
0, spec, self.db, ReadPreference.PRIMARY,
False, False, self.codec_options,
ctx=sock_info.compression_context)
return request_id, msg, size
ns = _UJOIN % (self.db, "$cmd")
return query(0, ns, 0, -1, spec, None, self.codec_options, ctx=ctx)
return get_more(ns, self.ntoreturn, self.cursor_id, ctx) | Get a getmore message. |
def read_header(file):
"""
-----
Brief
-----
Universal function for reading the header of .txt, .h5 and .edf files generated by OpenSignals.
-----------
Description
-----------
Each file generated by the OpenSignals software (available at https://www.biosignalsplux.com/en/software) owns a set
of metadata that allows the proper identification and characterization of each acquisition, by the identification of
the mac address of the devices, date of acquisition, duration, number of samples, type of the devices and firmware
version.
This function allows to easily access all of this information using only one line of code and outputs a dictionary
to easily identify each field of the header of file.
----------
Parameters
----------
file : file path
File path.
Returns
-------
out : dict
Header data read from the input file as dictionary with keys:
[mac address]: The key is a string with the mac address of the device;
sensor: Sensor(s) used in the acquisition;
device name: String with the mac address identifying the device used in the acquisition process;
sync interval: Time interval (in seconds) at which a digital signal is sent by a
“pacemaker” thread to a single device (used when the sync mode is on in OpenSignals for synchronized data
acquisition using multiple devices);
time: Time of the acquisition;
comments: Comments inserted in the OpenSignals software after the acquisition;
device connection: Used connection to the device while using it;
channels: Used channels;
keywords: Keywords inserted in the OpenSignals software after the acquisition;
digital IO: Digital channels available in each device (0 is the Input and 1 is the Output);
firmware version: Firmware version of the device;
device: Type of device used during the acquisition;
sampling rate: Sampling rate set prior to the acquisition;
resolution: Resolution set prior to the acquisition;
date: Date of the acquisition;
column labels: Labels of each set of data (e.g. channel 1).
"""
# =============================================================================================
# ============================== Identification of File Type ==================================
# =============================================================================================
file_type = _file_type(file)
# =============================================================================================
# ========================= Read Header accordingly to file type ==============================
# =============================================================================================
if file_type in ["txt", "plain", "bat"]:
file_temp = open(file, "r")
header = file_temp.readlines()[1]
file_temp.close()
# -------------------------- Conversion to dictionary. ------------------------------------
header = ast.literal_eval(header.split("# ")[1].split("\n")[0])
# -------------------------- Standardization of Header ------------------------------------
macs = header.keys()
col_nbr = 0
for mac in macs:
# ------------ Removal of "special", "sensor", "mode" and "position" keys -------------
del header[mac]["special"]
#del header[mac]["sensor"]
del header[mac]["position"]
del header[mac]["mode"]
# ---------------- Combination of the information in "label" and "column" -------------
column_labels = {}
for chn_nbr, chn in enumerate(header[mac]["channels"]):
chn_label = header[mac]["label"][chn_nbr]
column_labels[chn] = col_nbr + numpy.where(numpy.array(header[mac]["column"]) ==
chn_label)[0][0]
header[mac]["column labels"] = column_labels
col_nbr += len(header[mac]["column"])
del header[mac]["column"]
del header[mac]["label"]
elif file_type in ["h5", "x-hdf", "a"]:
file_temp = h5py.File(file)
macs = file_temp.keys()
header = {}
for mac in macs:
header[mac] = dict(file_temp.get(mac).attrs.items())
header[mac]["sensor"] = []
# --------- Removal of "duration", "keywords", "mode", "nsamples" ... keys ------------
for key in ["duration", "mode", "keywords", "nsamples", "forcePlatform values",
"macaddress"]:
if key in header[mac].keys():
del header[mac][key]
# del header[mac]["duration"]
# del header[mac]["mode"]
# del header[mac]["keywords"]
# del header[mac]["nsamples"]
# del header[mac]["forcePlatform values"]
# del header[mac]["macaddress"]
# -------------- Inclusion of a field used in .txt files (Convergence) ----------------
column_labels = {}
for chn in header[mac]["channels"]:
chn_label = "channel_" + str(chn)
column_labels[chn] = chn_label
header[mac]["sensor"].append(dict(file_temp.get(mac).get("raw").get("channel_" + str(chn)).attrs.items())["sensor"])
header[mac]["column labels"] = column_labels
file_temp.close()
# elif file_type in ["edf", "octet-stream"]:
#
# # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# # %%%%%%%%%%%% Code taken from convertEDF function of OpenSignals fileHandler %%%%%%%%%%%%%%
# # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# file_temp = pyedflib.EdfReader(file)
# # nbrSamples = file_temp.getNSamples()[0]
# nbr_signals = file_temp.signals_in_file
# file_header = file_temp.getHeader()
# start_date = file_header["startdate"]
# file_header["equipment"] += "']"
# equipment = ast.literal_eval(file_header['equipment'])
# equipment = [n.replace(" ", "_") for n in equipment]
# headers = file_temp.getSignalHeaders()
# header = {}
# mac_address_list = []
#
# # ---------------------------------- Mac Address List --------------------------------------
# for signal_nbr in numpy.arange(nbr_signals):
# config = headers[signal_nbr]
# mac_address = config["transducer"].split(",")[0]
#
# if mac_address not in header.keys():
# mac_address_list.append(mac_address)
# header[mac_address] = {}
# header[mac_address]["device name"] = mac_address
# header[mac_address]["sync interval"] = 2
# header[mac_address]["time"] = start_date.strftime('%H:%M:%S.%f')[:-3]
# header[mac_address]["comments"] = ""
# header[mac_address]["device connection"] = ""
# header[mac_address]["channels"] = []
# header[mac_address]["date"] = start_date.strftime('%Y-%m-%d')
# header[mac_address]["digital IO"] = []
#
# if "," in config['transducer']:
# header[mac_address]["firmware version"] = int(config['transducer'].
# split(",")[1])
# else:
# header[mac_address]["firmware version"] = ""
#
# header[mac_address]["device"] = equipment[len(mac_address_list) - 1]
# header[mac_address]["sampling rate"] = int(config['sample_rate'])
# header[mac_address]["resolution"] = []
# header[mac_address]["column labels"] = {}
# if "," in config['prefilter']:
# header[mac_address]["channels"].append(int(config['prefilter'].split(",")[0]))
# header[mac_address]["resolution"].append(int(config['prefilter'].split(",")[1]))
else:
raise RuntimeError("The type of the input file does not correspond to the predefined "
"formats of OpenSignals")
return header | -----
Brief
-----
Universal function for reading the header of .txt, .h5 and .edf files generated by OpenSignals.
-----------
Description
-----------
Each file generated by the OpenSignals software (available at https://www.biosignalsplux.com/en/software) owns a set
of metadata that allows the proper identification and characterization of each acquisition, by the identification of
the mac address of the devices, date of acquisition, duration, number of samples, type of the devices and firmware
version.
This function allows to easily access all of this information using only one line of code and outputs a dictionary
to easily identify each field of the header of file.
----------
Parameters
----------
file : file path
File path.
Returns
-------
out : dict
Header data read from the input file as dictionary with keys:
[mac address]: The key is a string with the mac address of the device;
sensor: Sensor(s) used in the acquisition;
device name: String with the mac address identifying the device used in the acquisition process;
sync interval: Time interval (in seconds) at which a digital signal is sent by a
“pacemaker” thread to a single device (used when the sync mode is on in OpenSignals for synchronized data
acquisition using multiple devices);
time: Time of the acquisition;
comments: Comments inserted in the OpenSignals software after the acquisition;
device connection: Used connection to the device while using it;
channels: Used channels;
keywords: Keywords inserted in the OpenSignals software after the acquisition;
digital IO: Digital channels available in each device (0 is the Input and 1 is the Output);
firmware version: Firmware version of the device;
device: Type of device used during the acquisition;
sampling rate: Sampling rate set prior to the acquisition;
resolution: Resolution set prior to the acquisition;
date: Date of the acquisition;
column labels: Labels of each set of data (e.g. channel 1). |
def org_find_members(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /org-xxxx/findMembers API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindMembers
"""
return DXHTTPRequest('/%s/findMembers' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /org-xxxx/findMembers API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindMembers |
def patch_namespaced_replica_set_scale(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)
return data | partially update scale of the specified ReplicaSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_replica_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Scale
If the method is called asynchronously,
returns the request thread. |
def addRelationships(
self,
data: list,
LIMIT: int = 20,
_print: bool = True,
crawl: bool = False,
) -> list:
"""
data = [{
"term1_id", "term2_id", "relationship_tid",
"term1_version", "term2_version",
"relationship_term_version",}]
"""
url_base = self.base_url + '/api/1/term/add-relationship'
relationships = []
for relationship in data:
relationship.update({
'term1_version': relationship['term1_version'],
'term2_version': relationship['term2_version'],
'relationship_term_version': relationship['relationship_term_version']
})
relationships.append((url_base, relationship))
return self.post(
relationships,
LIMIT = LIMIT,
action = 'Adding Relationships',
_print = _print,
crawl = crawl,
) | data = [{
"term1_id", "term2_id", "relationship_tid",
"term1_version", "term2_version",
"relationship_term_version",}] |
def __sort_stats(self, sortedby=None):
"""Return the stats (dict) sorted by (sortedby)."""
return sort_stats(self.stats, sortedby,
reverse=glances_processes.sort_reverse) | Return the stats (dict) sorted by (sortedby). |
def get_firmware(self):
"""
Gets baseline firmware information for a SAS Logical Interconnect.
Returns:
dict: SAS Logical Interconnect Firmware.
"""
firmware_uri = "{}/firmware".format(self.data["uri"])
return self._helper.do_get(firmware_uri) | Gets baseline firmware information for a SAS Logical Interconnect.
Returns:
dict: SAS Logical Interconnect Firmware. |
def signature(cls):
"""Returns kwargs to construct a `TaskRule` that will construct the target Optionable.
TODO: This indirection avoids a cycle between this module and the `rules` module.
"""
snake_scope = cls.options_scope.replace('-', '_')
partial_construct_optionable = functools.partial(_construct_optionable, cls)
partial_construct_optionable.__name__ = 'construct_scope_{}'.format(snake_scope)
return dict(
output_type=cls.optionable_cls,
input_selectors=tuple(),
func=partial_construct_optionable,
input_gets=(Get.create_statically_for_rule_graph(ScopedOptions, Scope),),
dependency_optionables=(cls.optionable_cls,),
) | Returns kwargs to construct a `TaskRule` that will construct the target Optionable.
TODO: This indirection avoids a cycle between this module and the `rules` module. |
def hash_contents(contents):
"""
Creates a hash of key names and hashes in a package dictionary.
"contents" must be a GroupNode.
"""
assert isinstance(contents, GroupNode)
result = hashlib.sha256()
def _hash_int(value):
result.update(struct.pack(">L", value))
def _hash_str(string):
assert isinstance(string, string_types)
_hash_int(len(string))
result.update(string.encode())
def _hash_object(obj):
_hash_str(obj.json_type)
if isinstance(obj, (TableNode, FileNode)):
hashes = obj.hashes
_hash_int(len(hashes))
for hval in hashes:
_hash_str(hval)
elif isinstance(obj, GroupNode):
children = obj.children
_hash_int(len(children))
for key, child in sorted(iteritems(children)):
_hash_str(key)
_hash_object(child)
else:
assert False, "Unexpected object: %r" % obj
# Backward compatibility: only hash metadata_hash if it's present.
if obj.metadata_hash is not None:
_hash_str(obj.metadata_hash)
_hash_object(contents)
return result.hexdigest() | Creates a hash of key names and hashes in a package dictionary.
"contents" must be a GroupNode. |
def _dequeue_function(self):
""" Internal method to dequeue to events. """
from UcsBase import WriteUcsWarning, _GenericMO, WriteObject, UcsUtils
while len(self._wbs):
lowestTimeout = None
for wb in self._wbs:
pollSec = wb.params["pollSec"]
managedObject = wb.params["managedObject"]
timeoutSec = wb.params["timeoutSec"]
transientValue = wb.params["transientValue"]
successValue = wb.params["successValue"]
failureValue = wb.params["failureValue"]
prop = wb.params["prop"]
startTime = wb.params["startTime"]
gmo = None
pmo = None
mce = None
if (pollSec != None and managedObject != None):
crDn = self.ConfigResolveDn(managedObject.getattr("Dn"), inHierarchical=YesOrNo.FALSE,
dumpXml=YesOrNo.FALSE)
if (crDn.errorCode != 0):
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning(
'[Error]: WatchUcs [Code]:' + crDn.errorCode + ' [Description]:' + crDn.errorDescr)
continue
for eachMo in crDn.OutConfig.GetChild():
pmo = eachMo
if pmo == None:
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning('Mo ' + managedObject.getattr("Dn") + ' not found.')
continue
gmo = _GenericMO(mo=pmo, option=WriteXmlOption.All)
else:
ts = datetime.datetime.now() - startTime
timeoutMs = 0
if (timeoutSec != None):
if (ts.seconds >= timeoutSec): # TimeOut
self._remove_watch_block(wb)
continue
timeoutMs = (timeoutSec - ts.seconds)
if (lowestTimeout == None):
lowestTimeout = timeoutMs
else:
if (lowestTimeout > timeoutMs):
lowestTimeout = timeoutMs
if (timeoutMs > 0):
mce = wb.Dequeue(timeoutMs)
else:
mce = wb.Dequeue(2147483647)
if mce == None:
# break
continue
if (managedObject == None): # Means parameterset is not Mo
if wb.cb != None:
wb.cb(mce)
continue
if mce != None:
gmo = _GenericMO(mo=mce.mo, option=WriteXmlOption.All)
attributes = []
if mce == None:
attributes = gmo.properties.keys()
else:
attributes = mce.changeList
if prop.lower() in (attr.lower() for attr in attributes):
if (len(successValue) > 0 and gmo.GetAttribute(UcsUtils.WordU(prop)) in successValue):
if mce != None:
if wb.cb != None:
wb.cb(mce)
else:
if wb.cb != None:
wb.cb(UcsMoChangeEvent(eventId=0, mo=pmo, changeList=prop))
if wb != None:
self._remove_watch_block(wb)
wb = None
break
# return
continue
if (len(failureValue) > 0 and gmo.GetAttribute(UcsUtils.WordU(prop)) in failureValue):
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning('Encountered error value ' + gmo.GetAttribute(
UcsUtils.WordU(prop)) + ' for property ' + prop + '.')
if mce != None:
if wb.cb != None:
wb.cb(mce)
else:
if wb.cb != None:
wb.cb(UcsMoChangeEvent(eventId=0, mo=pmo, changeList=prop))
if wb != None:
self._remove_watch_block(wb) # TODO: implement removeStop call back
wb = None
break
continue
if ((len(transientValue) > 0) and (not gmo.GetAttribute(UcsUtils.WordU(prop)) in transientValue)):
# TODO: Add Warning/Error messages in Logger.
WriteUcsWarning('Encountered unknown value ' + gmo.GetAttribute(
UcsUtils.WordU(prop)) + ' for property ' + prop + '.')
if mce != None:
if wb.cb != None:
wb.cb(mce)
else:
if wb.cb != None:
wb.cb(UcsMoChangeEvent(eventId=0, mo=pmo, changeList=prop))
if wb != None:
self._remove_watch_block(wb) # TODO: implement removeStop call back
wb = None
break
continue
if (pollSec != None):
pollMs = pollSec
if (timeoutSec != None):
pts = datetime.datetime.now() - startTime
if (pts.seconds >= timeoutSec): # TimeOut
break
timeoutMs = (timeoutSec - pts.seconds)
if (timeoutMs < pollSec):
pollMs = pts.seconds
# time.sleep(pollMs)
if (lowestTimeout == None):
lowestTimeout = pollMs
else:
if (lowestTimeout > pollMs):
lowestTimeout = pollMs
if len(self._wbs):
self._dequeue_wait(lowestTimeout)
return | Internal method to dequeue the events. |
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath) | Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_xpath("//div[contains(@class, 'foo')]") |
async def create_webhook(self, *, name, avatar=None, reason=None):
"""|coro|
Creates a webhook for this channel.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
.. versionchanged:: 1.1.0
Added the ``reason`` keyword-only parameter.
Parameters
-------------
name: :class:`str`
The webhook's name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's default avatar.
This operates similarly to :meth:`~ClientUser.edit`.
reason: Optional[:class:`str`]
The reason for creating this webhook. Shows up in the audit logs.
Raises
-------
HTTPException
Creating the webhook failed.
Forbidden
You do not have permissions to create a webhook.
Returns
--------
:class:`Webhook`
The created webhook.
"""
if avatar is not None:
avatar = utils._bytes_to_base64_data(avatar)
data = await self._state.http.create_webhook(self.id, name=str(name), avatar=avatar, reason=reason)
return Webhook.from_state(data, state=self._state) | |coro|
Creates a webhook for this channel.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
.. versionchanged:: 1.1.0
Added the ``reason`` keyword-only parameter.
Parameters
-------------
name: :class:`str`
The webhook's name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's default avatar.
This operates similarly to :meth:`~ClientUser.edit`.
reason: Optional[:class:`str`]
The reason for creating this webhook. Shows up in the audit logs.
Raises
-------
HTTPException
Creating the webhook failed.
Forbidden
You do not have permissions to create a webhook.
Returns
--------
:class:`Webhook`
The created webhook. |
def classify_by_name(names):
"""Classify a (composite) ligand by the HETID(s)"""
if len(names) > 3: # Polymer
if len(set(config.RNA).intersection(set(names))) != 0:
ligtype = 'RNA'
elif len(set(config.DNA).intersection(set(names))) != 0:
ligtype = 'DNA'
else:
ligtype = "POLYMER"
else:
ligtype = 'SMALLMOLECULE'
for name in names:
if name in config.METAL_IONS:
if len(names) == 1:
ligtype = 'ION'
else:
if "ION" not in ligtype:
ligtype += '+ION'
return ligtype | Classify a (composite) ligand by the HETID(s) |
def unbroadcast(a, b):
'''
unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
have been reshaped such that arithmetic numpy operations such as aa * bb will result in
row-wise operation instead of column-wise broadcasting.
'''
# they could be sparse:
spa = sps.issparse(a)
spb = sps.issparse(b)
if spa and spb: return (a,b)
elif spa or spb:
def fix(sp,nm):
nm = np.asarray(nm)
dnm = len(nm.shape)
nnm = np.prod(nm.shape)
# if we have (sparse matrix) * (high-dim array), unbroadcast the dense array
if dnm == 0: return (sp, np.reshape(nm, (1, 1)))
elif dnm == 1: return (sp, np.reshape(nm, (nnm, 1)))
elif dnm == 2: return (sp, nm)
else: return unbroadcast(sp.toarray(), nm)
return fix(a, b) if spa else tuple(reversed(fix(b, a)))
# okay, no sparse matrices found:
a = np.asarray(a)
b = np.asarray(b)
da = len(a.shape)
db = len(b.shape)
if da > db: return (a, np.reshape(b, b.shape + tuple(np.ones(da-db, dtype=np.int))))
elif da < db: return (np.reshape(a, a.shape + tuple(np.ones(db-da, dtype=np.int))), b)
else: return (a, b) | unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
have been reshaped such that arithmetic numpy operations such as aa * bb will result in
row-wise operation instead of column-wise broadcasting. |
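The dense branch simply pads the lower-dimensional operand with trailing singleton axes so NumPy broadcasting pairs up the leading (row) axes instead of the trailing ones. A small demonstration of the effect:
```python
import numpy as np

a = np.ones((3, 4))                  # per-row data
b = np.array([1., 2., 3.])           # one scalar per row
# a * b would raise: shapes (3, 4) and (3,) broadcast on the last axis.
bb = np.reshape(b, b.shape + (1,))   # what unbroadcast does: pad to (3, 1)
print((a * bb).shape)                # (3, 4) -- row-wise scaling
```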
def get_replication_command_history(self, schedule_id, limit=20, offset=0,
view=None):
"""
Retrieve a list of commands for a replication schedule.
@param schedule_id: The id of the replication schedule.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands executed for a replication schedule.
@since: API v4
"""
params = {
'limit': limit,
'offset': offset,
}
if view:
params['view'] = view
return self._get("replications/%s/history" % schedule_id,
ApiReplicationCommand, True, params=params, api_version=4) | Retrieve a list of commands for a replication schedule.
@param schedule_id: The id of the replication schedule.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands executed for a replication schedule.
@since: API v4 |
def _get_index(n_items, item_size, n):
"""Prepare an index attribute for GPU uploading."""
index = np.arange(n_items)
index = np.repeat(index, item_size)
index = index.astype(np.float64)
assert index.shape == (n,)
return index | Prepare an index attribute for GPU uploading. |
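For reference, the repeat-then-cast pattern expands an item index so every element of an item carries its item's id:
```python
import numpy as np

n_items, item_size = 3, 2
index = np.repeat(np.arange(n_items), item_size).astype(np.float64)
print(index)          # [0. 0. 1. 1. 2. 2.] -- shape (n_items * item_size,)
```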
def list_views(app, appbuilder):
"""
List all registered views
"""
_appbuilder = import_application(app, appbuilder)
echo_header("List of registered views")
for view in _appbuilder.baseviews:
click.echo(
"View:{0} | Route:{1} | Perms:{2}".format(
view.__class__.__name__, view.route_base, view.base_permissions
)
) | List all registered views |
def last_modified(self) -> Optional[datetime.datetime]:
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self._headers.get(hdrs.LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None | The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object. |
def get_version_naive(cls, name, ignore=''):
""" Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
match = cls._get_regex_search(name, cls.REGEX_VERSION.format(SEP=cls.REGEX_SEPARATORS), ignore=ignore)
if match is not None:
if len(match) > 1:
for m in match:
m.update({'version': int(m['match'].upper().replace('V', ''))})
compound_version = '.'.join([str(m['version']) for m in match])
compound_version = float(compound_version) if compound_version.count('.') == 1 else compound_version
return {'compound_matches': match,
'compound_version': compound_version,
'pattern': match[0]['pattern'],
'input': match[0]['input']}
elif len(match) == 1:
match = match[0]
match.update({'version': int(match['match'].upper().replace('V', ''))})
return match
return None | Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches |
def get_ref_indices(self):
"""
:return: list of all indices in object reference.
"""
ixn_obj = self
ref_indices = []
while ixn_obj != ixn_obj.root:
ref_indices.append(ixn_obj.ref.split(':')[-1])
ixn_obj = ixn_obj.parent
return ref_indices[::-1] | :return: list of all indices in object reference. |
def jboss_standalone_main_config_files(broker):
"""Command: JBoss standalone main config files"""
ps = broker[DefaultSpecs.ps_auxww].content
results = []
search = re.compile(r"\-Djboss\.server\.base\.dir=(\S+)").search
# JBoss process command content should contain jboss.home.dir
for p in ps:
if '-D[Standalone]' in p:
match = search(p)
# Only get the path which is absolute
if match and match.group(1)[0] == "/":
main_config_path = match.group(1)
main_config_file = "standalone.xml"
if " -c " in p:
main_config_file = p.split(" -c ")[1].split()[0]
elif "--server-config" in p:
main_config_file = p.split("--server-config=")[1].split()[0]
results.append(main_config_path + "/" + main_config_file)
return list(set(results)) | Command: JBoss standalone main config files |
def construct_xblock_from_class(self, cls, scope_ids, field_data=None, *args, **kwargs):
"""
Construct a new xblock of type cls, mixing in the mixins
defined for this application.
"""
return self.mixologist.mix(cls)(
runtime=self,
field_data=field_data,
scope_ids=scope_ids,
*args, **kwargs
) | Construct a new xblock of type cls, mixing in the mixins
defined for this application. |
def run(self):
"""
This runs the leader process to issue and manage jobs.
:raises: toil.leader.FailedJobsException if at the end of the function there remain \
failed jobs
:return: The return value of the root job's run function.
:rtype: Any
"""
# Start the stats/logging aggregation thread
self.statsAndLogging.start()
if self.config.metrics:
self.toilMetrics = ToilMetrics(provisioner=self.provisioner)
try:
# Start service manager thread
self.serviceManager.start()
try:
# Create cluster scaling processes if not None
if self.clusterScaler is not None:
self.clusterScaler.start()
try:
# Run the main loop
self.innerLoop()
finally:
if self.clusterScaler is not None:
logger.debug('Waiting for workers to shutdown.')
startTime = time.time()
self.clusterScaler.shutdown()
logger.debug('Worker shutdown complete in %s seconds.', time.time() - startTime)
finally:
# Ensure service manager thread is properly shutdown
self.serviceManager.shutdown()
finally:
# Ensure the stats and logging thread is properly shutdown
self.statsAndLogging.shutdown()
if self.toilMetrics:
self.toilMetrics.shutdown()
# Filter the failed jobs
self.toilState.totalFailedJobs = [j for j in self.toilState.totalFailedJobs if self.jobStore.exists(j.jobStoreID)]
try:
self.create_status_sentinel_file(self.toilState.totalFailedJobs)
except IOError as e:
logger.debug('Error from importFile with hardlink=True: {}'.format(e))
logger.info("Finished toil run %s" %
("successfully." if not self.toilState.totalFailedJobs \
else ("with %s failed jobs." % len(self.toilState.totalFailedJobs))))
if len(self.toilState.totalFailedJobs):
logger.info("Failed jobs at end of the run: %s", ' '.join(str(job) for job in self.toilState.totalFailedJobs))
# Cleanup
if len(self.toilState.totalFailedJobs) > 0:
raise FailedJobsException(self.config.jobStore, self.toilState.totalFailedJobs, self.jobStore)
return self.jobStore.getRootJobReturnValue() | This runs the leader process to issue and manage jobs.
:raises: toil.leader.FailedJobsException if at the end of the function there remain \
failed jobs
:return: The return value of the root job's run function.
:rtype: Any |
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in type.mro(cls):
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__ | Find the first __init__ or __new__ method in the given class's MRO. |
def getdata(self):
"""
A sequence of pixel data relating to the changes that occurred
since the last time :py:func:`redraw_required` was last called.
:returns: A sequence of pixels or ``None``.
:rtype: iterable
"""
if self.bounding_box:
return self.image.crop(self.bounding_box).getdata() | A sequence of pixel data relating to the changes that occurred
since the last time :py:func:`redraw_required` was last called.
:returns: A sequence of pixels or ``None``.
:rtype: iterable |
def get_file_size(file_object):
'''Returns the size, in bytes, of a file. Expects an object that supports
seek and tell methods.
Args:
file_object (file_object) - The object that represents the file
Returns:
(int): size of the file, in bytes'''
position = file_object.tell()
file_object.seek(0, 2)
file_size = file_object.tell()
file_object.seek(position, 0)
return file_size | Returns the size, in bytes, of a file. Expects an object that supports
seek and tell methods.
Args:
file_object (file_object) - The object that represents the file
Returns:
(int): size of the file, in bytes |
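The seek-to-end/tell/seek-back dance works on any seekable object, so it can be checked without touching the filesystem:
```python
import io

buf = io.BytesIO(b"hello world")
position = buf.tell()        # remember where the caller was
buf.seek(0, 2)               # jump to the end of the stream
print(buf.tell())            # 11 -- the size in bytes
buf.seek(position, 0)        # restore the original position
```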
def _make_regex(self):
"""
Build a re object based on keys in the current dictionary
"""
return re.compile("|".join(map(re.escape, self.keys()))) | Build a re object based on keys in the current dictionary |
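This is the classic one-pass multi-replacement idiom: escape every key, join with `|`, and let `re.sub` call back into the dict for each match. A self-contained sketch (the class name here is hypothetical):
```python
import re

class Xlator(dict):
    """dict subclass whose keys are search strings and values are replacements."""
    def _make_regex(self):
        return re.compile("|".join(map(re.escape, self.keys())))
    def __call__(self, match):
        return self[match.group(0)]
    def xlat(self, text):
        return self._make_regex().sub(self, text)

print(Xlator({"cat": "dog", "a.b": "c"}).xlat("a.b cat"))   # c dog
```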
def load_word_file(filename):
"""Loads a words file as a list of lines"""
words_file = resource_filename(__name__, "words/%s" % filename)
handle = open(words_file, 'r')
words = handle.readlines()
handle.close()
return words | Loads a words file as a list of lines |
def cache(self, f):
"""Cache a function using the context's cache directory."""
if self._memory is None: # pragma: no cover
logger.debug("Joblib is not installed: skipping cacheing.")
return f
assert f
# NOTE: discard self in instance methods.
if 'self' in inspect.getargspec(f).args:
ignore = ['self']
else:
ignore = None
disk_cached = self._memory.cache(f, ignore=ignore)
return disk_cached | Cache a function using the context's cache directory. |
def cosi_posterior(vsini_dist,veq_dist,vgrid=None,npts=100,vgrid_pts=1000):
"""returns posterior of cosI given dists for vsini and veq (incorporates unc. in vsini)
"""
if vgrid is None:
vgrid = np.linspace(min(veq_dist.ppf(0.001),vsini_dist.ppf(0.001)),
max(veq_dist.ppf(0.999),vsini_dist.ppf(0.999)),
vgrid_pts)
logging.debug('vgrid: {} pts, {} to {}'.format(vgrid_pts,
vgrid[0],
vgrid[-1]))
#vgrid = np.linspace(vsini_dist.ppf(0.005),vsini_dist.ppf(0.995),vgrid_pts)
cs = np.linspace(0,1,npts)
Ls = cs*0
for i,c in enumerate(cs):
Ls[i] = like_cosi(c,vsini_dist,veq_dist,vgrid=vgrid)
if np.isnan(Ls[-1]): #hack to prevent nan when cos=1
Ls[-1] = Ls[-2]
Ls /= np.trapz(Ls,cs)
return cs,Ls | returns posterior of cosI given dists for vsini and veq (incorporates unc. in vsini) |
def find_recipes(folders, pattern=None, base=None):
'''find recipes will use a list of base folders, files,
or patterns over a subset of content to find recipe files
(indicated by Starting with Singularity
Parameters
==========
base: if defined, consider folders recursively below this level.
'''
# If the user doesn't provide a list of folders, use $PWD
if folders is None:
folders = os.getcwd()
if not isinstance(folders,list):
folders = [folders]
manifest = dict()
for base_folder in folders:
# If we find a file, return the one file
custom_pattern = None
if os.path.isfile(base_folder): # updates manifest
manifest = find_single_recipe(filename=base_folder,
pattern=pattern,
manifest=manifest)
continue
# The user likely provided a custom pattern
elif not os.path.isdir(base_folder):
custom_pattern = base_folder.split('/')[-1:][0]
base_folder = "/".join(base_folder.split('/')[0:-1])
# If we don't trigger loop, we have directory
manifest = find_folder_recipes(base_folder=base_folder,
pattern=custom_pattern or pattern,
manifest=manifest,
base=base)
return manifest | find recipes will use a list of base folders, files,
or patterns over a subset of content to find recipe files
(indicated by starting with "Singularity").
Parameters
==========
base: if defined, consider folders recursively below this level. |
def validate(self):
"""Perform some basic configuration validation.
"""
if not self.conf.get('auth_token'):
raise PacketManagerException('The auth token for Packet is not defined but required.')
if not self.conf.get('projects'):
raise PacketManagerException('Required "projects" section is missing.')
projects = self.conf.get('projects')
if not projects.keys():
raise PacketManagerException('At least one project at Packet is required.')
failure = False
for project, identifier in projects.items():
if not identifier:
failure = True
logging.error('Project "%s" has no valid identifier.', project)
if failure:
raise PacketManagerException('One or more projects are not setup appropriately.') | Perform some basic configuration validation. |
def __intermediate_addresses(self, interface):
"""
converts NetJSON address to
UCI intermediate data structure
"""
address_list = self.get_copy(interface, 'addresses')
# do not ignore interfaces if they do not contain any address
if not address_list:
return [{'proto': 'none'}]
result = []
static = {}
dhcp = []
for address in address_list:
family = address.get('family')
# dhcp
if address['proto'] == 'dhcp':
address['proto'] = 'dhcp' if family == 'ipv4' else 'dhcpv6'
dhcp.append(self.__intermediate_address(address))
continue
if 'gateway' in address:
uci_key = 'gateway' if family == 'ipv4' else 'ip6gw'
interface[uci_key] = address['gateway']
# static
address_key = 'ipaddr' if family == 'ipv4' else 'ip6addr'
static.setdefault(address_key, [])
static[address_key].append('{address}/{mask}'.format(**address))
static.update(self.__intermediate_address(address))
if static:
# do not use CIDR notation when using a single ipv4
# see https://github.com/openwisp/netjsonconfig/issues/54
if len(static.get('ipaddr', [])) == 1:
network = ip_interface(six.text_type(static['ipaddr'][0]))
static['ipaddr'] = str(network.ip)
static['netmask'] = str(network.netmask)
# do not use lists when using a single ipv6 address
# (avoids to change output of existing configuration)
if len(static.get('ip6addr', [])) == 1:
static['ip6addr'] = static['ip6addr'][0]
result.append(static)
if dhcp:
result += dhcp
return result | converts NetJSON address to
UCI intermediate data structure |
def set_duty_cycle(self, pin, dutycycle):
"""Set percent duty cycle of PWM output on specified pin. Duty cycle must
be a value 0.0 to 100.0 (inclusive).
"""
if dutycycle < 0.0 or dutycycle > 100.0:
raise ValueError('Invalid duty cycle value, must be between 0.0 to 100.0 (inclusive).')
if pin not in self.pwm:
raise ValueError('Pin {0} is not configured as a PWM. Make sure to first call start for the pin.'.format(pin))
self.pwm[pin].ChangeDutyCycle(dutycycle) | Set percent duty cycle of PWM output on specified pin. Duty cycle must
be a value 0.0 to 100.0 (inclusive). |
def add_dicts(d1, d2):
""" Merge two dicts of addable values """
if d1 is None:
return d2
if d2 is None:
return d1
keys = set(d1)
keys.update(set(d2))
ret = {}
for key in keys:
v1 = d1.get(key)
v2 = d2.get(key)
if v1 is None:
ret[key] = v2
elif v2 is None:
ret[key] = v1
else:
ret[key] = v1 + v2
return ret | Merge two dicts of addable values |
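A minimal usage sketch for the add_dicts helper above; the sample dictionaries are illustrative only.

counts_a = {'errors': 2, 'warnings': 1}
counts_b = {'errors': 1, 'info': 4}
merged = add_dicts(counts_a, counts_b)
# keys present in both dicts have their values added, the rest are carried over
# merged == {'errors': 3, 'warnings': 1, 'info': 4}
print(merged)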
def intersects_any(self,
ray_origins,
ray_directions):
"""
Check if a list of rays hits the surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
hit: (n,) bool, did each ray hit the surface
"""
first = self.intersects_first(ray_origins=ray_origins,
ray_directions=ray_directions)
hit = first != -1
return hit | Check if a list of rays hits the surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
hit: (n,) bool, did each ray hit the surface |
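A hedged usage sketch with trimesh, assuming the intersector is exposed as mesh.ray as in recent trimesh releases; the mesh and ray values are illustrative only.

import numpy as np
import trimesh

mesh = trimesh.creation.icosphere()        # unit sphere centred at the origin
origins = np.array([[0.0, 0.0, -2.0]])     # one ray starting below the sphere
directions = np.array([[0.0, 0.0, 1.0]])   # pointing straight up
hit = mesh.ray.intersects_any(ray_origins=origins, ray_directions=directions)
# hit -> array([ True]): the ray crosses the surface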
def _simplify_feature_value(self, name, value):
"""Return simplified and more pythonic feature values."""
if name == 'prefix':
channel_modes, channel_chars = value.split(')')
channel_modes = channel_modes[1:]
# [::-1] to reverse order and go from lowest to highest privs
value = OrderedDict(list(zip(channel_modes, channel_chars))[::-1])
return value
elif name == 'chanmodes':
value = value.split(',')
return value
elif name == 'targmax':
max_available = {}
for sort in value.split(','):
command, limit = sort.split(':')
command = command.casefold()
max_available[command] = limit_to_number(limit)
return max_available
elif name == 'chanlimit':
limit_available = {}
for sort in value.split(','):
chan_types, limit = sort.split(':')
for prefix in chan_types:
limit_available[prefix] = limit_to_number(limit)
return limit_available
elif name in _limits:
value = limit_to_number(value)
return value
else:
return value | Return simplified and more pythonic feature values. |
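A standalone sketch of what the 'prefix' branch above computes for a typical IRC ISUPPORT value; the sample string is illustrative only.

from collections import OrderedDict

raw = '(ov)@+'                            # hypothetical PREFIX feature value
channel_modes, channel_chars = raw.split(')')
channel_modes = channel_modes[1:]         # drop the leading '('
prefix = OrderedDict(list(zip(channel_modes, channel_chars))[::-1])
# prefix == OrderedDict([('v', '+'), ('o', '@')]), lowest to highest privilege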
def perl_cmd():
"""Retrieve path to locally installed conda Perl or first in PATH.
"""
perl = which(os.path.join(get_bcbio_bin(), "perl"))
if perl:
return perl
else:
return which("perl") | Retrieve path to locally installed conda Perl or first in PATH. |
def _check_backends(self):
""" Check that every backend in roles and attributes
is declared in main configuration
"""
backends = self.backends_params.keys()
for b in self.roles.get_backends():
if b not in backends:
raise MissingBackend(b, 'role')
for b in self.attributes.get_backends():
if b not in backends:
raise MissingBackend(b, 'attribute') | Check that every backend in roles and attributes
is declared in main configuration |
def hybrid_threaded_worker(selector, workers):
"""Runs a set of workers, each in a separate thread.
:param selector:
A function that takes a hints-tuple and returns a key
indexing a worker in the `workers` dictionary.
:param workers:
A dictionary of workers.
:returns:
A connection for the scheduler.
:rtype: Connection
The hybrid worker dispatches jobs to the different workers
based on the information contained in the hints. If no hints
were given, the job is run in the main thread.
Dispatching is done in the main thread. Retrieving results is
done in a separate thread for each worker. In this design it is
assumed that dispatching a job takes little time, while waiting for
one to return a result may take a long time.
"""
result_queue = Queue()
job_sink = {k: w.sink() for k, w in workers.items()}
@push
def dispatch_job():
default_sink = result_queue.sink()
while True:
msg = yield
if msg is EndOfQueue:
for k in workers.keys():
try:
job_sink[k].send(EndOfQueue)
except StopIteration:
pass
return
if msg is FlushQueue:
for k in workers.keys():
try:
job_sink[k].send(FlushQueue)
except StopIteration:
pass
return
worker = selector(msg.node)
if worker:
job_sink[worker].send(msg)
else:
default_sink.send(run_job(*msg))
for key, worker in workers.items():
t = threading.Thread(
target=patch,
args=(worker.source, result_queue.sink))
t.daemon = True
t.start()
return Connection(result_queue.source, dispatch_job) | Runs a set of workers, each in a separate thread.
:param selector:
A function that takes a hints-tuple and returns a key
indexing a worker in the `workers` dictionary.
:param workers:
A dictionary of workers.
:returns:
A connection for the scheduler.
:rtype: Connection
The hybrid worker dispatches jobs to the different workers
based on the information contained in the hints. If no hints
were given, the job is run in the main thread.
Dispatching is done in the main thread. Retrieving results is
done in a separate thread for each worker. In this design it is
assumed that dispatching a job takes little time, while waiting for
one to return a result may take a long time. |
def to_zhuyin(s, delimiter=' ', all_readings=False, container='[]'):
"""Convert a string's Chinese characters to Zhuyin readings.
*s* is a string containing Chinese characters.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched.
"""
numbered_pinyin = to_pinyin(s, delimiter, all_readings, container, False)
zhuyin = pinyin_to_zhuyin(numbered_pinyin)
return zhuyin | Convert a string's Chinese characters to Zhuyin readings.
*s* is a string containing Chinese characters.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched. |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(VMSFSCollector, self).get_default_config()
config.update({
'path': 'vmsfs'
})
return config | Returns the default collector settings |
def make_slash_number(self):
"""
Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return:
"""
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
return '\\2'
elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
return '\\3'
else:
return '' | Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return: |
def enable_hostgroup_passive_svc_checks(self, hostgroup):
"""Enable service passive checks for a hostgroup
Format of the line that triggers function call::
ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name>
:param hostgroup: hostgroup to enable
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:return: None
"""
for host_id in hostgroup.get_hosts():
if host_id in self.daemon.hosts:
for service_id in self.daemon.hosts[host_id].services:
if service_id in self.daemon.services:
self.enable_passive_svc_checks(self.daemon.services[service_id]) | Enable service passive checks for a hostgroup
Format of the line that triggers function call::
ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name>
:param hostgroup: hostgroup to enable
:type hostgroup: alignak.objects.hostgroup.Hostgroup
:return: None |
def _get_audio_duration_seconds(self, audio_abs_path):
"""
Parameters
----------
audio_abs_path : str
Returns
-------
total_seconds : int
"""
HHMMSS_duration = subprocess.check_output(
("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}' | """
"""grep -oh "^[^=]*" """).format(
audio_abs_path, "Duration"),
shell=True, universal_newlines=True).rstrip()
total_seconds = sum(
[float(x) * 60 ** (2 - i)
for i, x in enumerate(HHMMSS_duration.split(":"))])
return total_seconds | Parameters
----------
audio_abs_path : str
Returns
-------
total_seconds : int |
def toString(value, mode):
""" Converts angle float to string.
Mode refers to LAT/LON.
"""
string = angle.toString(value)
sign = string[0]
separator = CHAR[mode][sign]
string = string.replace(':', separator, 1)
return string[1:] | Converts angle float to string.
Mode refers to LAT/LON. |
def flick(self, x, y, speed):
"""Deprecated use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead.
Flick on the touch screen using finger motion events.
This flickcommand starts at a particulat screen location.
Support:
iOS
Args:
x(float}: The x offset in pixels to flick by.
y(float): The y offset in pixels to flick by.
speed(float) The speed in pixels per seconds.
Returns:
WebElement object.
"""
self._driver.flick(self, x, y, speed) | Deprecated, use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead.
Flick on the touch screen using finger motion events.
This flick command starts at a particular screen location.
Support:
iOS
Args:
x(float): The x offset in pixels to flick by.
y(float): The y offset in pixels to flick by.
speed(float): The speed in pixels per second.
Returns:
WebElement object. |
def batch_update_conversations(self, event, conversation_ids):
"""
Batch update conversations.
Perform a change on a set of conversations. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
"""
path = {}
data = {}
params = {}
# REQUIRED - conversation_ids
"""List of conversations to update. Limited to 500 conversations."""
data["conversation_ids"] = conversation_ids
# REQUIRED - event
"""The action to take on each conversation."""
self._validate_enum(event, ["mark_as_read", "mark_as_unread", "star", "unstar", "archive", "destroy"])
data["event"] = event
self.logger.debug("PUT /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/conversations".format(**path), data=data, params=params, single_item=True) | Batch update conversations.
Perform a change on a set of conversations. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation. |
def hypergraph(raw_events, entity_types=None, opts={}, drop_na=True, drop_edge_attrs=False, verbose=True, direct=False):
"""Transform a dataframe into a hypergraph.
:param Dataframe raw_events: Dataframe to transform
:param List entity_types: Optional list of columns (strings) to turn into nodes, None signifies all
:param Dict opts: See below
:param bool drop_edge_attrs: Whether to include each row's attributes on its edges, defaults to False (include)
:param bool verbose: Whether to print size information
:param bool direct: Omit hypernode and instead strongly connect nodes in an event
Create a graph out of the dataframe, and return the graph components as dataframes,
and the renderable result Plotter. It reveals relationships between the rows and between column values.
This transform is useful for lists of events, samples, relationships, and other structured high-dimensional data.
The transform creates a node for every row, and turns a row's column entries into node attributes.
If direct=False (default), every unique value within a column is also turned into a node.
Edges are added to connect a row's nodes to each of its column nodes, or if direct=True, to one another.
Nodes are given the attribute 'type' corresponding to the originating column name, or in the case of a row, 'EventID'.
Consider a list of events. Each row represents a distinct event, and each column some metadata about an event.
If multiple events have common metadata, they will be transitively connected through those metadata values.
The layout algorithm will try to cluster the events together.
Conversely, if an event has unique metadata, the unique metadata will turn into nodes that only have connections to the event node, and the clustering algorithm will cause them to form a ring around the event node.
Best practice is to set EVENTID to a row's unique ID,
SKIP to all non-categorical columns (or entity_types to all categorical columns),
and CATEGORY to group columns with the same kinds of values.
The optional ``opts={...}`` configuration options are:
* 'EVENTID': Column name to inspect for a row ID. By default, uses the row index.
* 'CATEGORIES': Dictionary mapping a category name to inhabiting columns. E.g., {'IP': ['srcAddress', 'dstAddress']}. If the same IP appears in both columns, this makes the transform generate one node for it, instead of one for each column.
* 'DELIM': When creating node IDs, defines the separator used between the column name and node value
* 'SKIP': List of column names to not turn into nodes. For example, dates and numbers are often skipped.
* 'EDGES': For direct=True, instead of making all edges, pick column pairs. E.g., {'a': ['b', 'd'], 'd': ['d']} creates edges between columns a->b and a->d, and self-edges d->d.
:returns: {'entities': DF, 'events': DF, 'edges': DF, 'nodes': DF, 'graph': Plotter}
:rtype: Dictionary
**Example**
::
import graphistry
h = graphistry.hypergraph(my_df)
g = h['graph'].plot()
"""
from . import hyper
return hyper.Hypergraph().hypergraph(PyGraphistry, raw_events, entity_types, opts, drop_na, drop_edge_attrs, verbose, direct) | Transform a dataframe into a hypergraph.
:param Dataframe raw_events: Dataframe to transform
:param List entity_types: Optional list of columns (strings) to turn into nodes, None signifies all
:param Dict opts: See below
:param bool drop_edge_attrs: Whether to include each row's attributes on its edges, defaults to False (include)
:param bool verbose: Whether to print size information
:param bool direct: Omit hypernode and instead strongly connect nodes in an event
Create a graph out of the dataframe, and return the graph components as dataframes,
and the renderable result Plotter. It reveals relationships between the rows and between column values.
This transform is useful for lists of events, samples, relationships, and other structured high-dimensional data.
The transform creates a node for every row, and turns a row's column entries into node attributes.
If direct=False (default), every unique value within a column is also turned into a node.
Edges are added to connect a row's nodes to each of its column nodes, or if direct=True, to one another.
Nodes are given the attribute 'type' corresponding to the originating column name, or in the case of a row, 'EventID'.
Consider a list of events. Each row represents a distinct event, and each column some metadata about an event.
If multiple events have common metadata, they will be transitively connected through those metadata values.
The layout algorithm will try to cluster the events together.
Conversely, if an event has unique metadata, the unique metadata will turn into nodes that only have connections to the event node, and the clustering algorithm will cause them to form a ring around the event node.
Best practice is to set EVENTID to a row's unique ID,
SKIP to all non-categorical columns (or entity_types to all categorical columns),
and CATEGORY to group columns with the same kinds of values.
The optional ``opts={...}`` configuration options are:
* 'EVENTID': Column name to inspect for a row ID. By default, uses the row index.
* 'CATEGORIES': Dictionary mapping a category name to inhabiting columns. E.g., {'IP': ['srcAddress', 'dstAddress']}. If the same IP appears in both columns, this makes the transform generate one node for it, instead of one for each column.
* 'DELIM': When creating node IDs, defines the separator used between the column name and node value
* 'SKIP': List of column names to not turn into nodes. For example, dates and numbers are often skipped.
* 'EDGES': For direct=True, instead of making all edges, pick column pairs. E.g., {'a': ['b', 'd'], 'd': ['d']} creates edges between columns a->b and a->d, and self-edges d->d.
:returns: {'entities': DF, 'events': DF, 'edges': DF, 'nodes': DF, 'graph': Plotter}
:rtype: Dictionary
**Example**
::
import graphistry
h = graphistry.hypergraph(my_df)
g = h['graph'].plot() |
def load(target, source_module=None):
"""Get the actual implementation of the target."""
module, klass, function = _get_module(target)
if not module and source_module:
module = source_module
if not module:
raise MissingModule(
"No module name supplied or source_module provided.")
actual_module = sys.modules[module]
if not klass:
return getattr(actual_module, function)
class_object = getattr(actual_module, klass)
if function:
return getattr(class_object, function)
return class_object | Get the actual implementation of the target. |
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data | ``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data. |
def volume_mesh(mesh, count):
"""
Use rejection sampling to produce points randomly distributed
in the volume of a mesh.
Parameters
----------
mesh: Trimesh object
count: int, number of samples desired
Returns
----------
samples: (n,3) float, points in the volume of the mesh.
where: n <= count
"""
points = (np.random.random((count, 3)) * mesh.extents) + mesh.bounds[0]
contained = mesh.contains(points)
samples = points[contained][:count]
return samples | Use rejection sampling to produce points randomly distributed
in the volume of a mesh.
Parameters
----------
mesh: Trimesh object
count: int, number of samples desired
Returns
----------
samples: (n,3) float, points in the volume of the mesh.
where: n <= count |
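A hedged usage sketch for volume_mesh, assuming the function above is in scope and given a watertight trimesh.Trimesh; the box mesh and sample count are illustrative only.

import trimesh

mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))  # simple watertight mesh
samples = volume_mesh(mesh, count=1000)
# rejection sampling may return fewer points than requested
assert samples.shape[1] == 3 and len(samples) <= 1000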
def B(self,value):
""" set phenotype """
assert value.shape[0]==self._K, 'Dimension mismatch'
assert value.shape[1]==1, 'Dimension mismatch'
self._B = value
self.clear_cache('predict','Yres') | set phenotype |
def get_storage(self):
'''Get the storage instance.
:return Redis: Redis instance
'''
if self.storage:
return self.storage
self.storage = self.reconnect_redis()
return self.storage | Get the storage instance.
:return Redis: Redis instance |
def default_headers(self):
"""
It's always OK to include these headers
"""
_headers = {
"User-Agent": "Pyzotero/%s" % __version__,
"Zotero-API-Version": "%s" % __api_version__,
}
if self.api_key:
_headers["Authorization"] = "Bearer %s" % self.api_key
return _headers | It's always OK to include these headers |
def find_input(self, stream):
"""Find the input that responds to this stream.
Args:
stream (DataStream): The stream to find
Returns:
(index, None): The index if found or None
"""
for i, input_x in enumerate(self.inputs):
if input_x[0].matches(stream):
return i | Find the input that responds to this stream.
Args:
stream (DataStream): The stream to find
Returns:
(index, None): The index if found or None |
def run(
self,
cluster_config,
rg_parser,
partition_measurer,
cluster_balancer,
args,
):
"""Initialize cluster_config, args, and zk then call run_command."""
self.cluster_config = cluster_config
self.args = args
with ZK(self.cluster_config) as self.zk:
self.log.debug(
'Starting %s for cluster: %s and zookeeper: %s',
self.__class__.__name__,
self.cluster_config.name,
self.cluster_config.zookeeper,
)
brokers = self.zk.get_brokers()
assignment = self.zk.get_cluster_assignment()
pm = partition_measurer(
self.cluster_config,
brokers,
assignment,
args,
)
ct = ClusterTopology(
assignment,
brokers,
pm,
rg_parser.get_replication_group,
)
if len(ct.partitions) == 0:
self.log.info("The cluster is empty. No actions to perform.")
return
# Exit if there is an on-going reassignment
if self.is_reassignment_pending():
self.log.error('Previous reassignment pending.')
sys.exit(1)
self.run_command(ct, cluster_balancer(ct, args)) | Initialize cluster_config, args, and zk then call run_command. |
def open(self):
"""Open an existing database"""
if self._table_exists():
self.mode = "open"
# get table info
self._get_table_info()
self.types = dict([ (f[0],self.conv_func[f[1].upper()])
for f in self.fields if f[1].upper() in self.conv_func ])
return self
else:
# table not found
raise IOError("Table %s doesn't exist" % self.name) | Open an existing database |
def add(name, device):
'''
Add new device to RAID array.
CLI Example:
.. code-block:: bash
salt '*' raid.add /dev/md0 /dev/sda1
'''
cmd = 'mdadm --manage {0} --add {1}'.format(name, device)
if __salt__['cmd.retcode'](cmd) == 0:
return True
return False | Add new device to RAID array.
CLI Example:
.. code-block:: bash
salt '*' raid.add /dev/md0 /dev/sda1 |
def load_yaml_file(filename):
""" Load a YAML file from disk, throw a ParserError on failure."""
try:
with open(filename, 'r') as f:
return yaml.safe_load(f)
except IOError as e:
raise ParserError('Error opening ' + filename + ': ' + str(e))
except ValueError as e:
raise ParserError('Error parsing processes in {}: {}'
.format(filename, e)) | Load a YAML file from disk, throw a ParserError on failure. |
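A minimal usage sketch; 'processes.yaml' is a hypothetical path, and ParserError comes from the same module as load_yaml_file.

try:
    processes = load_yaml_file('processes.yaml')
except ParserError as exc:
    print('could not load process definitions: {}'.format(exc))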
def add_activity_form(self, activity_pattern, is_active):
"""Adds the pattern as an active or inactive form to an Agent.
Parameters
----------
activity_pattern : dict
A dictionary of site names and their states.
is_active : bool
Is True if the given pattern corresponds to an active state.
"""
if is_active:
if activity_pattern not in self.active_forms:
self.active_forms.append(activity_pattern)
else:
if activity_pattern not in self.inactive_forms:
self.inactive_forms.append(activity_pattern) | Adds the pattern as an active or inactive form to an Agent.
Parameters
----------
activity_pattern : dict
A dictionary of site names and their states.
is_active : bool
Is True if the given pattern corresponds to an active state. |
def _kip(self, cycle_end, mix_thresh, xaxis, sparse):
"""
*** Should be used with care, therefore has been flagged as
a private routine ***
This function uses a threshold diffusion coefficient, above
which the the shell is considered to be convective, to plot a
Kippenhahn diagram.
Parameters
----------
cycle_end : integer
The final cycle number.
mix_thresh : float
The threshold diffusion coefficient.
xaxis : string
Choose one of 'age', 'cycle', 'log_age' or 'log_time_left'.
sparse : integer
Sparsity factor when plotting from cyclelist.
Examples
--------
>>> pt=mp.se('/ngpod1/swj/see/mppnp_out/scratch_data/M25.0Z1e-02','.h5')
>>> pt.kip(10000,'log_time_left',100)
"""
original_cyclelist = self.se.cycles
cyclelist = original_cyclelist[0:cycle_end:sparse]
xx = self.se.ages[:cycle_end:sparse]
totalmass = []
m_ini = float(self.se.get('mini'))
fig = pl.figure(1)
ax = pl.subplot(1,1,1)
fsize = 12
def getlims(d_coeff, massco):
"""
This function returns the convective boundaries for a cycle,
given the cycle's dcoeff and massco columns, taking into
account whether surface or centre are at the top.
"""
plotlims = []
if massco[0] > massco[-1]:
for j in range(-1,-len(d_coeff)-1,-1):
if j == -1:
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
else:
pass
elif (d_coeff[j]-mix_thresh)*(d_coeff[j+1]-mix_thresh) < 0:
plotlims.append(massco[j])
if j == -len(d_coeff):
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
return plotlims
else:
for j in range(len(d_coeff)):
if j == 0:
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
else:
pass
elif (d_coeff[j]-mix_thresh)*(d_coeff[j-1]-mix_thresh) < 0:
plotlims.append(massco[j])
if j == len(d_coeff)-1:
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
return plotlims
if xaxis == 'age':
ax.set_xlabel('Age [yr]',fontsize=fsize)
elif xaxis == 'cycle':
xx = cyclelist
ax.set_xlabel('Cycle',fontsize=fsize)
elif xaxis == 'log_age':
for i in range(len(xx)):
xx[i] = np.log10(xx[i])
ax.set_xlabel('log$_{10}$(age) [yr]',fontsize=fsize)
elif xaxis == 'log_time_left':
for i in range(len(xx)):
xx[i] = np.log10(max(xx)-xx[i])
xx[-2] = xx[-3]-abs(xx[-4]-xx[-3])
xx[-1] = xx[-2]-abs(xx[-3]-xx[-2])
ax.set_xlabel('log$_{10}$(time until collapse) [yr]',fontsize=fsize)
#centre-surface flag:
flag = False
if self.se.get(cyclelist[1],'mass')[0] > self.se.get(cyclelist[1],'mass')[-1]:
flag = True
for i in range(len(cyclelist)):
if flag == True:
totalmass.append(self.se.get(cyclelist[i],'mass')[0])
else:
totalmass.append(self.se.get(cyclelist[i],'mass')[-1])
percent = int(i*100/len(cyclelist))
sys.stdout.flush()
sys.stdout.write("\rcreating color map " + "...%d%%" % percent)
d_coeff = self.se.get(cyclelist[i],'dcoeff')
massco = self.se.get(cyclelist[i],'mass')
plotlims = getlims(d_coeff,massco)
for k in range(0,len(plotlims),2):
ax.axvline(xx[i],ymin=old_div(plotlims[k],m_ini),ymax=old_div(plotlims[k+1],m_ini),color='b',linewidth=0.5)
ax.plot(xx, totalmass, color='black', linewidth=1)
if xaxis == 'log_time_left':
ax.axis([xx[0],xx[-1],0.,m_ini])
else:
ax.axis([min(xx),max(xx),0.,m_ini])
ax.set_ylabel('Mass [$M_{\odot}$]',fontsize=fsize)
pl.show() | *** Should be used with care, therefore has been flagged as
a private routine ***
This function uses a threshold diffusion coefficient, above
which the shell is considered to be convective, to plot a
Kippenhahn diagram.
Parameters
----------
cycle_end : integer
The final cycle number.
mix_thresh : float
The threshold diffusion coefficient.
xaxis : string
Choose one of 'age', 'cycle', 'log_age' or 'log_time_left'.
sparse : integer
Sparsity factor when plotting from cyclelist.
Examples
--------
>>> pt=mp.se('/ngpod1/swj/see/mppnp_out/scratch_data/M25.0Z1e-02','.h5')
>>> pt.kip(10000,'log_time_left',100) |
def pointAt(self, **axis_values):
"""
Returns the point on the chart where the input values are located.
:return <QPointF>
"""
scene_point = self.renderer().pointAt(self.axes(), axis_values)
chart_point = self.uiChartVIEW.mapFromScene(scene_point)
return self.uiChartVIEW.mapToParent(chart_point) | Returns the point on the chart where the input values are located.
:return <QPointF> |
def delete_cluster_role_binding(self, name, **kwargs): # noqa: E501
"""delete_cluster_role_binding # noqa: E501
delete a ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_cluster_role_binding_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.delete_cluster_role_binding_with_http_info(name, **kwargs) # noqa: E501
return data | delete_cluster_role_binding # noqa: E501
delete a ClusterRoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
def _delocalize_logging_command(self, logging_path, user_project):
"""Returns a command to delocalize logs.
Args:
logging_path: location of log files.
user_project: name of the project to be billed for the request.
Returns:
eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12'
"""
# Get the logging prefix (everything up to ".log")
logging_prefix = os.path.splitext(logging_path.uri)[0]
# Set the provider-specific mkdir and file copy commands
if logging_path.file_provider == job_model.P_LOCAL:
mkdir_cmd = 'mkdir -p "%s"\n' % os.path.dirname(logging_prefix)
cp_cmd = 'cp'
elif logging_path.file_provider == job_model.P_GCS:
mkdir_cmd = ''
if user_project:
cp_cmd = 'gsutil -u {} -mq cp'.format(user_project)
else:
cp_cmd = 'gsutil -mq cp'
else:
assert False
# Construct the copy command
copy_logs_cmd = textwrap.dedent("""\
local cp_cmd="{cp_cmd}"
local prefix="{prefix}"
""").format(
cp_cmd=cp_cmd, prefix=logging_prefix)
# Build up the command
body = textwrap.dedent("""\
{mkdir_cmd}
{copy_logs_cmd}
""").format(
mkdir_cmd=mkdir_cmd, copy_logs_cmd=copy_logs_cmd)
return body | Returns a command to delocalize logs.
Args:
logging_path: location of log files.
user_project: name of the project to be billed for the request.
Returns:
eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12' |
def points_from_xywh(box):
"""
Constructs a polygon representation from a rectangle described as a dict with keys x, y, w, h.
"""
x, y, w, h = box['x'], box['y'], box['w'], box['h']
# tesseract uses a different region representation format
return "%i,%i %i,%i %i,%i %i,%i" % (
x, y,
x + w, y,
x + w, y + h,
x, y + h
) | Constructs a polygon representation from a rectangle described as a dict with keys x, y, w, h. |
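A worked sketch of the conversion above; the box values are illustrative only.

box = {'x': 10, 'y': 20, 'w': 100, 'h': 50}
print(points_from_xywh(box))
# -> '10,20 110,20 110,70 10,70'
# (top-left, top-right, bottom-right, bottom-left in image coordinates)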
def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
'''
Get the available certificates in the given store.
:param str context: The name of the certificate store location context.
:param str store: The name of the certificate store.
:return: A dictionary of the certificate thumbprints and properties.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' win_pki.get_certs
'''
ret = dict()
cmd = list()
blacklist_keys = ['DnsNameList']
store_path = r'Cert:\{0}\{1}'.format(context, store)
_validate_cert_path(name=store_path)
cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path))
cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version')
items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True)
for item in items:
cert_info = dict()
for key in item:
if key not in blacklist_keys:
cert_info[key.lower()] = item[key]
names = item.get('DnsNameList', None)
if isinstance(names, list):
cert_info['dnsnames'] = [name.get('Unicode') for name in names]
else:
cert_info['dnsnames'] = []
ret[item['Thumbprint']] = cert_info
return ret | Get the available certificates in the given store.
:param str context: The name of the certificate store location context.
:param str store: The name of the certificate store.
:return: A dictionary of the certificate thumbprints and properties.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' win_pki.get_certs |
def get_choices(cls, condition=None, order_by=None, query=None, value_field=None, text_field=None):
"""
Get [(value, text),...] list
:param condition:
:param value_field: default is primary_key
:param text_field: default is unicode(obj)
:return:
"""
result = []
if query is None:
query = cls.filter(condition).order_by(order_by)
for row in query:
if not value_field:
value = row._key
else:
value = getattr(row, value_field)
if not text_field:
text = unicode(row)
else:
text = getattr(row, text_field)
result.append((value, text))
return result | Get [(value, text),...] list
:param condition:
:param value_field: default is primary_key
:param text_field: default is unicode(obj)
:return: |
def json_decode(s: str) -> Any:
"""
Decodes an object from JSON using our custom decoder.
"""
try:
return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s)
except json.JSONDecodeError:
log.warning("Failed to decode JSON (returning None): {!r}", s)
return None | Decodes an object from JSON using our custom decoder. |
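A hedged usage sketch; it assumes json_class_decoder_hook, defined alongside json_decode, passes plain JSON objects through unchanged.

payload = '{"name": "example", "values": [1, 2, 3]}'
obj = json_decode(payload)
if obj is None:
    print('payload was not valid JSON')
else:
    print(obj['name'], obj['values'])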
def get_ref(self, cat, refname):
"""Return one of the rules in the category ``cat`` with the name
``refname``. If multiple rule definitions exist for the definition name
``refname``, use :any:`gramfuzz.rand` to choose a rule at random.
:param str cat: The category to look for the rule in.
:param str refname: The name of the rule definition. If the rule definition's name is
``"*"``, then a rule name will be chosen at random from within the category ``cat``.
:returns: gramfuzz.fields.Def
"""
if cat not in self.defs:
raise errors.GramFuzzError("referenced definition category ({!r}) not defined".format(cat))
if refname == "*":
refname = rand.choice(self.defs[cat].keys())
if refname not in self.defs[cat]:
raise errors.GramFuzzError("referenced definition ({!r}) not defined".format(refname))
return rand.choice(self.defs[cat][refname]) | Return one of the rules in the category ``cat`` with the name
``refname``. If multiple rule definitions exist for the definition name
``refname``, use :any:`gramfuzz.rand` to choose a rule at random.
:param str cat: The category to look for the rule in.
:param str refname: The name of the rule definition. If the rule definition's name is
``"*"``, then a rule name will be chosen at random from within the category ``cat``.
:returns: gramfuzz.fields.Def |
def write_networking_file(version, pairs):
"""
Write the VMware networking file.
"""
vmnets = OrderedDict(sorted(pairs.items(), key=lambda t: t[0]))
try:
with open(VMWARE_NETWORKING_FILE, "w", encoding="utf-8") as f:
f.write(version)
for key, value in vmnets.items():
f.write("answer {} {}\n".format(key, value))
except OSError as e:
raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
# restart VMware networking service
if sys.platform.startswith("darwin"):
if not os.path.exists("/Applications/VMware Fusion.app/Contents/Library/vmnet-cli"):
raise SystemExit("VMware Fusion is not installed in Applications")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --configure")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start")
else:
os.system("vmware-networks --stop")
os.system("vmware-networks --start") | Write the VMware networking file. |
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
s = set()
for name in names:
try:
s.add(_opcode(name))
except KeyError:
pass
return s | Return a set of opcodes by the names in `names`. |
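An illustrative sketch of why the KeyError is swallowed above: opcode names that only exist on some Python versions can be listed unconditionally. The names below are examples, and _opcode is assumed to be the companion name-to-number lookup helper.

# 'RETURN_CONST' only exists on newer CPython versions; where it is unknown
# it is silently skipped instead of raising.
RETURN_OPCODES = _opcode_set('RETURN_VALUE', 'RETURN_CONST')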
def request_client_list(self, req, msg):
"""Request the list of connected clients.
The list of clients is sent as a sequence of #client-list informs.
Informs
-------
addr : str
The address of the client as host:port with host in dotted quad
notation. If the address of the client could not be determined
(because, for example, the client disconnected suddenly) then
a unique string representing the client is sent instead.
Returns
-------
success : {'ok', 'fail'}
Whether sending the client list succeeded.
informs : int
Number of #client-list inform messages sent.
Examples
--------
::
?client-list
#client-list 127.0.0.1:53600
!client-list ok 1
"""
# TODO Get list of ClientConnection* instances and implement a standard
# 'address-print' method in the ClientConnection class
clients = self._client_conns
num_clients = len(clients)
for conn in clients:
addr = conn.address
req.inform(addr)
return req.make_reply('ok', str(num_clients)) | Request the list of connected clients.
The list of clients is sent as a sequence of #client-list informs.
Informs
-------
addr : str
The address of the client as host:port with host in dotted quad
notation. If the address of the client could not be determined
(because, for example, the client disconnected suddenly) then
a unique string representing the client is sent instead.
Returns
-------
success : {'ok', 'fail'}
Whether sending the client list succeeded.
informs : int
Number of #client-list inform messages sent.
Examples
--------
::
?client-list
#client-list 127.0.0.1:53600
!client-list ok 1 |
def write_properties(self, properties, file_datetime):
"""
Write properties to the ndata file specified by reference.
:param reference: the reference to which to write
:param properties: the dict to write to the file
:param file_datetime: the datetime for the file
The properties param must not change during this method. Callers should
take care to ensure this does not happen.
"""
with self.__lock:
absolute_file_path = self.__file_path
#logging.debug("WRITE properties %s for %s", absolute_file_path, key)
make_directory_if_needed(os.path.dirname(absolute_file_path))
exists = os.path.exists(absolute_file_path)
if exists:
rewrite_zip(absolute_file_path, Utility.clean_dict(properties))
else:
write_zip(absolute_file_path, None, Utility.clean_dict(properties))
# convert to utc time.
tz_minutes = Utility.local_utcoffset_minutes(file_datetime)
timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60
os.utime(absolute_file_path, (time.time(), timestamp)) | Write properties to the ndata file specified by reference.
:param reference: the reference to which to write
:param properties: the dict to write to the file
:param file_datetime: the datetime for the file
The properties param must not change during this method. Callers should
take care to ensure this does not happen. |
def validate_unwrap(self, value):
''' Checks that value is a ``dict``, that every key is a valid MongoDB
key, and that every value validates based on DictField.value_type
'''
if not isinstance(value, dict):
self._fail_validation_type(value, dict)
for k, v in value.items():
self._validate_key_unwrap(k)
try:
self.value_type.validate_unwrap(v)
except BadValueException as bve:
self._fail_validation(value, 'Bad value for key %s' % k, cause=bve) | Checks that value is a ``dict``, that every key is a valid MongoDB
key, and that every value validates based on DictField.value_type |
def delivery_note_pdf(self, delivery_note_id):
"""
Opens a pdf of a delivery note
:param delivery_note_id: the delivery note id
:return: dict
"""
return self._create_get_request(resource=DELIVERY_NOTES, billomat_id=delivery_note_id, command=PDF) | Opens a pdf of a delivery note
:param delivery_note_id: the delivery note id
:return: dict |
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instance function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret | Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max |
def spksub(handle, descr, identin, begin, end, newh):
"""
Extract a subset of the data in an SPK segment into a
separate segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spksub_c.html
:param handle: Handle of source segment.
:type handle: int
:param descr: Descriptor of source segment.
:type descr: 5-Element Array of floats
:param identin: Identifier of source segment.
:type identin: str
:param begin: Beginning (initial epoch) of subset.
:type begin: int
:param end: End (final epoch) of subset.
:type end: int
:param newh: Handle of new segment.
:type newh: int
"""
assert len(descr) == 5
handle = ctypes.c_int(handle)
descr = stypes.toDoubleVector(descr)
identin = stypes.stringToCharP(identin)
begin = ctypes.c_double(begin)
end = ctypes.c_double(end)
newh = ctypes.c_int(newh)
libspice.spksub_c(handle, descr, identin, begin, end, newh) | Extract a subset of the data in an SPK segment into a
separate segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spksub_c.html
:param handle: Handle of source segment.
:type handle: int
:param descr: Descriptor of source segment.
:type descr: 5-Element Array of floats
:param identin: Identifier of source segment.
:type identin: str
:param begin: Beginning (initial epoch) of subset.
:type begin: int
:param end: End (final epoch) of subset.
:type end: int
:param newh: Handle of new segment.
:type newh: int |
def _dqdv_split_frames(cell, tidy=False, **kwargs):
"""Returns dqdv data as pandas.DataFrames for all cycles.
Args:
cell (CellpyData-object).
tidy (bool): return in wide format if False (default),
long (tidy) format if True.
Returns:
(charge_ica_frame, discharge_ica_frame) where the frames are
pandas.DataFrames where the first column is voltage ('v') and
the following columns are the incremental capacity for each
cycle (multi-indexed, where cycle number is on the top level).
Example:
>>> from cellpy.utils import ica
>>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell)
>>> charge_ica_df.plot(x=("voltage", "v"))
"""
charge_dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
cell,
direction="charge"
)
# charge_df = pd.concat(
# charge_dfs, axis=1, keys=[k.name for k in charge_dfs])
ica_charge_dfs = _make_ica_charge_curves(
charge_dfs, cycles, minimum_v, maximum_v,
**kwargs,
)
ica_charge_df = pd.concat(
ica_charge_dfs,
axis=1,
keys=[k.name for k in ica_charge_dfs]
)
dcharge_dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
cell,
direction="discharge"
)
ica_dcharge_dfs = _make_ica_charge_curves(
dcharge_dfs, cycles, minimum_v, maximum_v,
**kwargs,
)
ica_discharge_df = pd.concat(
ica_dcharge_dfs,
axis=1,
keys=[k.name for k in ica_dcharge_dfs]
)
ica_charge_df.columns.names = ["cycle", "value"]
ica_discharge_df.columns.names = ["cycle", "value"]
if tidy:
ica_charge_df = ica_charge_df.melt(
"voltage",
var_name="cycle",
value_name="dq",
col_level=0
)
ica_discharge_df = ica_discharge_df.melt(
"voltage",
var_name="cycle",
value_name="dq",
col_level=0
)
return ica_charge_df, ica_discharge_df | Returns dqdv data as pandas.DataFrames for all cycles.
Args:
cell (CellpyData-object).
tidy (bool): return in wide format if False (default),
long (tidy) format if True.
Returns:
(charge_ica_frame, discharge_ica_frame) where the frames are
pandas.DataFrames where the first column is voltage ('v') and
the following columns are the incremental capacity for each
cycle (multi-indexed, where cycle number is on the top level).
Example:
>>> from cellpy.utils import ica
>>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell)
>>> charge_ica_df.plot(x=("voltage", "v")) |
def get_token(self, request):
""" Create a stripe token for a card
"""
return stripe.Token.create(
card={
"number": request.data["number"],
"exp_month": request.data["exp_month"],
"exp_year": request.data["exp_year"],
"cvc": request.data["cvc"]
}
) | Create a stripe token for a card |
async def create_proof(self, proof_req: dict, briefs: Union[dict, Sequence[dict]], requested_creds: dict) -> str:
"""
Create proof as HolderProver.
Raise:
* AbsentLinkSecret if link secret not set
* CredentialFocus on attempt to create proof on no briefs or multiple briefs for a credential definition
* AbsentTails if missing required tails file
* | BadRevStateTime if a timestamp for a revocation registry state in the proof request
| occurs before revocation registry creation
* IndyError for any other indy-sdk error
* AbsentInterval if briefs missing non-revocation interval, but cred def supports revocation
* WalletState if the wallet is closed.
:param proof_req: proof request as per Verifier.build_proof_req_json()
:param briefs: cred-brief, sequence thereof, or mapping from wallet cred-id to briefs, to prove
:param requested_creds: requested credentials data structure; i.e.,
::
{
'self_attested_attributes': {},
'requested_attributes': {
'attr0_uuid': {
'cred_id': string,
'timestamp': integer, # for revocation state
'revealed': bool
},
...
},
'requested_predicates': {
'predicate0_uuid': {
'cred_id': string,
'timestamp': integer # for revocation state
}
}
}
:return: proof json
"""
LOGGER.debug(
'HolderProver.create_proof >>> proof_req: %s, briefs: %s, requested_creds: %s',
proof_req,
briefs,
requested_creds)
if not self.wallet.handle:
LOGGER.debug('HolderProver.create_proof <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
label = await self._assert_link_secret('create_proof')
cd_ids = set()
x_cd_ids = set()
for brief in iter_briefs(briefs):
cd_id = brief['cred_info']['cred_def_id']
if cd_id in cd_ids and cd_id not in x_cd_ids:
x_cd_ids.add(cd_id)
cd_ids.add(cd_id)
if x_cd_ids:
LOGGER.debug('HolderProver.create_proof <!< briefs specification out of focus (non-uniqueness)')
raise CredentialFocus('Briefs list repeats cred defs: {}'.format(x_cd_ids))
s_id2schema = {} # schema identifier to schema
cd_id2cred_def = {} # credential definition identifier to credential definition
rr_id2timestamp = {} # revocation registry of interest to timestamp of interest (or None)
rr_id2cr_id = {} # revocation registry of interest to credential revocation identifier
for brief in iter_briefs(briefs):
interval = brief.get('interval', None)
cred_info = brief['cred_info']
s_id = cred_info['schema_id']
if not ok_schema_id(s_id):
LOGGER.debug('HolderProver.create_proof <!< Bad schema id %s', s_id)
raise BadIdentifier('Bad schema id {}'.format(s_id))
if s_id not in s_id2schema:
schema = json.loads(await self.get_schema(s_id)) # add to cache en passant
if not schema:
LOGGER.debug(
'HolderProver.create_proof <!< absent schema %s, proof req may be for another ledger',
s_id)
raise AbsentSchema('Absent schema {}, proof req may be for another ledger'.format(s_id))
s_id2schema[s_id] = schema
cd_id = cred_info['cred_def_id']
if not ok_cred_def_id(cd_id):
LOGGER.debug('HolderProver.create_proof <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
if cd_id not in cd_id2cred_def:
cred_def = json.loads(await self.get_cred_def(cd_id)) # add to cache en passant
cd_id2cred_def[cd_id] = cred_def
rr_id = cred_info['rev_reg_id']
if rr_id:
if not ok_rev_reg_id(rr_id):
LOGGER.debug('HolderProver.create_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
await self._sync_revoc_for_proof(rr_id) # link tails file to its rr_id if it's new
if interval:
if rr_id not in rr_id2timestamp:
if interval['to'] > int(time()):
LOGGER.debug(
'HolderProver.create_proof <!< interval to %s for rev reg %s is in the future',
interval['to'],
rr_id)
raise BadRevStateTime(
'Revocation registry {} timestamp {} is in the future'.format(rr_id, interval['to']))
rr_id2timestamp[rr_id] = interval['to']
elif 'revocation' in cd_id2cred_def[cd_id]['value']:
LOGGER.debug(
'HolderProver.create_proof <!< brief on cred def id %s missing non-revocation interval',
cd_id)
raise AbsentInterval('Brief on cred def id {} missing non-revocation interval'.format(cd_id))
if rr_id in rr_id2cr_id:
continue
rr_id2cr_id[rr_id] = cred_info['cred_rev_id']
rr_id2rev_state = {} # revocation registry identifier to its state
with REVO_CACHE.lock:
for rr_id in rr_id2timestamp:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
tails = revo_cache_entry.tails if revo_cache_entry else None
if tails is None: # missing tails file
LOGGER.debug('HolderProver.create_proof <!< missing tails file for rev reg id %s', rr_id)
raise AbsentTails('Missing tails file for rev reg id {}'.format(rr_id))
rr_def_json = await self.get_rev_reg_def(rr_id)
(rr_delta_json, ledger_timestamp) = await revo_cache_entry.get_delta_json(
self._build_rr_delta_json,
rr_id2timestamp[rr_id],
rr_id2timestamp[rr_id])
rr_state_json = await anoncreds.create_revocation_state(
tails.reader_handle,
rr_def_json,
rr_delta_json,
ledger_timestamp,
rr_id2cr_id[rr_id])
rr_id2rev_state[rr_id] = {
rr_id2timestamp[rr_id]: json.loads(rr_state_json)
}
rv = await anoncreds.prover_create_proof(
self.wallet.handle,
json.dumps(proof_req),
json.dumps(requested_creds),
label,
json.dumps(s_id2schema),
json.dumps(cd_id2cred_def),
json.dumps(rr_id2rev_state))
LOGGER.debug('HolderProver.create_proof <<< %s', rv)
return rv | Create proof as HolderProver.
Raise:
* AbsentLinkSecret if link secret not set
* CredentialFocus on attempt to create proof on no briefs or multiple briefs for a credential definition
* AbsentTails if missing required tails file
* | BadRevStateTime if a timestamp for a revocation registry state in the proof request
| occurs before revocation registry creation
* IndyError for any other indy-sdk error
* AbsentInterval if briefs missing non-revocation interval, but cred def supports revocation
* WalletState if the wallet is closed.
:param proof_req: proof request as per Verifier.build_proof_req_json()
:param briefs: cred-brief, sequence thereof, or mapping from wallet cred-id to briefs, to prove
:param requested_creds: requested credentials data structure; i.e.,
::
{
'self_attested_attributes': {},
'requested_attributes': {
'attr0_uuid': {
'cred_id': string,
'timestamp': integer, # for revocation state
'revealed': bool
},
...
},
'requested_predicates': {
'predicate0_uuid': {
'cred_id': string,
'timestamp': integer # for revocation state
}
}
}
:return: proof json |
def transfer_sanity_check( name, consensus_hash ):
"""
Verify that data for a transfer is valid.
Return True on success
Raise Exception on error
"""
if name is not None and (not is_b40( name ) or "+" in name or name.count(".") > 1):
raise Exception("Name '%s' has non-base-38 characters" % name)
# without the scheme, name must be 37 bytes
if name is not None and (len(name) > LENGTHS['blockchain_id_name']):
raise Exception("Name '%s' is too long; expected %s bytes" % (name, LENGTHS['blockchain_id_name']))
return True | Verify that data for a transfer is valid.
Return True on success
Raise Exception on error |
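A minimal usage sketch; the name and consensus hash below are placeholders only, and is_b40 and LENGTHS are assumed to come from the same module as transfer_sanity_check.

try:
    transfer_sanity_check('example.id', 'deadbeef' * 4)  # placeholder arguments
except Exception as exc:
    print('invalid transfer data: {}'.format(exc))
else:
    print('transfer data looks sane')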
def _fracRoiSparse(self):
"""
Calculate an approximate pixel coverage fraction from the two masks.
We have no way to know a priori how much the coverage of the
two masks overlap in a given pixel. For example, masks that each
have frac = 0.5 could have a combined frac = [0.0 to 0.5].
The limits will be:
max: min(frac1,frac2)
min: max((frac1+frac2)-1, 0.0)
Sometimes we are lucky and our fracdet is actually already
calculated for the two masks combined, so that the max
condition is satisfied. That is what we will assume...
"""
self.frac_roi_sparse = np.min([self.mask_1.frac_roi_sparse,self.mask_2.frac_roi_sparse],axis=0)
return self.frac_roi_sparse | Calculate an approximate pixel coverage fraction from the two masks.
We have no way to know a priori how much the coverage of the
two masks overlap in a given pixel. For example, masks that each
have frac = 0.5 could have a combined frac = [0.0 to 0.5].
The limits will be:
max: min(frac1,frac2)
min: max((frac1+frac2)-1, 0.0)
Sometimes we are lucky and our fracdet is actually already
calculated for the two masks combined, so that the max
condition is satisfied. That is what we will assume... |
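A worked sketch of the coverage-fraction bounds described in the docstring above, using plain numbers for a single pixel.

import numpy as np

frac1, frac2 = 0.5, 0.5
upper = min(frac1, frac2)                  # 0.5: the two masks overlap completely
lower = max(frac1 + frac2 - 1.0, 0.0)      # 0.0: the two masks do not overlap at all
combined = np.min([frac1, frac2], axis=0)  # the optimistic (max) choice used above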
def process_file(pyfile_name):
'''Process a Python source file with Google style docstring comments.
Reads file header comment, function definitions, function docstrings.
Returns dictionary encapsulation for subsequent writing.
Args:
pyfile_name (str): file name to read.
Returns:
Dictionary object containing summary comment, with a list of entries for each function.
'''
print('Processing file: ' + pyfile_name)
# load the source file
with open(pyfile_name) as fpyfile:
pyfile_str = fpyfile.readlines()
# meta-doc for a source file
file_dict = {'source_file': pyfile_name.replace('\\', '/')}
# get file summary line at the top of the file
if pyfile_str[0].startswith("'''"):
file_dict['summary_comment'] = pyfile_str[0][:-1].strip("'")
else:
file_dict['summary_comment'] = pyfile_name
file_dict['functions'] = []
# find every function definition
for line in pyfile_str:
# process definition
if line.startswith('def '):
line_num = pyfile_str.index(line)
fn_def = line[4:]
fn_name = fn_def.split('(')[0]
function_info = {'name': fn_name}
extract = extract_code(':', fn_def, pyfile_str, line_num)
function_info['definition'] = extract['current_str']
# process docstring
line_num = extract['line_num'] + 1
doc_line = pyfile_str[line_num]
if doc_line.startswith(" '''"):
comment_str = doc_line[7:]
extract = extract_code(
"'''", comment_str, pyfile_str, line_num)
function_info['comments'] = extract['current_str']
file_dict['functions'].append(function_info)
return file_dict | Process a Python source file with Google style docstring comments.
Reads file header comment, function definitions, function docstrings.
Returns dictionary encapsulation for subsequent writing.
Args:
pyfile_name (str): file name to read.
Returns:
Dictionary object containing summary comment, with a list of entries for each function. |